/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/u_dynarray.h"

/**
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you. This pass is for those times. Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access. This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * This pass uses an intermediate solution between being local / "per-block"
 * and a complete data-flow analysis. It follows the control flow graph and
 * propagates the available copy information forward, invalidating data at
 * each cf_node.
 *
 * Removal of dead writes to variables is handled by another pass.
 */
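
/* Illustrative example (added for clarity, not from the original comment):
 *
 *    arr[i] = ssa_1;       // store_deref through an indirect index
 *    ssa_2 = arr[i];       // load_deref of the same indirect deref
 *
 * SSA-based copy propagation cannot connect these two accesses, but this pass
 * records the store as a copy entry for arr[i] and, assuming no aliasing
 * write happens in between, rewrites users of ssa_2 to use ssa_1 directly.
 */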

struct vars_written {
   nir_variable_mode modes;

   /* Key is deref and value is the uintptr_t with the write mask. */
   struct hash_table *derefs;
};

struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_instr *deref;
   };
};
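
/* Illustrative note (added for clarity): after a partial store such as
 * "v.xz = ssa_3", the value recorded for v has ssa[0] and ssa[2] pointing at
 * ssa_3 (the stored vector) and NULL in the untouched slots, whereas a
 * copy_deref "v = w" is recorded as a single deref pointing at w.
 */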

struct copy_entry {
   struct value src;

   nir_deref_instr *dst;
};

struct copy_prop_var_state {
   nir_function_impl *impl;

   void *mem_ctx;
   void *lin_ctx;

   /* Maps nodes to vars_written. Used to invalidate copy entries when
    * visiting each node.
    */
   struct hash_table *vars_written_map;

   bool progress;
};

static bool
value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_store_deref);
   uintptr_t write_mask = nir_intrinsic_write_mask(intrin);

   for (unsigned i = 0; i < intrin->num_components; i++) {
      if ((write_mask & (1 << i)) &&
          value->ssa[i] != intrin->src[1].ssa)
         return false;
   }

   return true;
}

static struct vars_written *
create_vars_written(struct copy_prop_var_state *state)
{
   struct vars_written *written =
      linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
   written->derefs = _mesa_hash_table_create(state->mem_ctx, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);
   return written;
}

static void
gather_vars_written(struct copy_prop_var_state *state,
                    struct vars_written *written,
                    nir_cf_node *cf_node)
{
   struct vars_written *new_written = NULL;

   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);
      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         gather_vars_written(state, NULL, cf_node);
      break;
   }

   case nir_cf_node_block: {
      if (!written)
         break;

      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_call) {
            written->modes |= nir_var_shader_out |
                              nir_var_global |
                              nir_var_local |
                              nir_var_shader_storage |
                              nir_var_shared;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_barrier:
         case nir_intrinsic_memory_barrier:
            written->modes |= nir_var_shader_out |
                              nir_var_shader_storage |
                              nir_var_shared;
            break;

         case nir_intrinsic_emit_vertex:
         case nir_intrinsic_emit_vertex_with_counter:
            written->modes = nir_var_shader_out;
            break;

         case nir_intrinsic_store_deref:
         case nir_intrinsic_copy_deref: {
            /* Destination in _both_ store_deref and copy_deref is src[0]. */
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

            uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
               nir_intrinsic_write_mask(intrin) :
               (1 << glsl_get_vector_elements(dst->type)) - 1;

            struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
            if (ht_entry)
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            else
               _mesa_hash_table_insert(written->derefs, dst, (void *)mask);

            break;
         }

         default:
            break;
         }
      }

      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         gather_vars_written(state, new_written, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }

   if (new_written) {
      /* Merge the new information into the parent control flow node. */
      if (written) {
         written->modes |= new_written->modes;
         hash_table_foreach(new_written->derefs, new_entry) {
            struct hash_entry *old_entry =
               _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key);
            if (old_entry) {
               nir_component_mask_t merged = (uintptr_t) new_entry->data |
                                             (uintptr_t) old_entry->data;
               old_entry->data = (void *) ((uintptr_t) merged);
            } else {
               _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key, new_entry->data);
            }
         }
      }
      _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
   }
}
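
/* Illustrative note (added for clarity): for a construct such as
 *
 *    if (cond) { a = x; } else { b[i] = y; }
 *
 * the vars_written entry stored for the if node records writes to both a and
 * b[i], so the copy-propagation walk below can invalidate any copy entries
 * that may alias either destination when it reaches that node.
 */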

static struct copy_entry *
copy_entry_create(struct util_dynarray *copies,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry new_entry = {
      .dst = dst_deref,
   };
   util_dynarray_append(copies, struct copy_entry, new_entry);
   return util_dynarray_top_ptr(copies, struct copy_entry);
}

/* Remove copy entry by swapping it with the last element and reducing the
 * size. If used inside an iteration on copies, it must be a reverse
 * (backwards) iteration. It is safe to use in those cases because the swap
 * will not affect the rest of the iteration.
 */
static void
copy_entry_remove(struct util_dynarray *copies,
                  struct copy_entry *entry)
{
   /* This also works when removing the last element since pop doesn't shrink
    * the memory used by the array, so the swap is useless but not invalid.
    */
   *entry = util_dynarray_pop(copies, struct copy_entry);
}
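
/* Illustrative note (added for clarity): with copies = [E0, E1, E2], removing
 * E1 pops E2 into E1's slot, leaving [E0, E2]. A forward iteration would then
 * skip the swapped-in element, which is why callers that remove entries while
 * iterating use util_dynarray_foreach_reverse.
 */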

static struct copy_entry *
lookup_entry_for_deref(struct util_dynarray *copies,
                       nir_deref_instr *deref,
                       nir_deref_compare_result allowed_comparisons)
{
   util_dynarray_foreach(copies, struct copy_entry, iter) {
      if (nir_compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}

static struct copy_entry *
lookup_entry_and_kill_aliases(struct util_dynarray *copies,
                              nir_deref_instr *deref,
                              unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry = NULL;
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
            copy_entry_remove(copies, iter);
            continue;
         }
      }

      nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);

      if (comp & nir_derefs_equal_bit) {
         assert(entry == NULL);
         entry = iter;
      } else if (comp & nir_derefs_may_alias_bit) {
         copy_entry_remove(copies, iter);
      }
   }

   return entry;
}

static void
kill_aliases(struct util_dynarray *copies,
             nir_deref_instr *deref,
             unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);
   if (entry)
      copy_entry_remove(copies, entry);
}

static struct copy_entry *
get_entry_and_kill_aliases(struct util_dynarray *copies,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);

   if (entry == NULL)
      entry = copy_entry_create(copies, deref);

   return entry;
}

static void
apply_barrier_for_modes(struct util_dynarray *copies,
                        nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      nir_variable *dst_var = nir_deref_instr_get_variable(iter->dst);
      nir_variable *src_var = iter->src.is_ssa ? NULL :
         nir_deref_instr_get_variable(iter->src.deref);

      if ((dst_var->data.mode & modes) ||
          (src_var && (src_var->data.mode & modes)))
         copy_entry_remove(copies, iter);
   }
}

static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask)
{
   if (value->is_ssa) {
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i))
            entry->src.ssa[i] = value->ssa[i];
      }
   } else {
      /* Non-ssa stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
   }
}

/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def. Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller. If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL. (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
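/* Illustrative example (added for clarity): if the entry for a vec4 holds
 * value->ssa = { a, a, NULL, b } and the load reads all four channels, this
 * builds vec4(a.x, a.y, load.z, b.w), keeps the original load_deref alive
 * only to supply the missing .z channel, and reports every channel of "value"
 * as the new vecN.
 */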
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail. Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list. Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}

/**
 * Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific. The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
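/* Illustrative example (added for clarity): suppose a copy entry was created
 * by "copy_deref a[*] <- b[*]" and a later load reads a[3]. Then deref is the
 * entry's source path b[*], guide is the entry's destination path a[*], and
 * specific is the load's path a[3]; the wildcard in b[*] is filled in from
 * the matching position of specific, yielding b[3].
 */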
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky. We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}

/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller
 * can steal it and assign it to the instruction directly without copying it
 * again.
 */
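/* Illustrative example (added for clarity): with an entry recording the copy
 * "a = b", a load of a.field is answered by building the deref b.field; if
 * the entry instead records "a[*] = b[*]" and the load reads a[2], the
 * wildcard specialization above produces b[2].
 */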
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src. This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst. Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}

static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}

static void
invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
                              struct util_dynarray *copies,
                              nir_cf_node *cf_node)
{
   struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
   assert(ht_entry);

   struct vars_written *written = ht_entry->data;
   if (written->modes) {
      util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
         if (entry->dst->mode & written->modes)
            copy_entry_remove(copies, entry);
      }
   }

   hash_table_foreach (written->derefs, entry) {
      nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
      kill_aliases(copies, deref_written, (uintptr_t)entry->data);
   }
}

static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block,
                     struct util_dynarray *copies)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_call) {
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_global |
                                         nir_var_local |
                                         nir_var_shader_storage |
                                         nir_var_shared);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_shader_storage |
                                         nir_var_shared);
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* The lookup above has already ensured that we get a single
                * SSA value that has all of the channels. We just have to do
                * the rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place. This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it. In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(copies, src);

         /* Set up a store to this entry with the value of the load. This way
          * we can potentially remove subsequent loads. However, we use a
          * NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1));
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         struct copy_entry *entry =
            lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
         if (entry && value_equals_store_src(&entry->src, intrin)) {
            /* If we are storing the value from a load of the same var, the
             * store is redundant, so remove it.
             */
            nir_instr_remove(instr);
         } else {
            struct value value = {
               .is_ssa = true
            };

            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = intrin->src[1].ssa;

            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            struct copy_entry *entry =
               get_entry_and_kill_aliases(copies, dst, wrmask);
            store_to_entry(state, entry, &value, wrmask);
         }

         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            /* This is a no-op self-copy. Get rid of it */
            nir_instr_remove(instr);
            continue;
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa[0], 0xf);
               intrin = nir_instr_as_intrinsic(nir_builder_last_instr(b));
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(copies, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf);
         break;
      }

      default:
         break;
      }
   }
}

static void
copy_prop_vars_cf_node(struct copy_prop_var_state *state,
                       struct util_dynarray *copies,
                       nir_cf_node *cf_node)
{
   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);

      struct util_dynarray impl_copies;
      util_dynarray_init(&impl_copies, state->mem_ctx);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         copy_prop_vars_cf_node(state, &impl_copies, cf_node);

      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_builder b;
      nir_builder_init(&b, state->impl);
      copy_prop_vars_block(state, &b, block, copies);
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      /* Clone the copies for each branch of the if statement. The idea is
       * that they both see the same state of available copies, but do not
       * interfere with each other.
       */

      struct util_dynarray then_copies;
      util_dynarray_clone(&then_copies, state->mem_ctx, copies);

      struct util_dynarray else_copies;
      util_dynarray_clone(&else_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         copy_prop_vars_cf_node(state, &then_copies, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         copy_prop_vars_cf_node(state, &else_copies, cf_node);

      /* Both branches' copies can be ignored, since the effect of running
       * both branches was already captured in the first pass that collects
       * vars_written.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      /* Invalidate before cloning the copies for the loop, since the loop
       * body can be executed more than once.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      struct util_dynarray loop_copies;
      util_dynarray_clone(&loop_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         copy_prop_vars_cf_node(state, &loop_copies, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }
}

static bool
nir_copy_prop_vars_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_context(NULL);

   struct copy_prop_var_state state = {
      .impl = impl,
      .mem_ctx = mem_ctx,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),

      .vars_written_map = _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal),
   };

   gather_vars_written(&state, NULL, &impl->cf_node);

   copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);

   if (state.progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
   return state.progress;
}

bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= nir_copy_prop_vars_impl(function->impl);
   }

   return progress;
}