nir: Rework opt_copy_prop_vars to use deref instructions
[mesa.git] / src / compiler / nir / nir_opt_copy_prop_vars.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"

/**
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you. This pass is for those times. Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access. This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Dead code elimination of store_deref and copy_deref intrinsics based on
 *     killed destination values.
 *
 *  3) Removal of redundant load_deref intrinsics. We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * Unfortunately, properly handling all of those cases makes this pass rather
 * complex. In order to avoid additional complexity, this pass is entirely
 * block-local. If we tried to make it global, the data-flow analysis would
 * rapidly get out of hand. Fortunately, for anything that is only ever
 * accessed directly, we get SSA-based copy propagation, which is extremely
 * powerful, so this isn't that great a loss.
 */
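
/* For example (a hypothetical snippet for illustration), given the following
 * within a single block:
 *
 *    arr[i] = x;
 *    y = arr[i];
 *
 * SSA-based copy propagation cannot see through the indirect array access,
 * but this pass can forward x into the load of arr[i] and remove the load.
 */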

struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_instr *deref;
   };
};

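/* Tracks the most recent value known to be stored in "dst" within the
 * current block. "src" holds that value, either as per-component SSA defs
 * or as a source deref. "store_instr[i]" is the instruction (if any) that
 * last wrote component i, so it can be deleted if the write turns out to be
 * dead, and "comps_may_be_read" is a bitmask of components that may have
 * been read since they were last written.
 */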
struct copy_entry {
   struct list_head link;

   nir_instr *store_instr[4];

   unsigned comps_may_be_read;
   struct value src;

   nir_deref_instr *dst;
};

struct copy_prop_var_state {
   nir_shader *shader;

   void *mem_ctx;

   struct list_head copies;

   /* We're going to be allocating and deleting a lot of copy entries so we'll
    * keep a free list to avoid thrashing malloc too badly.
    */
   struct list_head copy_free_list;

   bool progress;
};

static struct copy_entry *
copy_entry_create(struct copy_prop_var_state *state,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry *entry;
   if (!list_empty(&state->copy_free_list)) {
      struct list_head *item = state->copy_free_list.next;
      list_del(item);
      entry = LIST_ENTRY(struct copy_entry, item, link);
      memset(entry, 0, sizeof(*entry));
   } else {
      entry = rzalloc(state->mem_ctx, struct copy_entry);
   }

   entry->dst = dst_deref;
   list_add(&entry->link, &state->copies);

   return entry;
}

static void
copy_entry_remove(struct copy_prop_var_state *state, struct copy_entry *entry)
{
   list_del(&entry->link);
   list_add(&entry->link, &state->copy_free_list);
}

enum deref_compare_result {
   derefs_equal_bit        = (1 << 0),
   derefs_may_alias_bit    = (1 << 1),
   derefs_a_contains_b_bit = (1 << 2),
   derefs_b_contains_a_bit = (1 << 3),
};
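
/* A few hypothetical comparisons for illustration: v[0] vs. v[1] yields 0
 * (the derefs cannot even alias); v[i] vs. v[j] with distinct, non-constant
 * indices yields only derefs_may_alias_bit; v[*] vs. v[3] yields
 * derefs_may_alias_bit | derefs_a_contains_b_bit; and comparing a deref with
 * itself yields all four bits.
 */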

/** Returns a bitfield describing how the storage referenced by a_path
 * relates to the storage referenced by b_path: equality, possible aliasing,
 * and containment in either direction.
 *
 * NOTE: This is fairly general and could be moved to core NIR if someone else
 * ever needs it.
 */
static enum deref_compare_result
compare_deref_paths(nir_deref_path *a_path,
                    nir_deref_path *b_path)
{
   if (a_path->path[0]->var != b_path->path[0]->var)
      return 0;

   /* Start off assuming they fully compare. We ignore equality for now. In
    * the end, we'll determine that by containment.
    */
   enum deref_compare_result result = derefs_may_alias_bit |
                                      derefs_a_contains_b_bit |
                                      derefs_b_contains_a_bit;

   nir_deref_instr **a_p = &a_path->path[1];
   nir_deref_instr **b_p = &b_path->path[1];
   while (*a_p != NULL && *b_p != NULL) {
      nir_deref_instr *a_tail = *(a_p++);
      nir_deref_instr *b_tail = *(b_p++);

      switch (a_tail->deref_type) {
      case nir_deref_type_array:
      case nir_deref_type_array_wildcard: {
         assert(b_tail->deref_type == nir_deref_type_array ||
                b_tail->deref_type == nir_deref_type_array_wildcard);

         if (a_tail->deref_type == nir_deref_type_array_wildcard) {
            if (b_tail->deref_type != nir_deref_type_array_wildcard)
               result &= ~derefs_b_contains_a_bit;
         } else if (b_tail->deref_type == nir_deref_type_array_wildcard) {
            if (a_tail->deref_type != nir_deref_type_array_wildcard)
               result &= ~derefs_a_contains_b_bit;
         } else {
            assert(a_tail->deref_type == nir_deref_type_array &&
                   b_tail->deref_type == nir_deref_type_array);
            assert(a_tail->arr.index.is_ssa && b_tail->arr.index.is_ssa);

            nir_const_value *a_index_const =
               nir_src_as_const_value(a_tail->arr.index);
            nir_const_value *b_index_const =
               nir_src_as_const_value(b_tail->arr.index);
            if (a_index_const && b_index_const) {
               /* If they're both direct and have different indices, they
                * don't even alias, much less anything else.
                */
               if (a_index_const->u32[0] != b_index_const->u32[0])
                  return 0;
            } else if (a_tail->arr.index.ssa == b_tail->arr.index.ssa) {
               /* They're the same indirect, continue on */
            } else {
               /* They're not the same index so we can't prove anything about
                * containment.
                */
               result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
            }
         }
         break;
      }

      case nir_deref_type_struct: {
         /* If they're different struct members, they don't even alias */
         if (a_tail->strct.index != b_tail->strct.index)
            return 0;
         break;
      }

      default:
         unreachable("Invalid deref type");
      }
   }

   /* If a is longer than b, then it can't contain b */
   if (*a_p != NULL)
      result &= ~derefs_a_contains_b_bit;
   if (*b_p != NULL)
      result &= ~derefs_b_contains_a_bit;

   /* If a contains b and b contains a they must be equal. */
   if ((result & derefs_a_contains_b_bit) && (result & derefs_b_contains_a_bit))
      result |= derefs_equal_bit;

   return result;
}

static enum deref_compare_result
compare_derefs(nir_deref_instr *a, nir_deref_instr *b)
{
   if (a == b) {
      return derefs_equal_bit | derefs_may_alias_bit |
             derefs_a_contains_b_bit | derefs_b_contains_a_bit;
   }

   nir_deref_path a_path, b_path;
   nir_deref_path_init(&a_path, a, NULL);
   nir_deref_path_init(&b_path, b, NULL);
   assert(a_path.path[0]->deref_type == nir_deref_type_var);
   assert(b_path.path[0]->deref_type == nir_deref_type_var);

   enum deref_compare_result result = compare_deref_paths(&a_path, &b_path);

   nir_deref_path_finish(&a_path);
   nir_deref_path_finish(&b_path);

   return result;
}

static void
remove_dead_writes(struct copy_prop_var_state *state,
                   struct copy_entry *entry, unsigned write_mask)
{
   /* We're overwriting another entry. Some of its components may not
    * have been read yet and, if that's the case, we may be able to delete
    * some instructions, but we have to be careful.
    */
   unsigned dead_comps = write_mask & ~entry->comps_may_be_read;

   for (unsigned mask = dead_comps; mask;) {
      unsigned i = u_bit_scan(&mask);

      nir_instr *instr = entry->store_instr[i];

      /* We may have already deleted it on a previous iteration */
      if (!instr)
         continue;

      /* See if this instr is used anywhere that it's not dead */
      bool keep = false;
      for (unsigned j = 0; j < 4; j++) {
         if (entry->store_instr[j] == instr) {
            if (dead_comps & (1 << j)) {
               entry->store_instr[j] = NULL;
            } else {
               keep = true;
            }
         }
      }

      if (!keep) {
         nir_instr_remove(instr);
         state->progress = true;
      }
   }
}

static struct copy_entry *
lookup_entry_for_deref(struct copy_prop_var_state *state,
                       nir_deref_instr *deref,
                       enum deref_compare_result allowed_comparisons)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}

static void
mark_aliased_entries_as_read(struct copy_prop_var_state *state,
                             nir_deref_instr *deref, unsigned components)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & derefs_may_alias_bit)
         iter->comps_may_be_read |= components;
   }
}

static struct copy_entry *
get_entry_and_kill_aliases(struct copy_prop_var_state *state,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   struct copy_entry *entry = NULL;
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (compare_derefs(iter->src.deref, deref) & derefs_may_alias_bit) {
            copy_entry_remove(state, iter);
            continue;
         }
      }

      enum deref_compare_result comp = compare_derefs(iter->dst, deref);
      /* This is a store operation. If we completely overwrite some value, we
       * want to delete any dead writes that may be present.
       */
      if (comp & derefs_b_contains_a_bit)
         remove_dead_writes(state, iter, write_mask);

      if (comp & derefs_equal_bit) {
         assert(entry == NULL);
         entry = iter;
      } else if (comp & derefs_may_alias_bit) {
         copy_entry_remove(state, iter);
      }
   }

   if (entry == NULL)
      entry = copy_entry_create(state, deref);

   return entry;
}

static void
apply_barrier_for_modes(struct copy_prop_var_state *state,
                        nir_variable_mode modes)
{
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      nir_variable *dst_var = nir_deref_instr_get_variable(iter->dst);
      nir_variable *src_var = iter->src.is_ssa ? NULL :
         nir_deref_instr_get_variable(iter->src.deref);

      if ((dst_var->data.mode & modes) ||
          (src_var && (src_var->data.mode & modes)))
         copy_entry_remove(state, iter);
   }
}

static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask,
               nir_instr *store_instr)
{
   entry->comps_may_be_read &= ~write_mask;
   if (value->is_ssa) {
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i)) {
            entry->store_instr[i] = store_instr;
            entry->src.ssa[i] = value->ssa[i];
         }
      }
   } else {
      /* Non-ssa stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
      for (unsigned i = 0; i < 4; i++)
         entry->store_instr[i] = store_instr;
   }
}

/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def. Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller. If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL. (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
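/* As a hypothetical illustration: if the entry holds ssa = {a, a, b, b},
 * this emits vec4(a.x, a.y, b.z, b.w) and points all four components of
 * "value" at the new vecN def.
 */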
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   uint8_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail. Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list. Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}

/**
 * Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific. The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
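/* As a hypothetical illustration: for a tracked copy "a[*] = b[*]" and a
 * load from a[3], this is called with deref = b[*], guide = a[*], and
 * specific = a[3], and it builds the deref b[3].
 */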
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky. We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}

/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller
 * can steal it and assign it to the instruction directly without copying it
 * again.
 */
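/* As a hypothetical illustration: if the entry records the copy "a = b" and
 * the load reads a[3], the returned value is a freshly built deref b[3].
 */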
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src. This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst. Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}

static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}

static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block)
{
   /* Start each block with a blank slate */
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link)
      copy_entry_remove(state, iter);

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         /* If we hit a barrier, we need to trash everything that may possibly
          * be accessible to another thread. Locals, globals, and the like
          * are safe, however.
          */
         apply_barrier_for_modes(state, ~(nir_var_local | nir_var_global |
                                          nir_var_shader_in | nir_var_uniform));
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(state, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         uint8_t comps_read = nir_ssa_def_components_read(&intrin->dest.ssa);
         mark_aliased_entries_as_read(state, src, comps_read);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* try_load_from_entry() has already ensured that we get a
                * single SSA value that has all of the channels. We just have
                * to do the rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place. This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it. In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(state, src, derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(state, src);

         /* Set up a store to this entry with the value of the load. This way
          * we can potentially remove subsequent loads. However, we use a
          * NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1), NULL);
         break;
      }

      case nir_intrinsic_store_deref: {
         struct value value = {
            .is_ssa = true
         };

         for (unsigned i = 0; i < intrin->num_components; i++)
            value.ssa[i] = intrin->src[1].ssa;

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         unsigned wrmask = nir_intrinsic_write_mask(intrin);
         struct copy_entry *entry =
            get_entry_and_kill_aliases(state, dst, wrmask);
         store_to_entry(state, entry, &value, wrmask, &intrin->instr);
         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (compare_derefs(src, dst) & derefs_equal_bit) {
            /* This is a no-op self-copy. Get rid of it */
            nir_instr_remove(instr);
            continue;
         }

         mark_aliased_entries_as_read(state, src, 0xf);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa[0], 0xf);
               intrin = nir_instr_as_intrinsic(nir_builder_last_instr(b));
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (compare_derefs(value.deref, dst) & derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(state, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf, &intrin->instr);
         break;
      }

      default:
         break;
      }
   }
}

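/* Typically invoked from a driver's optimization loop; for example
 * (illustrative usage, not part of this file):
 * NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
 */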
bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   struct copy_prop_var_state state;

   nir_assert_unlowered_derefs(shader, nir_lower_load_store_derefs);

   state.shader = shader;
   state.mem_ctx = ralloc_context(NULL);
   list_inithead(&state.copies);
   list_inithead(&state.copy_free_list);

   bool global_progress = false;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      state.progress = false;
      nir_foreach_block(block, function->impl)
         copy_prop_vars_block(&state, &b, block);

      if (state.progress) {
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
         global_progress = true;
      }
   }

   ralloc_free(state.mem_ctx);

   return global_progress;
}