nir: Rename nir_intrinsic_barrier to control_barrier
src/compiler/nir/nir_opt_copy_prop_vars.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_deref.h"
27
28 #include "util/bitscan.h"
29 #include "util/u_dynarray.h"
30
31 static const bool debug = false;
32
33 /**
34 * Variable-based copy propagation
35 *
36 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
37 * However, there are cases, especially when dealing with indirects, where SSA
38 * won't help you. This pass is for those times. Specifically, it handles
39 * the following things that the rest of NIR can't:
40 *
41 * 1) Copy-propagation on variables that have indirect access. This includes
42 * propagating from indirect stores into indirect loads.
43 *
44 * 2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
45 * to do this because it isn't aware of variable writes that may alias the
46 * value and make the former load invalid.
47 *
48 * This pass uses an intermediate approach between a purely local / "per-block"
49 * analysis and a complete data-flow analysis. It follows the control flow
50 * graph and propagates the available copy information forward, invalidating
51 * data at each cf_node.
52 *
53 * Removal of dead writes to variables is handled by another pass.
54 */
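/* As a rough illustration (hypothetical shader, NIR syntax approximate), this
 * pass can turn
 *
 *    store_deref &arr[ssa_i], ssa_1
 *    ssa_2 = load_deref &arr[ssa_i]
 *
 * into a direct reuse of ssa_1 even though the index ssa_i is indirect,
 * because the two derefs compare equal, and the now-redundant load_deref is
 * removed.
 */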
55
56 struct vars_written {
57 nir_variable_mode modes;
58
59 /* Key is the deref and value is a uintptr_t holding the write mask. */
60 struct hash_table *derefs;
61 };
62
63 struct value {
64 bool is_ssa;
65 union {
66 struct {
67 nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
68 uint8_t component[NIR_MAX_VEC_COMPONENTS];
69 } ssa;
70 nir_deref_instr *deref;
71 };
72 };
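/* Hypothetical example of how a value can mix sources: after a full store of
 * ssa_1 to a vec4 followed by a store of ssa_2 to its .zw channels, the
 * tracked value would be def = { ssa_1, ssa_1, ssa_2, ssa_2 } with
 * component = { 0, 1, 2, 3 }.
 */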
73
74 static void
75 value_set_ssa_components(struct value *value, nir_ssa_def *def,
76 unsigned num_components)
77 {
78 if (!value->is_ssa)
79 memset(&value->ssa, 0, sizeof(value->ssa));
80 value->is_ssa = true;
81 for (unsigned i = 0; i < num_components; i++) {
82 value->ssa.def[i] = def;
83 value->ssa.component[i] = i;
84 }
85 }
86
87 struct copy_entry {
88 struct value src;
89
90 nir_deref_instr *dst;
91 };
92
93 struct copy_prop_var_state {
94 nir_function_impl *impl;
95
96 void *mem_ctx;
97 void *lin_ctx;
98
99 /* Maps nodes to vars_written. Used to invalidate copy entries when
100 * visiting each node.
101 */
102 struct hash_table *vars_written_map;
103
104 bool progress;
105 };
106
107 static bool
108 value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
109 {
110 assert(intrin->intrinsic == nir_intrinsic_store_deref);
111 uintptr_t write_mask = nir_intrinsic_write_mask(intrin);
112
113 for (unsigned i = 0; i < intrin->num_components; i++) {
114 if ((write_mask & (1 << i)) &&
115 (value->ssa.def[i] != intrin->src[1].ssa ||
116 value->ssa.component[i] != i))
117 return false;
118 }
119
120 return true;
121 }
122
123 static struct vars_written *
124 create_vars_written(struct copy_prop_var_state *state)
125 {
126 struct vars_written *written =
127 linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
128 written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
129 return written;
130 }
131
132 static void
133 gather_vars_written(struct copy_prop_var_state *state,
134 struct vars_written *written,
135 nir_cf_node *cf_node)
136 {
137 struct vars_written *new_written = NULL;
138
139 switch (cf_node->type) {
140 case nir_cf_node_function: {
141 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
142 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
143 gather_vars_written(state, NULL, cf_node);
144 break;
145 }
146
147 case nir_cf_node_block: {
148 if (!written)
149 break;
150
151 nir_block *block = nir_cf_node_as_block(cf_node);
152 nir_foreach_instr(instr, block) {
153 if (instr->type == nir_instr_type_call) {
154 written->modes |= nir_var_shader_out |
155 nir_var_shader_temp |
156 nir_var_function_temp |
157 nir_var_mem_ssbo |
158 nir_var_mem_shared;
159 continue;
160 }
161
162 if (instr->type != nir_instr_type_intrinsic)
163 continue;
164
165 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
166 switch (intrin->intrinsic) {
167 case nir_intrinsic_control_barrier:
168 case nir_intrinsic_memory_barrier:
169 written->modes |= nir_var_shader_out |
170 nir_var_mem_ssbo |
171 nir_var_mem_shared;
172 break;
173
174 case nir_intrinsic_scoped_memory_barrier:
175 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
176 written->modes |= nir_intrinsic_memory_modes(intrin);
177 break;
178
179 case nir_intrinsic_emit_vertex:
180 case nir_intrinsic_emit_vertex_with_counter:
181 written->modes = nir_var_shader_out;
182 break;
183
184 case nir_intrinsic_deref_atomic_add:
185 case nir_intrinsic_deref_atomic_imin:
186 case nir_intrinsic_deref_atomic_umin:
187 case nir_intrinsic_deref_atomic_imax:
188 case nir_intrinsic_deref_atomic_umax:
189 case nir_intrinsic_deref_atomic_and:
190 case nir_intrinsic_deref_atomic_or:
191 case nir_intrinsic_deref_atomic_xor:
192 case nir_intrinsic_deref_atomic_exchange:
193 case nir_intrinsic_deref_atomic_comp_swap:
194 case nir_intrinsic_store_deref:
195 case nir_intrinsic_copy_deref: {
196 /* The destination in store_deref, copy_deref and the atomics is src[0]. */
197 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
198
199 uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
200 nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;
201
202 struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
203 if (ht_entry)
204 ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
205 else
206 _mesa_hash_table_insert(written->derefs, dst, (void *)mask);
207
208 break;
209 }
210
211 default:
212 break;
213 }
214 }
215
216 break;
217 }
218
219 case nir_cf_node_if: {
220 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
221
222 new_written = create_vars_written(state);
223
224 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
225 gather_vars_written(state, new_written, cf_node);
226
227 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
228 gather_vars_written(state, new_written, cf_node);
229
230 break;
231 }
232
233 case nir_cf_node_loop: {
234 nir_loop *loop = nir_cf_node_as_loop(cf_node);
235
236 new_written = create_vars_written(state);
237
238 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
239 gather_vars_written(state, new_written, cf_node);
240
241 break;
242 }
243
244 default:
245 unreachable("Invalid CF node type");
246 }
247
248 if (new_written) {
249 /* Merge the new information into the parent control flow node. */
250 if (written) {
251 written->modes |= new_written->modes;
252 hash_table_foreach(new_written->derefs, new_entry) {
253 struct hash_entry *old_entry =
254 _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
255 new_entry->key);
256 if (old_entry) {
257 nir_component_mask_t merged = (uintptr_t) new_entry->data |
258 (uintptr_t) old_entry->data;
259 old_entry->data = (void *) ((uintptr_t) merged);
260 } else {
261 _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
262 new_entry->key, new_entry->data);
263 }
264 }
265 }
266 _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
267 }
268 }
269
270 static struct copy_entry *
271 copy_entry_create(struct util_dynarray *copies,
272 nir_deref_instr *dst_deref)
273 {
274 struct copy_entry new_entry = {
275 .dst = dst_deref,
276 };
277 util_dynarray_append(copies, struct copy_entry, new_entry);
278 return util_dynarray_top_ptr(copies, struct copy_entry);
279 }
280
281 /* Remove a copy entry by swapping it with the last element and reducing the
282 * size. If used inside an iteration over copies, it must be a reverse
283 * (backwards) iteration. It is safe to use in those cases because the swap
284 * will not affect the rest of the iteration.
285 */
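/* Hypothetical example: while iterating backwards over { A, B, C } and
 * removing B, C is swapped into B's slot and the size drops to two; since C
 * was already visited, the rest of the reverse iteration is unaffected.
 */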
286 static void
287 copy_entry_remove(struct util_dynarray *copies,
288 struct copy_entry *entry)
289 {
290 /* This also works when removing the last element since pop doesn't shrink
291 * the memory used by the array, so the swap is redundant but not invalid.
292 */
293 *entry = util_dynarray_pop(copies, struct copy_entry);
294 }
295
296 static bool
297 is_array_deref_of_vector(nir_deref_instr *deref)
298 {
299 if (deref->deref_type != nir_deref_type_array)
300 return false;
301 nir_deref_instr *parent = nir_deref_instr_parent(deref);
302 return glsl_type_is_vector(parent->type);
303 }
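/* For example (hypothetical derefs), is_array_deref_of_vector() is true for
 * &v[2] where v is a vec4, but false for &arr[2] where arr is a float[8],
 * since the parent type there is an array rather than a vector.
 */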
304
305 static struct copy_entry *
306 lookup_entry_for_deref(struct util_dynarray *copies,
307 nir_deref_instr *deref,
308 nir_deref_compare_result allowed_comparisons)
309 {
310 struct copy_entry *entry = NULL;
311 util_dynarray_foreach(copies, struct copy_entry, iter) {
312 nir_deref_compare_result result = nir_compare_derefs(iter->dst, deref);
313 if (result & allowed_comparisons) {
314 entry = iter;
315 if (result & nir_derefs_equal_bit)
316 break;
317 /* Keep looking in case we have an equal match later in the array. */
318 }
319 }
320 return entry;
321 }
322
323 static struct copy_entry *
324 lookup_entry_and_kill_aliases(struct util_dynarray *copies,
325 nir_deref_instr *deref,
326 unsigned write_mask)
327 {
328 /* TODO: Take into account the write_mask. */
329
330 nir_deref_instr *dst_match = NULL;
331 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
332 if (!iter->src.is_ssa) {
333 /* If this write aliases the source of some entry, get rid of it */
334 if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
335 copy_entry_remove(copies, iter);
336 continue;
337 }
338 }
339
340 nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);
341
342 if (comp & nir_derefs_equal_bit) {
343 /* Removing entries invalidates previous iter pointers, so we'll
344 * collect the matching entry later. Just make sure it is unique.
345 */
346 assert(!dst_match);
347 dst_match = iter->dst;
348 } else if (comp & nir_derefs_may_alias_bit) {
349 copy_entry_remove(copies, iter);
350 }
351 }
352
353 struct copy_entry *entry = NULL;
354 if (dst_match) {
355 util_dynarray_foreach(copies, struct copy_entry, iter) {
356 if (iter->dst == dst_match) {
357 entry = iter;
358 break;
359 }
360 }
361 assert(entry);
362 }
363 return entry;
364 }
365
366 static void
367 kill_aliases(struct util_dynarray *copies,
368 nir_deref_instr *deref,
369 unsigned write_mask)
370 {
371 /* TODO: Take into account the write_mask. */
372
373 struct copy_entry *entry =
374 lookup_entry_and_kill_aliases(copies, deref, write_mask);
375 if (entry)
376 copy_entry_remove(copies, entry);
377 }
378
379 static struct copy_entry *
380 get_entry_and_kill_aliases(struct util_dynarray *copies,
381 nir_deref_instr *deref,
382 unsigned write_mask)
383 {
384 /* TODO: Take into account the write_mask. */
385
386 struct copy_entry *entry =
387 lookup_entry_and_kill_aliases(copies, deref, write_mask);
388
389 if (entry == NULL)
390 entry = copy_entry_create(copies, deref);
391
392 return entry;
393 }
394
395 static void
396 apply_barrier_for_modes(struct util_dynarray *copies,
397 nir_variable_mode modes)
398 {
399 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
400 if ((iter->dst->mode & modes) ||
401 (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
402 copy_entry_remove(copies, iter);
403 }
404 }
405
406 static void
407 value_set_from_value(struct value *value, const struct value *from,
408 unsigned base_index, unsigned write_mask)
409 {
410 /* We can't have non-zero indexes with non-trivial write masks */
411 assert(base_index == 0 || write_mask == 1);
412
413 if (from->is_ssa) {
414 /* Clear value if it was being used as non-SSA. */
415 if (!value->is_ssa)
416 memset(&value->ssa, 0, sizeof(value->ssa));
417 value->is_ssa = true;
418 /* Only overwrite the written components */
419 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
420 if (write_mask & (1 << i)) {
421 value->ssa.def[base_index + i] = from->ssa.def[i];
422 value->ssa.component[base_index + i] = from->ssa.component[i];
423 }
424 }
425 } else {
426 /* Non-ssa stores always write everything */
427 value->is_ssa = false;
428 value->deref = from->deref;
429 }
430 }
431
432 /* Try to load a single element of a vector from the copy_entry. If the data
433 * isn't available, just let the original intrinsic do the work.
434 */
435 static bool
436 load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
437 struct copy_entry *entry,
438 nir_builder *b, nir_intrinsic_instr *intrin,
439 struct value *value, unsigned index)
440 {
441 assert(index < glsl_get_vector_elements(entry->dst->type));
442
443 /* We don't have the element available, so let the instruction do the work. */
444 if (!entry->src.ssa.def[index])
445 return false;
446
447 b->cursor = nir_instr_remove(&intrin->instr);
448 intrin->instr.block = NULL;
449
450 assert(entry->src.ssa.component[index] <
451 entry->src.ssa.def[index]->num_components);
452 nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
453 entry->src.ssa.component[index]);
454
455 *value = (struct value) {
456 .is_ssa = true,
457 {
458 .ssa = {
459 .def = { def },
460 .component = { 0 },
461 },
462 }
463 };
464
465 return true;
466 }
467
468 /* Do a "load" from an SSA-based entry return it in "value" as a value with a
469 * single SSA def. Because an entry could reference multiple different SSA
470 * defs, a vecN operation may be inserted to combine them into a single SSA
471 * def before handing it back to the caller. If the load instruction is no
472 * longer needed, it is removed and nir_instr::block is set to NULL. (It is
473 * possible, in some cases, for the load to be used in the vecN operation in
474 * which case it isn't deleted.)
475 */
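/* Hypothetical example: if the entry for a vec2 holds def = { ssa_1, ssa_2 }
 * with component = { 0, 3 }, the load is serviced by emitting
 * vec2(ssa_1.x, ssa_2.w) and the original load_deref is removed.
 */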
476 static bool
477 load_from_ssa_entry_value(struct copy_prop_var_state *state,
478 struct copy_entry *entry,
479 nir_builder *b, nir_intrinsic_instr *intrin,
480 nir_deref_instr *src, struct value *value)
481 {
482 if (is_array_deref_of_vector(src)) {
483 if (nir_src_is_const(src->arr.index)) {
484 return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
485 nir_src_as_uint(src->arr.index));
486 }
487
488 /* An SSA copy_entry for the vector won't help an indirect load. */
489 if (glsl_type_is_vector(entry->dst->type)) {
490 assert(entry->dst->type == nir_deref_instr_parent(src)->type);
491 /* TODO: If all SSA entries are there, try an if-ladder. */
492 return false;
493 }
494 }
495
496 *value = entry->src;
497 assert(value->is_ssa);
498
499 const struct glsl_type *type = entry->dst->type;
500 unsigned num_components = glsl_get_vector_elements(type);
501
502 nir_component_mask_t available = 0;
503 bool all_same = true;
504 for (unsigned i = 0; i < num_components; i++) {
505 if (value->ssa.def[i])
506 available |= (1 << i);
507
508 if (value->ssa.def[i] != value->ssa.def[0])
509 all_same = false;
510
511 if (value->ssa.component[i] != i)
512 all_same = false;
513 }
514
515 if (all_same) {
516 /* Our work here is done */
517 b->cursor = nir_instr_remove(&intrin->instr);
518 intrin->instr.block = NULL;
519 return true;
520 }
521
522 if (available != (1 << num_components) - 1 &&
523 intrin->intrinsic == nir_intrinsic_load_deref &&
524 (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
525 /* If none of the components read are available as SSA values, then we
526 * should just bail. Otherwise, we would end up replacing the uses of
527 * the load_deref with a vecN() that just gathers up its components.
528 */
529 return false;
530 }
531
532 b->cursor = nir_after_instr(&intrin->instr);
533
534 nir_ssa_def *load_def =
535 intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
536
537 bool keep_intrin = false;
538 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
539 for (unsigned i = 0; i < num_components; i++) {
540 if (value->ssa.def[i]) {
541 comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
542 } else {
543 /* We don't have anything for this component in our
544 * list. Just re-use a channel from the load.
545 */
546 if (load_def == NULL)
547 load_def = nir_load_deref(b, entry->dst);
548
549 if (load_def->parent_instr == &intrin->instr)
550 keep_intrin = true;
551
552 comps[i] = nir_channel(b, load_def, i);
553 }
554 }
555
556 nir_ssa_def *vec = nir_vec(b, comps, num_components);
557 value_set_ssa_components(value, vec, num_components);
558
559 if (!keep_intrin) {
560 /* Removing this instruction should not touch the cursor because we
561 * created the cursor after the intrinsic and have added at least one
562 * instruction (the vec) since then.
563 */
564 assert(b->cursor.instr != &intrin->instr);
565 nir_instr_remove(&intrin->instr);
566 intrin->instr.block = NULL;
567 }
568
569 return true;
570 }
571
572 /**
573 * Specialize the wildcards in a deref chain
574 *
575 * This function returns a deref chain identical to \param deref except that
576 * some of its wildcards are replaced with indices from \param specific. The
577 * process is guided by \param guide which references the same type as \param
578 * specific but has the same wildcard array lengths as \param deref.
579 */
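/* Hypothetical example: for a recorded copy "copy_deref &a[*].f, &b[*].g", a
 * load from &a[3].f specializes the source chain &b[*].g using the guide
 * &a[*].f and the specific deref &a[3].f, yielding &b[3].g.
 */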
580 static nir_deref_instr *
581 specialize_wildcards(nir_builder *b,
582 nir_deref_path *deref,
583 nir_deref_path *guide,
584 nir_deref_path *specific)
585 {
586 nir_deref_instr **deref_p = &deref->path[1];
587 nir_deref_instr **guide_p = &guide->path[1];
588 nir_deref_instr **spec_p = &specific->path[1];
589 nir_deref_instr *ret_tail = deref->path[0];
590 for (; *deref_p; deref_p++) {
591 if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
592 /* This is where things get tricky. We have to search through
593 * the entry deref to find its corresponding wildcard and fill
594 * this slot in with the value from the src.
595 */
596 while (*guide_p &&
597 (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
598 guide_p++;
599 spec_p++;
600 }
601 assert(*guide_p && *spec_p);
602
603 ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);
604
605 guide_p++;
606 spec_p++;
607 } else {
608 ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
609 }
610 }
611
612 return ret_tail;
613 }
614
615 /* Do a "load" from an deref-based entry return it in "value" as a value. The
616 * deref returned in "value" will always be a fresh copy so the caller can
617 * steal it and assign it to the instruction directly without copying it
618 * again.
619 */
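/* Hypothetical example: with an entry recording "copy_deref &a, &b", a load
 * from &a.field is answered with the freshly built deref &b.field, i.e. the
 * entry's source deref extended by the extra path components of the load.
 */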
620 static bool
621 load_from_deref_entry_value(struct copy_prop_var_state *state,
622 struct copy_entry *entry,
623 nir_builder *b, nir_intrinsic_instr *intrin,
624 nir_deref_instr *src, struct value *value)
625 {
626 *value = entry->src;
627
628 b->cursor = nir_instr_remove(&intrin->instr);
629
630 nir_deref_path entry_dst_path, src_path;
631 nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
632 nir_deref_path_init(&src_path, src, state->mem_ctx);
633
634 bool need_to_specialize_wildcards = false;
635 nir_deref_instr **entry_p = &entry_dst_path.path[1];
636 nir_deref_instr **src_p = &src_path.path[1];
637 while (*entry_p && *src_p) {
638 nir_deref_instr *entry_tail = *entry_p++;
639 nir_deref_instr *src_tail = *src_p++;
640
641 if (src_tail->deref_type == nir_deref_type_array &&
642 entry_tail->deref_type == nir_deref_type_array_wildcard)
643 need_to_specialize_wildcards = true;
644 }
645
646 /* If the entry deref is longer than the source deref then it refers to a
647 * smaller type and we can't source from it.
648 */
649 assert(*entry_p == NULL);
650
651 if (need_to_specialize_wildcards) {
652 /* The entry has some wildcards that are not in src. This means we need
653 * to construct a new deref based on the entry but using the wildcards
654 * from the source and guided by the entry dst. Oof.
655 */
656 nir_deref_path entry_src_path;
657 nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
658 value->deref = specialize_wildcards(b, &entry_src_path,
659 &entry_dst_path, &src_path);
660 nir_deref_path_finish(&entry_src_path);
661 }
662
663 /* If our source deref is longer than the entry deref, that's ok because
664 * it just means the entry deref needs to be extended a bit.
665 */
666 while (*src_p) {
667 nir_deref_instr *src_tail = *src_p++;
668 value->deref = nir_build_deref_follower(b, value->deref, src_tail);
669 }
670
671 nir_deref_path_finish(&entry_dst_path);
672 nir_deref_path_finish(&src_path);
673
674 return true;
675 }
676
677 static bool
678 try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
679 nir_builder *b, nir_intrinsic_instr *intrin,
680 nir_deref_instr *src, struct value *value)
681 {
682 if (entry == NULL)
683 return false;
684
685 if (entry->src.is_ssa) {
686 return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
687 } else {
688 return load_from_deref_entry_value(state, entry, b, intrin, src, value);
689 }
690 }
691
692 static void
693 invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
694 struct util_dynarray *copies,
695 nir_cf_node *cf_node)
696 {
697 struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
698 assert(ht_entry);
699
700 struct vars_written *written = ht_entry->data;
701 if (written->modes) {
702 util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
703 if (entry->dst->mode & written->modes)
704 copy_entry_remove(copies, entry);
705 }
706 }
707
708 hash_table_foreach (written->derefs, entry) {
709 nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
710 kill_aliases(copies, deref_written, (uintptr_t)entry->data);
711 }
712 }
713
714 static void
715 print_value(struct value *value, unsigned num_components)
716 {
717 if (!value->is_ssa) {
718 printf(" %s ", glsl_get_type_name(value->deref->type));
719 nir_print_deref(value->deref, stdout);
720 return;
721 }
722
723 bool same_ssa = true;
724 for (unsigned i = 0; i < num_components; i++) {
725 if (value->ssa.component[i] != i ||
726 (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
727 same_ssa = false;
728 break;
729 }
730 }
731 if (same_ssa) {
732 printf(" ssa_%d", value->ssa.def[0]->index);
733 } else {
734 for (int i = 0; i < num_components; i++) {
735 if (value->ssa.def[i])
736 printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
737 else
738 printf(" _");
739 }
740 }
741 }
742
743 static void
744 print_copy_entry(struct copy_entry *entry)
745 {
746 printf(" %s ", glsl_get_type_name(entry->dst->type));
747 nir_print_deref(entry->dst, stdout);
748 printf(":\t");
749
750 unsigned num_components = glsl_get_vector_elements(entry->dst->type);
751 print_value(&entry->src, num_components);
752 printf("\n");
753 }
754
755 static void
756 dump_instr(nir_instr *instr)
757 {
758 printf(" ");
759 nir_print_instr(instr, stdout);
760 printf("\n");
761 }
762
763 static void
764 dump_copy_entries(struct util_dynarray *copies)
765 {
766 util_dynarray_foreach(copies, struct copy_entry, iter)
767 print_copy_entry(iter);
768 printf("\n");
769 }
770
771 static void
772 copy_prop_vars_block(struct copy_prop_var_state *state,
773 nir_builder *b, nir_block *block,
774 struct util_dynarray *copies)
775 {
776 if (debug) {
777 printf("# block%d\n", block->index);
778 dump_copy_entries(copies);
779 }
780
781 nir_foreach_instr_safe(instr, block) {
782 if (debug && instr->type == nir_instr_type_deref)
783 dump_instr(instr);
784
785 if (instr->type == nir_instr_type_call) {
786 if (debug) dump_instr(instr);
787 apply_barrier_for_modes(copies, nir_var_shader_out |
788 nir_var_shader_temp |
789 nir_var_function_temp |
790 nir_var_mem_ssbo |
791 nir_var_mem_shared);
792 if (debug) dump_copy_entries(copies);
793 continue;
794 }
795
796 if (instr->type != nir_instr_type_intrinsic)
797 continue;
798
799 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
800 switch (intrin->intrinsic) {
801 case nir_intrinsic_control_barrier:
802 case nir_intrinsic_memory_barrier:
803 if (debug) dump_instr(instr);
804
805 apply_barrier_for_modes(copies, nir_var_shader_out |
806 nir_var_mem_ssbo |
807 nir_var_mem_shared);
808 break;
809
810 case nir_intrinsic_memory_barrier_buffer:
811 case nir_intrinsic_memory_barrier_atomic_counter:
812 if (debug) dump_instr(instr);
813
814 apply_barrier_for_modes(copies, nir_var_mem_ssbo);
815 break;
816
817 case nir_intrinsic_memory_barrier_shared:
818 if (debug) dump_instr(instr);
819
820 apply_barrier_for_modes(copies, nir_var_mem_shared);
821 break;
822
823 case nir_intrinsic_memory_barrier_tcs_patch:
824 if (debug) dump_instr(instr);
825
826 apply_barrier_for_modes(copies, nir_var_shader_out);
827 break;
828
829 case nir_intrinsic_scoped_memory_barrier:
830 if (debug) dump_instr(instr);
831
832 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
833 apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
834 break;
835
836 case nir_intrinsic_emit_vertex:
837 case nir_intrinsic_emit_vertex_with_counter:
838 if (debug) dump_instr(instr);
839
840 apply_barrier_for_modes(copies, nir_var_shader_out);
841 break;
842
843 case nir_intrinsic_load_deref: {
844 if (debug) dump_instr(instr);
845
846 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
847 break;
848
849 nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
850
851 /* Direct array_derefs of vectors operate on the vectors (the parent
852 * deref). Indirects will be handled like other derefs.
853 */
854 int vec_index = 0;
855 nir_deref_instr *vec_src = src;
856 if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
857 vec_src = nir_deref_instr_parent(src);
858 unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
859 vec_index = nir_src_as_uint(src->arr.index);
860
861 /* Loading from an invalid index yields an undef */
862 if (vec_index >= vec_comps) {
863 b->cursor = nir_instr_remove(instr);
864 nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
865 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(u));
866 break;
867 }
868 }
869
870 struct copy_entry *src_entry =
871 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
872 struct value value = {0};
873 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
874 if (value.is_ssa) {
875 /* try_load_from_entry() has already ensured that we get a single
876 * SSA value that has all of the channels. We just have to do the
877 * rewrite operation. Note that for array derefs of vectors,
878 * channel 0 is used.
879 */
880 if (intrin->instr.block) {
881 /* The lookup left our instruction in-place. This means it
882 * must have used it to vec up a bunch of different sources.
883 * We need to be careful when rewriting uses so we don't
884 * rewrite the vecN itself.
885 */
886 nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
887 nir_src_for_ssa(value.ssa.def[0]),
888 value.ssa.def[0]->parent_instr);
889 } else {
890 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
891 nir_src_for_ssa(value.ssa.def[0]));
892 }
893 } else {
894 /* We're turning it into a load of a different variable */
895 intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);
896
897 /* Put it back in again. */
898 nir_builder_instr_insert(b, instr);
899 value_set_ssa_components(&value, &intrin->dest.ssa,
900 intrin->num_components);
901 }
902 state->progress = true;
903 } else {
904 value_set_ssa_components(&value, &intrin->dest.ssa,
905 intrin->num_components);
906 }
907
908 /* Now that we have a value, we're going to store it back so that we
909 * have the right value next time we come looking for it. In order
910 * to do this, we need an exact match, not just something that
911 * contains what we're looking for.
912 */
913 struct copy_entry *entry =
914 lookup_entry_for_deref(copies, vec_src, nir_derefs_equal_bit);
915 if (!entry)
916 entry = copy_entry_create(copies, vec_src);
917
918 /* Update the entry with the value of the load. This way
919 * we can potentially remove subsequent loads.
920 */
921 value_set_from_value(&entry->src, &value, vec_index,
922 (1 << intrin->num_components) - 1);
923 break;
924 }
925
926 case nir_intrinsic_store_deref: {
927 if (debug) dump_instr(instr);
928
929 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
930 break;
931
932 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
933 assert(glsl_type_is_vector_or_scalar(dst->type));
934
935 /* Direct array_derefs of vectors operate on the vectors (the parent
936 * deref). Indirects will be handled like other derefs.
937 */
938 int vec_index = 0;
939 nir_deref_instr *vec_dst = dst;
940 if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
941 vec_dst = nir_deref_instr_parent(dst);
942 unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);
943
944 vec_index = nir_src_as_uint(dst->arr.index);
945
946 /* Storing to an invalid index is a no-op. */
947 if (vec_index >= vec_comps) {
948 nir_instr_remove(instr);
949 break;
950 }
951 }
952
953 struct copy_entry *entry =
954 lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
955 if (entry && value_equals_store_src(&entry->src, intrin)) {
956 /* If we are storing the value from a load of the same var, the
957 * store is redundant, so remove it.
958 */
959 nir_instr_remove(instr);
960 } else {
961 struct value value = {0};
962 value_set_ssa_components(&value, intrin->src[1].ssa,
963 intrin->num_components);
964 unsigned wrmask = nir_intrinsic_write_mask(intrin);
965 struct copy_entry *entry =
966 get_entry_and_kill_aliases(copies, vec_dst, wrmask);
967 value_set_from_value(&entry->src, &value, vec_index, wrmask);
968 }
969
970 break;
971 }
972
973 case nir_intrinsic_copy_deref: {
974 if (debug) dump_instr(instr);
975
976 if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
977 (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
978 break;
979
980 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
981 nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
982
983 if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
984 /* This is a no-op self-copy. Get rid of it */
985 nir_instr_remove(instr);
986 continue;
987 }
988
989 /* The copy_deref intrinsic doesn't keep track of num_components, so
990 * get it ourselves.
991 */
992 unsigned num_components = glsl_get_vector_elements(dst->type);
993 unsigned full_mask = (1 << num_components) - 1;
994
995 /* Copies of direct array derefs of vectors are not handled. Just
996 * invalidate what's written and bail.
997 */
998 if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
999 (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
1000 kill_aliases(copies, dst, full_mask);
1001 break;
1002 }
1003
1004 struct copy_entry *src_entry =
1005 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
1006 struct value value;
1007 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
1008 /* If the load works, intrin (the copy_deref) has been removed. */
1009 if (value.is_ssa) {
1010 nir_store_deref(b, dst, value.ssa.def[0], full_mask);
1011 } else {
1012 /* If this would be a no-op self-copy, don't bother. */
1013 if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
1014 continue;
1015
1016 /* Just turn it into a copy of a different deref */
1017 intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);
1018
1019 /* Put it back in again. */
1020 nir_builder_instr_insert(b, instr);
1021 }
1022
1023 state->progress = true;
1024 } else {
1025 value = (struct value) {
1026 .is_ssa = false,
1027 { .deref = src },
1028 };
1029 }
1030
1031 nir_variable *src_var = nir_deref_instr_get_variable(src);
1032 if (src_var && src_var->data.cannot_coalesce) {
1033 /* The source cannot be coalesced, which means we can't propagate
1034 * this copy.
1035 */
1036 break;
1037 }
1038
1039 struct copy_entry *dst_entry =
1040 get_entry_and_kill_aliases(copies, dst, full_mask);
1041 value_set_from_value(&dst_entry->src, &value, 0, full_mask);
1042 break;
1043 }
1044
1045 case nir_intrinsic_deref_atomic_add:
1046 case nir_intrinsic_deref_atomic_imin:
1047 case nir_intrinsic_deref_atomic_umin:
1048 case nir_intrinsic_deref_atomic_imax:
1049 case nir_intrinsic_deref_atomic_umax:
1050 case nir_intrinsic_deref_atomic_and:
1051 case nir_intrinsic_deref_atomic_or:
1052 case nir_intrinsic_deref_atomic_xor:
1053 case nir_intrinsic_deref_atomic_exchange:
1054 case nir_intrinsic_deref_atomic_comp_swap:
1055 if (debug) dump_instr(instr);
1056
1057 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
1058 break;
1059
1060 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
1061 unsigned num_components = glsl_get_vector_elements(dst->type);
1062 unsigned full_mask = (1 << num_components) - 1;
1063 kill_aliases(copies, dst, full_mask);
1064 break;
1065
1066 default:
1067 continue; /* To skip the debug below. */
1068 }
1069
1070 if (debug) dump_copy_entries(copies);
1071 }
1072 }
1073
1074 static void
1075 copy_prop_vars_cf_node(struct copy_prop_var_state *state,
1076 struct util_dynarray *copies,
1077 nir_cf_node *cf_node)
1078 {
1079 switch (cf_node->type) {
1080 case nir_cf_node_function: {
1081 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
1082
1083 struct util_dynarray impl_copies;
1084 util_dynarray_init(&impl_copies, state->mem_ctx);
1085
1086 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
1087 copy_prop_vars_cf_node(state, &impl_copies, cf_node);
1088
1089 break;
1090 }
1091
1092 case nir_cf_node_block: {
1093 nir_block *block = nir_cf_node_as_block(cf_node);
1094 nir_builder b;
1095 nir_builder_init(&b, state->impl);
1096 copy_prop_vars_block(state, &b, block, copies);
1097 break;
1098 }
1099
1100 case nir_cf_node_if: {
1101 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1102
1103 /* Clone the copies for each branch of the if statement. The idea is
1104 * that they both see the same state of available copies, but do not
1105 * interfere with each other.
1106 */
1107
1108 struct util_dynarray then_copies;
1109 util_dynarray_clone(&then_copies, state->mem_ctx, copies);
1110
1111 struct util_dynarray else_copies;
1112 util_dynarray_clone(&else_copies, state->mem_ctx, copies);
1113
1114 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
1115 copy_prop_vars_cf_node(state, &then_copies, cf_node);
1116
1117 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
1118 copy_prop_vars_cf_node(state, &else_copies, cf_node);
1119
1120 /* Both branches' copies can be ignored, since the effect of running both
1121 * branches was captured in the first pass that collects vars_written.
1122 */
1123
1124 invalidate_copies_for_cf_node(state, copies, cf_node);
1125
1126 break;
1127 }
1128
1129 case nir_cf_node_loop: {
1130 nir_loop *loop = nir_cf_node_as_loop(cf_node);
1131
1132 /* Invalidate before cloning the copies for the loop, since the loop
1133 * body can be executed more than once.
1134 */
1135
1136 invalidate_copies_for_cf_node(state, copies, cf_node);
1137
1138 struct util_dynarray loop_copies;
1139 util_dynarray_clone(&loop_copies, state->mem_ctx, copies);
1140
1141 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
1142 copy_prop_vars_cf_node(state, &loop_copies, cf_node);
1143
1144 break;
1145 }
1146
1147 default:
1148 unreachable("Invalid CF node type");
1149 }
1150 }
1151
1152 static bool
1153 nir_copy_prop_vars_impl(nir_function_impl *impl)
1154 {
1155 void *mem_ctx = ralloc_context(NULL);
1156
1157 if (debug) {
1158 nir_metadata_require(impl, nir_metadata_block_index);
1159 printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
1160 }
1161
1162 struct copy_prop_var_state state = {
1163 .impl = impl,
1164 .mem_ctx = mem_ctx,
1165 .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
1166
1167 .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
1168 };
1169
1170 gather_vars_written(&state, NULL, &impl->cf_node);
1171
1172 copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);
1173
1174 if (state.progress) {
1175 nir_metadata_preserve(impl, nir_metadata_block_index |
1176 nir_metadata_dominance);
1177 } else {
1178 #ifndef NDEBUG
1179 impl->valid_metadata &= ~nir_metadata_not_properly_reset;
1180 #endif
1181 }
1182
1183 ralloc_free(mem_ctx);
1184 return state.progress;
1185 }
1186
1187 bool
1188 nir_opt_copy_prop_vars(nir_shader *shader)
1189 {
1190 bool progress = false;
1191
1192 nir_foreach_function(function, shader) {
1193 if (!function->impl)
1194 continue;
1195 progress |= nir_copy_prop_vars_impl(function->impl);
1196 }
1197
1198 return progress;
1199 }