mesa.git: src/compiler/nir/nir_opt_copy_prop_vars.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_deref.h"
27
28 #include "util/bitscan.h"
29 #include "util/u_dynarray.h"
30
31 static const bool debug = false;
32
33 /**
34 * Variable-based copy propagation
35 *
36 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
37 * However, there are cases, especially when dealing with indirects, where SSA
38 * won't help you. This pass is for those times. Specifically, it handles
39 * the following things that the rest of NIR can't:
40 *
41 * 1) Copy-propagation on variables that have indirect access. This includes
42 * propagating from indirect stores into indirect loads.
43 *
44 * 2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
45 * to do this because it isn't aware of variable writes that may alias the
46 * value and make the former load invalid.
47 *
48 * This pass uses an intermediate solution between a purely local / "per-block"
49 * analysis and a complete data-flow analysis.  It follows the control flow
50 * graph and propagates the available copy information forward, invalidating
51 * data at each cf_node.
52 *
53 * Removal of dead writes to variables is handled by another pass.
54 */
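/* As a rough illustration (the NIR below is schematic, not verbatim pass
 * output), given something like:
 *
 *    intrinsic store_deref (ssa_0, ssa_1) (wrmask=xyzw)   // ssa_0 = &var
 *    ...
 *    vec4 32 ssa_2 = intrinsic load_deref (ssa_0)         // ssa_0 = &var
 *
 * the pass records that "var" currently holds ssa_1 and rewrites the uses of
 * ssa_2 to use ssa_1 directly, even though "var" itself is never in SSA form.
 * The same tracking works through copy_deref chains and through
 * constant-indexed and wildcard array derefs.
 */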
55
56 struct vars_written {
57 nir_variable_mode modes;
58
59 /* Key is the deref and value is a uintptr_t holding the write mask. */
60 struct hash_table *derefs;
61 };
62
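/* The value currently known to be stored in a deref: either (up to)
 * NIR_MAX_VEC_COMPONENTS per-component SSA defs plus the channel to read from
 * each of them, or another deref that holds the same data.
 */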
63 struct value {
64 bool is_ssa;
65 union {
66 struct {
67 nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
68 uint8_t component[NIR_MAX_VEC_COMPONENTS];
69 } ssa;
70 nir_deref_instr *deref;
71 };
72 };
73
74 static void
75 value_set_ssa_components(struct value *value, nir_ssa_def *def,
76 unsigned num_components)
77 {
78 if (!value->is_ssa)
79 memset(&value->ssa, 0, sizeof(value->ssa));
80 value->is_ssa = true;
81 for (unsigned i = 0; i < num_components; i++) {
82 value->ssa.def[i] = def;
83 value->ssa.component[i] = i;
84 }
85 }
86
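/* One tracked copy: the destination deref and the value it is currently
 * known to contain.
 */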
87 struct copy_entry {
88 struct value src;
89
90 nir_deref_instr *dst;
91 };
92
93 struct copy_prop_var_state {
94 nir_function_impl *impl;
95
96 void *mem_ctx;
97 void *lin_ctx;
98
99 /* Maps nodes to vars_written. Used to invalidate copy entries when
100 * visiting each node.
101 */
102 struct hash_table *vars_written_map;
103
104 bool progress;
105 };
106
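/* Returns true if every component written by this store_deref already has the
 * stored SSA value recorded in "value", i.e. the store is redundant.
 */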
107 static bool
108 value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
109 {
110 assert(intrin->intrinsic == nir_intrinsic_store_deref);
111 uintptr_t write_mask = nir_intrinsic_write_mask(intrin);
112
113 for (unsigned i = 0; i < intrin->num_components; i++) {
114 if ((write_mask & (1 << i)) &&
115 (value->ssa.def[i] != intrin->src[1].ssa ||
116 value->ssa.component[i] != i))
117 return false;
118 }
119
120 return true;
121 }
122
123 static struct vars_written *
124 create_vars_written(struct copy_prop_var_state *state)
125 {
126 struct vars_written *written =
127 linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
128 written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
129 return written;
130 }
131
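/* Pre-pass: for every if and loop node, record which variable modes and which
 * derefs may be written anywhere inside it.  invalidate_copies_for_cf_node()
 * later uses this map to drop copy entries when the walk reaches the node.
 */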
132 static void
133 gather_vars_written(struct copy_prop_var_state *state,
134 struct vars_written *written,
135 nir_cf_node *cf_node)
136 {
137 struct vars_written *new_written = NULL;
138
139 switch (cf_node->type) {
140 case nir_cf_node_function: {
141 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
142 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
143 gather_vars_written(state, NULL, cf_node);
144 break;
145 }
146
147 case nir_cf_node_block: {
148 if (!written)
149 break;
150
151 nir_block *block = nir_cf_node_as_block(cf_node);
152 nir_foreach_instr(instr, block) {
153 if (instr->type == nir_instr_type_call) {
154 written->modes |= nir_var_shader_out |
155 nir_var_shader_temp |
156 nir_var_function_temp |
157 nir_var_mem_ssbo |
158 nir_var_mem_shared |
159 nir_var_mem_global;
160 continue;
161 }
162
163 if (instr->type != nir_instr_type_intrinsic)
164 continue;
165
166 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
167 switch (intrin->intrinsic) {
168 case nir_intrinsic_control_barrier:
169 case nir_intrinsic_memory_barrier:
170 written->modes |= nir_var_shader_out |
171 nir_var_mem_ssbo |
172 nir_var_mem_shared |
173 nir_var_mem_global;
174 break;
175
176 case nir_intrinsic_scoped_memory_barrier:
177 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
178 written->modes |= nir_intrinsic_memory_modes(intrin);
179 break;
180
181 case nir_intrinsic_emit_vertex:
182 case nir_intrinsic_emit_vertex_with_counter:
183 written->modes |= nir_var_shader_out;
184 break;
185
186 case nir_intrinsic_deref_atomic_add:
187 case nir_intrinsic_deref_atomic_imin:
188 case nir_intrinsic_deref_atomic_umin:
189 case nir_intrinsic_deref_atomic_imax:
190 case nir_intrinsic_deref_atomic_umax:
191 case nir_intrinsic_deref_atomic_and:
192 case nir_intrinsic_deref_atomic_or:
193 case nir_intrinsic_deref_atomic_xor:
194 case nir_intrinsic_deref_atomic_exchange:
195 case nir_intrinsic_deref_atomic_comp_swap:
196 case nir_intrinsic_store_deref:
197 case nir_intrinsic_copy_deref: {
198 /* Destination in all of store_deref, copy_deref and the atomics is src[0]. */
199 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
200
201 uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
202 nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;
203
204 struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
205 if (ht_entry)
206 ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
207 else
208 _mesa_hash_table_insert(written->derefs, dst, (void *)mask);
209
210 break;
211 }
212
213 default:
214 break;
215 }
216 }
217
218 break;
219 }
220
221 case nir_cf_node_if: {
222 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
223
224 new_written = create_vars_written(state);
225
226 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
227 gather_vars_written(state, new_written, cf_node);
228
229 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
230 gather_vars_written(state, new_written, cf_node);
231
232 break;
233 }
234
235 case nir_cf_node_loop: {
236 nir_loop *loop = nir_cf_node_as_loop(cf_node);
237
238 new_written = create_vars_written(state);
239
240 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
241 gather_vars_written(state, new_written, cf_node);
242
243 break;
244 }
245
246 default:
247 unreachable("Invalid CF node type");
248 }
249
250 if (new_written) {
251 /* Merge the new information into the parent control flow node. */
252 if (written) {
253 written->modes |= new_written->modes;
254 hash_table_foreach(new_written->derefs, new_entry) {
255 struct hash_entry *old_entry =
256 _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
257 new_entry->key);
258 if (old_entry) {
259 nir_component_mask_t merged = (uintptr_t) new_entry->data |
260 (uintptr_t) old_entry->data;
261 old_entry->data = (void *) ((uintptr_t) merged);
262 } else {
263 _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
264 new_entry->key, new_entry->data);
265 }
266 }
267 }
268 _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
269 }
270 }
271
272 static struct copy_entry *
273 copy_entry_create(struct util_dynarray *copies,
274 nir_deref_instr *dst_deref)
275 {
276 struct copy_entry new_entry = {
277 .dst = dst_deref,
278 };
279 util_dynarray_append(copies, struct copy_entry, new_entry);
280 return util_dynarray_top_ptr(copies, struct copy_entry);
281 }
282
283 /* Remove a copy entry by swapping it with the last element and reducing the
284 * size. If used inside an iteration on copies, it must be a reverse
285 * (backwards) iteration. It is safe to use in those cases because the swap
286 * will not affect the rest of the iteration.
287 */
288 static void
289 copy_entry_remove(struct util_dynarray *copies,
290 struct copy_entry *entry)
291 {
292 /* This also works when removing the last element since pop doesn't shrink
293 * the memory used by the array, so the swap is redundant but not invalid.
294 */
295 *entry = util_dynarray_pop(copies, struct copy_entry);
296 }
297
298 static bool
299 is_array_deref_of_vector(nir_deref_instr *deref)
300 {
301 if (deref->deref_type != nir_deref_type_array)
302 return false;
303 nir_deref_instr *parent = nir_deref_instr_parent(deref);
304 return glsl_type_is_vector(parent->type);
305 }
306
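/* Find a copy entry whose destination relates to "deref" in one of the
 * allowed ways.  An exact (equal) match wins and stops the search; otherwise
 * the last compatible entry found is returned.
 */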
307 static struct copy_entry *
308 lookup_entry_for_deref(struct util_dynarray *copies,
309 nir_deref_instr *deref,
310 nir_deref_compare_result allowed_comparisons)
311 {
312 struct copy_entry *entry = NULL;
313 util_dynarray_foreach(copies, struct copy_entry, iter) {
314 nir_deref_compare_result result = nir_compare_derefs(iter->dst, deref);
315 if (result & allowed_comparisons) {
316 entry = iter;
317 if (result & nir_derefs_equal_bit)
318 break;
319 /* Keep looking in case we have an equal match later in the array. */
320 }
321 }
322 return entry;
323 }
324
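/* Remove every entry whose source deref or destination may alias "deref".
 * The one entry whose destination is exactly "deref" (if any) is kept and
 * returned so the caller can overwrite it.
 */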
325 static struct copy_entry *
326 lookup_entry_and_kill_aliases(struct util_dynarray *copies,
327 nir_deref_instr *deref,
328 unsigned write_mask)
329 {
330 /* TODO: Take into account the write_mask. */
331
332 nir_deref_instr *dst_match = NULL;
333 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
334 if (!iter->src.is_ssa) {
335 /* If this write aliases the source of some entry, get rid of it */
336 if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
337 copy_entry_remove(copies, iter);
338 continue;
339 }
340 }
341
342 nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);
343
344 if (comp & nir_derefs_equal_bit) {
345 /* Removing entries invalidates previous iter pointers, so we'll
346 * collect the matching entry later. Just make sure it is unique.
347 */
348 assert(!dst_match);
349 dst_match = iter->dst;
350 } else if (comp & nir_derefs_may_alias_bit) {
351 copy_entry_remove(copies, iter);
352 }
353 }
354
355 struct copy_entry *entry = NULL;
356 if (dst_match) {
357 util_dynarray_foreach(copies, struct copy_entry, iter) {
358 if (iter->dst == dst_match) {
359 entry = iter;
360 break;
361 }
362 }
363 assert(entry);
364 }
365 return entry;
366 }
367
368 static void
369 kill_aliases(struct util_dynarray *copies,
370 nir_deref_instr *deref,
371 unsigned write_mask)
372 {
373 /* TODO: Take into account the write_mask. */
374
375 struct copy_entry *entry =
376 lookup_entry_and_kill_aliases(copies, deref, write_mask);
377 if (entry)
378 copy_entry_remove(copies, entry);
379 }
380
381 static struct copy_entry *
382 get_entry_and_kill_aliases(struct util_dynarray *copies,
383 nir_deref_instr *deref,
384 unsigned write_mask)
385 {
386 /* TODO: Take into account the write_mask. */
387
388 struct copy_entry *entry =
389 lookup_entry_and_kill_aliases(copies, deref, write_mask);
390
391 if (entry == NULL)
392 entry = copy_entry_create(copies, deref);
393
394 return entry;
395 }
396
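/* Drop every entry whose destination or source lives in one of the given
 * modes; after a barrier those values can no longer be trusted.
 */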
397 static void
398 apply_barrier_for_modes(struct util_dynarray *copies,
399 nir_variable_mode modes)
400 {
401 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
402 if ((iter->dst->mode & modes) ||
403 (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
404 copy_entry_remove(copies, iter);
405 }
406 }
407
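/* Merge "from" into "value".  For SSA values only the components selected by
 * write_mask are overwritten, shifted up by base_index (used when a single
 * component of a vector is written through an array deref); deref values
 * always replace the whole entry.
 */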
408 static void
409 value_set_from_value(struct value *value, const struct value *from,
410 unsigned base_index, unsigned write_mask)
411 {
412 /* We can't have non-zero indexes with non-trivial write masks */
413 assert(base_index == 0 || write_mask == 1);
414
415 if (from->is_ssa) {
416 /* Clear value if it was being used as non-SSA. */
417 if (!value->is_ssa)
418 memset(&value->ssa, 0, sizeof(value->ssa));
419 value->is_ssa = true;
420 /* Only overwrite the written components */
421 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
422 if (write_mask & (1 << i)) {
423 value->ssa.def[base_index + i] = from->ssa.def[i];
424 value->ssa.component[base_index + i] = from->ssa.component[i];
425 }
426 }
427 } else {
428 /* Non-ssa stores always write everything */
429 value->is_ssa = false;
430 value->deref = from->deref;
431 }
432 }
433
434 /* Try to load a single element of a vector from the copy_entry. If the data
435 * isn't available, just let the original intrinsic do the work.
436 */
437 static bool
438 load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
439 struct copy_entry *entry,
440 nir_builder *b, nir_intrinsic_instr *intrin,
441 struct value *value, unsigned index)
442 {
443 assert(index < glsl_get_vector_elements(entry->dst->type));
444
445 /* We don't have the element available, so let the instruction do the work. */
446 if (!entry->src.ssa.def[index])
447 return false;
448
449 b->cursor = nir_instr_remove(&intrin->instr);
450 intrin->instr.block = NULL;
451
452 assert(entry->src.ssa.component[index] <
453 entry->src.ssa.def[index]->num_components);
454 nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
455 entry->src.ssa.component[index]);
456
457 *value = (struct value) {
458 .is_ssa = true,
459 {
460 .ssa = {
461 .def = { def },
462 .component = { 0 },
463 },
464 }
465 };
466
467 return true;
468 }
469
470 /* Do a "load" from an SSA-based entry and return it in "value" as a value
471 * with a single SSA def. Because an entry could reference multiple different
472 * SSA defs, a vecN operation may be inserted to combine them into a single
473 * SSA def before handing it back to the caller. If the load instruction is
474 * no longer needed, it is removed and nir_instr::block is set to NULL. (It
475 * is possible, in some cases, for the load to be used in the vecN operation
476 * in which case it isn't deleted.)
477 */
478 static bool
479 load_from_ssa_entry_value(struct copy_prop_var_state *state,
480 struct copy_entry *entry,
481 nir_builder *b, nir_intrinsic_instr *intrin,
482 nir_deref_instr *src, struct value *value)
483 {
484 if (is_array_deref_of_vector(src)) {
485 if (nir_src_is_const(src->arr.index)) {
486 return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
487 nir_src_as_uint(src->arr.index));
488 }
489
490 /* An SSA copy_entry for the vector won't help an indirect load. */
491 if (glsl_type_is_vector(entry->dst->type)) {
492 assert(entry->dst->type == nir_deref_instr_parent(src)->type);
493 /* TODO: If all SSA entries are there, try an if-ladder. */
494 return false;
495 }
496 }
497
498 *value = entry->src;
499 assert(value->is_ssa);
500
501 const struct glsl_type *type = entry->dst->type;
502 unsigned num_components = glsl_get_vector_elements(type);
503
504 nir_component_mask_t available = 0;
505 bool all_same = true;
506 for (unsigned i = 0; i < num_components; i++) {
507 if (value->ssa.def[i])
508 available |= (1 << i);
509
510 if (value->ssa.def[i] != value->ssa.def[0])
511 all_same = false;
512
513 if (value->ssa.component[i] != i)
514 all_same = false;
515 }
516
517 if (all_same) {
518 /* Our work here is done */
519 b->cursor = nir_instr_remove(&intrin->instr);
520 intrin->instr.block = NULL;
521 return true;
522 }
523
524 if (available != (1 << num_components) - 1 &&
525 intrin->intrinsic == nir_intrinsic_load_deref &&
526 (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
527 /* If none of the components read are available as SSA values, then we
528 * should just bail. Otherwise, we would end up replacing the uses of
529 * the load_deref with a vecN() that just gathers up its components.
530 */
531 return false;
532 }
533
534 b->cursor = nir_after_instr(&intrin->instr);
535
536 nir_ssa_def *load_def =
537 intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
538
539 bool keep_intrin = false;
540 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
541 for (unsigned i = 0; i < num_components; i++) {
542 if (value->ssa.def[i]) {
543 comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
544 } else {
545 /* We don't have anything for this component in our
546 * list. Just re-use a channel from the load.
547 */
548 if (load_def == NULL)
549 load_def = nir_load_deref(b, entry->dst);
550
551 if (load_def->parent_instr == &intrin->instr)
552 keep_intrin = true;
553
554 comps[i] = nir_channel(b, load_def, i);
555 }
556 }
557
558 nir_ssa_def *vec = nir_vec(b, comps, num_components);
559 value_set_ssa_components(value, vec, num_components);
560
561 if (!keep_intrin) {
562 /* Removing this instruction should not touch the cursor because we
563 * created the cursor after the intrinsic and have added at least one
564 * instruction (the vec) since then.
565 */
566 assert(b->cursor.instr != &intrin->instr);
567 nir_instr_remove(&intrin->instr);
568 intrin->instr.block = NULL;
569 }
570
571 return true;
572 }
573
574 /**
575 * Specialize the wildcards in a deref chain
576 *
577 * This function returns a deref chain identical to \param deref except that
578 * some of its wildcards are replaced with indices from \param specific. The
579 * process is guided by \param guide which references the same type as \param
580 * specific but has the same wildcard array lengths as \param deref.
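 *
 * For example (illustrative): after "copy_deref c[*].f, a[*].f", the entry
 * destination is c[*].f and its source is a[*].f.  A later load of c[3].f
 * calls this with deref = a[*].f, guide = c[*].f and specific = c[3].f, and
 * the result is a freshly built a[3].f.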
581 */
582 static nir_deref_instr *
583 specialize_wildcards(nir_builder *b,
584 nir_deref_path *deref,
585 nir_deref_path *guide,
586 nir_deref_path *specific)
587 {
588 nir_deref_instr **deref_p = &deref->path[1];
589 nir_deref_instr **guide_p = &guide->path[1];
590 nir_deref_instr **spec_p = &specific->path[1];
591 nir_deref_instr *ret_tail = deref->path[0];
592 for (; *deref_p; deref_p++) {
593 if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
594 /* This is where things get tricky. We have to search through
595 * the entry deref to find its corresponding wildcard and fill
596 * this slot in with the value from the src.
597 */
598 while (*guide_p &&
599 (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
600 guide_p++;
601 spec_p++;
602 }
603 assert(*guide_p && *spec_p);
604
605 ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);
606
607 guide_p++;
608 spec_p++;
609 } else {
610 ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
611 }
612 }
613
614 return ret_tail;
615 }
616
617 /* Do a "load" from a deref-based entry and return it in "value" as a value.
618 * The deref returned in "value" will always be a fresh copy so the caller
619 * can steal it and assign it to the instruction directly without copying it
620 * again.
621 */
622 static bool
623 load_from_deref_entry_value(struct copy_prop_var_state *state,
624 struct copy_entry *entry,
625 nir_builder *b, nir_intrinsic_instr *intrin,
626 nir_deref_instr *src, struct value *value)
627 {
628 *value = entry->src;
629
630 b->cursor = nir_instr_remove(&intrin->instr);
631
632 nir_deref_path entry_dst_path, src_path;
633 nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
634 nir_deref_path_init(&src_path, src, state->mem_ctx);
635
636 bool need_to_specialize_wildcards = false;
637 nir_deref_instr **entry_p = &entry_dst_path.path[1];
638 nir_deref_instr **src_p = &src_path.path[1];
639 while (*entry_p && *src_p) {
640 nir_deref_instr *entry_tail = *entry_p++;
641 nir_deref_instr *src_tail = *src_p++;
642
643 if (src_tail->deref_type == nir_deref_type_array &&
644 entry_tail->deref_type == nir_deref_type_array_wildcard)
645 need_to_specialize_wildcards = true;
646 }
647
648 /* If the entry deref is longer than the source deref then it refers to a
649 * smaller type and we can't source from it.
650 */
651 assert(*entry_p == NULL);
652
653 if (need_to_specialize_wildcards) {
654 /* The entry has some wildcards that are not in src. This means we need
655 * to construct a new deref based on the entry but using the wildcards
656 * from the source and guided by the entry dst. Oof.
657 */
658 nir_deref_path entry_src_path;
659 nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
660 value->deref = specialize_wildcards(b, &entry_src_path,
661 &entry_dst_path, &src_path);
662 nir_deref_path_finish(&entry_src_path);
663 }
664
665 /* If our source deref is longer than the entry deref, that's ok because
666 * it just means the entry deref needs to be extended a bit.
667 */
668 while (*src_p) {
669 nir_deref_instr *src_tail = *src_p++;
670 value->deref = nir_build_deref_follower(b, value->deref, src_tail);
671 }
672
673 nir_deref_path_finish(&entry_dst_path);
674 nir_deref_path_finish(&src_path);
675
676 return true;
677 }
678
679 static bool
680 try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
681 nir_builder *b, nir_intrinsic_instr *intrin,
682 nir_deref_instr *src, struct value *value)
683 {
684 if (entry == NULL)
685 return false;
686
687 if (entry->src.is_ssa) {
688 return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
689 } else {
690 return load_from_deref_entry_value(state, entry, b, intrin, src, value);
691 }
692 }
693
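/* Invalidate every copy entry that may be written somewhere inside the given
 * if/loop node, using the information collected by gather_vars_written().
 */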
694 static void
695 invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
696 struct util_dynarray *copies,
697 nir_cf_node *cf_node)
698 {
699 struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
700 assert(ht_entry);
701
702 struct vars_written *written = ht_entry->data;
703 if (written->modes) {
704 util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
705 if (entry->dst->mode & written->modes)
706 copy_entry_remove(copies, entry);
707 }
708 }
709
710 hash_table_foreach (written->derefs, entry) {
711 nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
712 kill_aliases(copies, deref_written, (uintptr_t)entry->data);
713 }
714 }
715
716 static void
717 print_value(struct value *value, unsigned num_components)
718 {
719 if (!value->is_ssa) {
720 printf(" %s ", glsl_get_type_name(value->deref->type));
721 nir_print_deref(value->deref, stdout);
722 return;
723 }
724
725 bool same_ssa = true;
726 for (unsigned i = 0; i < num_components; i++) {
727 if (value->ssa.component[i] != i ||
728 (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
729 same_ssa = false;
730 break;
731 }
732 }
733 if (same_ssa) {
734 printf(" ssa_%d", value->ssa.def[0]->index);
735 } else {
736 for (int i = 0; i < num_components; i++) {
737 if (value->ssa.def[i])
738 printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
739 else
740 printf(" _");
741 }
742 }
743 }
744
745 static void
746 print_copy_entry(struct copy_entry *entry)
747 {
748 printf(" %s ", glsl_get_type_name(entry->dst->type));
749 nir_print_deref(entry->dst, stdout);
750 printf(":\t");
751
752 unsigned num_components = glsl_get_vector_elements(entry->dst->type);
753 print_value(&entry->src, num_components);
754 printf("\n");
755 }
756
757 static void
758 dump_instr(nir_instr *instr)
759 {
760 printf(" ");
761 nir_print_instr(instr, stdout);
762 printf("\n");
763 }
764
765 static void
766 dump_copy_entries(struct util_dynarray *copies)
767 {
768 util_dynarray_foreach(copies, struct copy_entry, iter)
769 print_copy_entry(iter);
770 printf("\n");
771 }
772
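/* Walk one basic block, rewriting loads and copies from the tracked entries
 * and updating or invalidating the entries as stores, copies, atomics, calls
 * and barriers are encountered.
 */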
773 static void
774 copy_prop_vars_block(struct copy_prop_var_state *state,
775 nir_builder *b, nir_block *block,
776 struct util_dynarray *copies)
777 {
778 if (debug) {
779 printf("# block%d\n", block->index);
780 dump_copy_entries(copies);
781 }
782
783 nir_foreach_instr_safe(instr, block) {
784 if (debug && instr->type == nir_instr_type_deref)
785 dump_instr(instr);
786
787 if (instr->type == nir_instr_type_call) {
788 if (debug) dump_instr(instr);
789 apply_barrier_for_modes(copies, nir_var_shader_out |
790 nir_var_shader_temp |
791 nir_var_function_temp |
792 nir_var_mem_ssbo |
793 nir_var_mem_shared |
794 nir_var_mem_global);
795 if (debug) dump_copy_entries(copies);
796 continue;
797 }
798
799 if (instr->type != nir_instr_type_intrinsic)
800 continue;
801
802 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
803 switch (intrin->intrinsic) {
804 case nir_intrinsic_control_barrier:
805 case nir_intrinsic_memory_barrier:
806 if (debug) dump_instr(instr);
807
808 apply_barrier_for_modes(copies, nir_var_shader_out |
809 nir_var_mem_ssbo |
810 nir_var_mem_shared |
811 nir_var_mem_global);
812 break;
813
814 case nir_intrinsic_memory_barrier_buffer:
815 if (debug) dump_instr(instr);
816
817 apply_barrier_for_modes(copies, nir_var_mem_ssbo |
818 nir_var_mem_global);
819 break;
820
821 case nir_intrinsic_memory_barrier_shared:
822 if (debug) dump_instr(instr);
823
824 apply_barrier_for_modes(copies, nir_var_mem_shared);
825 break;
826
827 case nir_intrinsic_memory_barrier_tcs_patch:
828 if (debug) dump_instr(instr);
829
830 apply_barrier_for_modes(copies, nir_var_shader_out);
831 break;
832
833 case nir_intrinsic_scoped_memory_barrier:
834 if (debug) dump_instr(instr);
835
836 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
837 apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
838 break;
839
840 case nir_intrinsic_emit_vertex:
841 case nir_intrinsic_emit_vertex_with_counter:
842 if (debug) dump_instr(instr);
843
844 apply_barrier_for_modes(copies, nir_var_shader_out);
845 break;
846
847 case nir_intrinsic_load_deref: {
848 if (debug) dump_instr(instr);
849
850 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
851 break;
852
853 nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
854
855 /* Direct array_derefs of vectors operate on the vectors (the parent
856 * deref). Indirects will be handled like other derefs.
857 */
858 int vec_index = 0;
859 nir_deref_instr *vec_src = src;
860 if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
861 vec_src = nir_deref_instr_parent(src);
862 unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
863 vec_index = nir_src_as_uint(src->arr.index);
864
865 /* Loading from an invalid index yields an undef */
866 if (vec_index >= vec_comps) {
867 b->cursor = nir_instr_remove(instr);
868 nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
869 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(u));
870 break;
871 }
872 }
873
874 struct copy_entry *src_entry =
875 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
876 struct value value = {0};
877 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
878 if (value.is_ssa) {
879 /* try_load_from_entry() has already ensured that we get a single
880 * SSA value that has all of the channels. We just have to do the
881 * rewrite operation. Note that for array derefs of vectors,
882 * channel 0 is used.
883 */
884 if (intrin->instr.block) {
885 /* The lookup left our instruction in-place. This means it
886 * must have used it to vec up a bunch of different sources.
887 * We need to be careful when rewriting uses so we don't
888 * rewrite the vecN itself.
889 */
890 nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
891 nir_src_for_ssa(value.ssa.def[0]),
892 value.ssa.def[0]->parent_instr);
893 } else {
894 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
895 nir_src_for_ssa(value.ssa.def[0]));
896 }
897 } else {
898 /* We're turning it into a load of a different variable */
899 intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);
900
901 /* Put it back in again. */
902 nir_builder_instr_insert(b, instr);
903 value_set_ssa_components(&value, &intrin->dest.ssa,
904 intrin->num_components);
905 }
906 state->progress = true;
907 } else {
908 value_set_ssa_components(&value, &intrin->dest.ssa,
909 intrin->num_components);
910 }
911
912 /* Now that we have a value, we're going to store it back so that we
913 * have the right value next time we come looking for it. In order
914 * to do this, we need an exact match, not just something that
915 * contains what we're looking for.
916 */
917 struct copy_entry *entry =
918 lookup_entry_for_deref(copies, vec_src, nir_derefs_equal_bit);
919 if (!entry)
920 entry = copy_entry_create(copies, vec_src);
921
922 /* Update the entry with the value of the load. This way
923 * we can potentially remove subsequent loads.
924 */
925 value_set_from_value(&entry->src, &value, vec_index,
926 (1 << intrin->num_components) - 1);
927 break;
928 }
929
930 case nir_intrinsic_store_deref: {
931 if (debug) dump_instr(instr);
932
933 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
934 break;
935
936 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
937 assert(glsl_type_is_vector_or_scalar(dst->type));
938
939 /* Direct array_derefs of vectors operate on the vectors (the parent
940 * deref). Indirects will be handled like other derefs.
941 */
942 int vec_index = 0;
943 nir_deref_instr *vec_dst = dst;
944 if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
945 vec_dst = nir_deref_instr_parent(dst);
946 unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);
947
948 vec_index = nir_src_as_uint(dst->arr.index);
949
950 /* Storing to an invalid index is a no-op. */
951 if (vec_index >= vec_comps) {
952 nir_instr_remove(instr);
953 break;
954 }
955 }
956
957 struct copy_entry *entry =
958 lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
959 if (entry && value_equals_store_src(&entry->src, intrin)) {
960 /* If we are storing the value from a load of the same var, the
961 * store is redundant so remove it.
962 */
963 nir_instr_remove(instr);
964 } else {
965 struct value value = {0};
966 value_set_ssa_components(&value, intrin->src[1].ssa,
967 intrin->num_components);
968 unsigned wrmask = nir_intrinsic_write_mask(intrin);
969 struct copy_entry *entry =
970 get_entry_and_kill_aliases(copies, vec_dst, wrmask);
971 value_set_from_value(&entry->src, &value, vec_index, wrmask);
972 }
973
974 break;
975 }
976
977 case nir_intrinsic_copy_deref: {
978 if (debug) dump_instr(instr);
979
980 if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
981 (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
982 break;
983
984 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
985 nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
986
987 if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
988 /* This is a no-op self-copy. Get rid of it */
989 nir_instr_remove(instr);
990 continue;
991 }
992
993 /* The copy_deref intrinsic doesn't keep track of num_components, so
994 * get it ourselves.
995 */
996 unsigned num_components = glsl_get_vector_elements(dst->type);
997 unsigned full_mask = (1 << num_components) - 1;
998
999 /* Copies of direct array derefs of vectors are not handled. Just
1000 * invalidate what's written and bail.
1001 */
1002 if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
1003 (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
1004 kill_aliases(copies, dst, full_mask);
1005 break;
1006 }
1007
1008 struct copy_entry *src_entry =
1009 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
1010 struct value value;
1011 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
1012 /* If load works, intrin (the copy_deref) is removed. */
1013 if (value.is_ssa) {
1014 nir_store_deref(b, dst, value.ssa.def[0], full_mask);
1015 } else {
1016 /* If this would be a no-op self-copy, don't bother. */
1017 if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
1018 continue;
1019
1020 /* Just turn it into a copy of a different deref */
1021 intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);
1022
1023 /* Put it back in again. */
1024 nir_builder_instr_insert(b, instr);
1025 }
1026
1027 state->progress = true;
1028 } else {
1029 value = (struct value) {
1030 .is_ssa = false,
1031 { .deref = src },
1032 };
1033 }
1034
1035 nir_variable *src_var = nir_deref_instr_get_variable(src);
1036 if (src_var && src_var->data.cannot_coalesce) {
1037 /* The source cannot be coalesced, which means we can't propagate
1038 * this copy.
1039 */
1040 break;
1041 }
1042
1043 struct copy_entry *dst_entry =
1044 get_entry_and_kill_aliases(copies, dst, full_mask);
1045 value_set_from_value(&dst_entry->src, &value, 0, full_mask);
1046 break;
1047 }
1048
1049 case nir_intrinsic_deref_atomic_add:
1050 case nir_intrinsic_deref_atomic_imin:
1051 case nir_intrinsic_deref_atomic_umin:
1052 case nir_intrinsic_deref_atomic_imax:
1053 case nir_intrinsic_deref_atomic_umax:
1054 case nir_intrinsic_deref_atomic_and:
1055 case nir_intrinsic_deref_atomic_or:
1056 case nir_intrinsic_deref_atomic_xor:
1057 case nir_intrinsic_deref_atomic_exchange:
1058 case nir_intrinsic_deref_atomic_comp_swap:
1059 if (debug) dump_instr(instr);
1060
1061 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
1062 break;
1063
1064 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
1065 unsigned num_components = glsl_get_vector_elements(dst->type);
1066 unsigned full_mask = (1 << num_components) - 1;
1067 kill_aliases(copies, dst, full_mask);
1068 break;
1069
1070 default:
1071 continue; /* To skip the debug below. */
1072 }
1073
1074 if (debug) dump_copy_entries(copies);
1075 }
1076 }
1077
1078 static void
1079 copy_prop_vars_cf_node(struct copy_prop_var_state *state,
1080 struct util_dynarray *copies,
1081 nir_cf_node *cf_node)
1082 {
1083 switch (cf_node->type) {
1084 case nir_cf_node_function: {
1085 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
1086
1087 struct util_dynarray impl_copies;
1088 util_dynarray_init(&impl_copies, state->mem_ctx);
1089
1090 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
1091 copy_prop_vars_cf_node(state, &impl_copies, cf_node);
1092
1093 break;
1094 }
1095
1096 case nir_cf_node_block: {
1097 nir_block *block = nir_cf_node_as_block(cf_node);
1098 nir_builder b;
1099 nir_builder_init(&b, state->impl);
1100 copy_prop_vars_block(state, &b, block, copies);
1101 break;
1102 }
1103
1104 case nir_cf_node_if: {
1105 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1106
1107 /* Clone the copies for each branch of the if statement. The idea is
1108 * that they both see the same state of available copies, but do not
1109 * interfere with each other.
1110 */
1111
1112 struct util_dynarray then_copies;
1113 util_dynarray_clone(&then_copies, state->mem_ctx, copies);
1114
1115 struct util_dynarray else_copies;
1116 util_dynarray_clone(&else_copies, state->mem_ctx, copies);
1117
1118 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
1119 copy_prop_vars_cf_node(state, &then_copies, cf_node);
1120
1121 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
1122 copy_prop_vars_cf_node(state, &else_copies, cf_node);
1123
1124 /* Both branches' copies can be ignored, since the effect of running both
1125 * branches was captured in the first pass that collects vars_written.
1126 */
1127
1128 invalidate_copies_for_cf_node(state, copies, cf_node);
1129
1130 break;
1131 }
1132
1133 case nir_cf_node_loop: {
1134 nir_loop *loop = nir_cf_node_as_loop(cf_node);
1135
1136 /* Invalidate before cloning the copies for the loop, since the loop
1137 * body can be executed more than once.
1138 */
1139
1140 invalidate_copies_for_cf_node(state, copies, cf_node);
1141
1142 struct util_dynarray loop_copies;
1143 util_dynarray_clone(&loop_copies, state->mem_ctx, copies);
1144
1145 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
1146 copy_prop_vars_cf_node(state, &loop_copies, cf_node);
1147
1148 break;
1149 }
1150
1151 default:
1152 unreachable("Invalid CF node type");
1153 }
1154 }
1155
1156 static bool
1157 nir_copy_prop_vars_impl(nir_function_impl *impl)
1158 {
1159 void *mem_ctx = ralloc_context(NULL);
1160
1161 if (debug) {
1162 nir_metadata_require(impl, nir_metadata_block_index);
1163 printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
1164 }
1165
1166 struct copy_prop_var_state state = {
1167 .impl = impl,
1168 .mem_ctx = mem_ctx,
1169 .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
1170
1171 .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
1172 };
1173
1174 gather_vars_written(&state, NULL, &impl->cf_node);
1175
1176 copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);
1177
1178 if (state.progress) {
1179 nir_metadata_preserve(impl, nir_metadata_block_index |
1180 nir_metadata_dominance);
1181 } else {
1182 #ifndef NDEBUG
1183 impl->valid_metadata &= ~nir_metadata_not_properly_reset;
1184 #endif
1185 }
1186
1187 ralloc_free(mem_ctx);
1188 return state.progress;
1189 }
1190
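/* Run variable-based copy propagation on every function in the shader.
 * Returns true if any progress was made.
 *
 * A typical way to invoke it from a driver's optimization loop (a sketch;
 * the surrounding passes and loop structure vary per driver):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
 *       NIR_PASS(progress, shader, nir_opt_dce);
 *    } while (progress);
 */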
1191 bool
1192 nir_opt_copy_prop_vars(nir_shader *shader)
1193 {
1194 bool progress = false;
1195
1196 nir_foreach_function(function, shader) {
1197 if (!function->impl)
1198 continue;
1199 progress |= nir_copy_prop_vars_impl(function->impl);
1200 }
1201
1202 return progress;
1203 }