nir/builder: Add a build_deref_array_imm helper
[mesa.git] / src / compiler / nir / tests / vars_tests.cpp
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <gtest/gtest.h>
25
26 #include "nir.h"
27 #include "nir_builder.h"
28
29 namespace {
30
/* Shared fixture for the NIR variable-optimization tests below.  It owns a
 * ralloc context, a linear allocator used for scratch strings/arrays, and a
 * nir_builder whose shader all helpers and tests operate on.
 */
class nir_vars_test : public ::testing::Test {
protected:
   nir_vars_test();
   ~nir_vars_test();

   /* Create a variable of the given mode and type.  function_temp variables
    * are created local to the builder's impl; every other mode becomes a
    * shader-level variable.
    */
   nir_variable *create_var(nir_variable_mode mode, const glsl_type *type,
                            const char *name) {
      if (mode == nir_var_function_temp)
         return nir_local_variable_create(b->impl, type, name);
      else
         return nir_variable_create(b->shader, mode, type, name);
   }

   /* Convenience wrapper: one scalar int variable. */
   nir_variable *create_int(nir_variable_mode mode, const char *name) {
      return create_var(mode, glsl_int_type(), name);
   }

   /* Convenience wrapper: one ivec2 variable. */
   nir_variable *create_ivec2(nir_variable_mode mode, const char *name) {
      return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 2), name);
   }

   /* Create `count` int variables named "<prefix>0", "<prefix>1", ...  The
    * returned array lives in lin_ctx and is freed with the fixture.
    */
   nir_variable **create_many_int(nir_variable_mode mode, const char *prefix, unsigned count) {
      nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
      for (unsigned i = 0; i < count; i++)
         result[i] = create_int(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
      return result;
   }

   /* Same as create_many_int() but each variable is an ivec2. */
   nir_variable **create_many_ivec2(nir_variable_mode mode, const char *prefix, unsigned count) {
      nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
      for (unsigned i = 0; i < count; i++)
         result[i] = create_ivec2(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
      return result;
   }

   /* Number of intrinsic instructions with the given opcode in b->impl. */
   unsigned count_intrinsics(nir_intrinsic_op intrinsic);

   /* Return the index-th (in instruction order) intrinsic with the given
    * opcode, or NULL if there are fewer than index + 1 of them.
    */
   nir_intrinsic_instr *get_intrinsic(nir_intrinsic_op intrinsic,
                                      unsigned index);

   void *mem_ctx;  /* ralloc parent for everything the fixture allocates */
   void *lin_ctx;  /* linear-allocator child of mem_ctx */

   nir_builder *b; /* builder for the shader under test */
};
76
nir_vars_test::nir_vars_test()
{
   mem_ctx = ralloc_context(NULL);
   lin_ctx = linear_alloc_parent(mem_ctx, 0);
   /* All-defaults options; static so the pointer handed to the shader stays
    * valid for the whole test run.
    */
   static const nir_shader_compiler_options options = { };
   b = rzalloc(mem_ctx, nir_builder);
   /* The tests only need a single entry point; fragment stage is arbitrary. */
   nir_builder_init_simple_shader(b, mem_ctx, MESA_SHADER_FRAGMENT, &options);
}
85
nir_vars_test::~nir_vars_test()
{
   /* On failure, dump the shader so the broken IR can be inspected in the
    * test log.
    */
   if (HasFailure()) {
      printf("\nShader from the failed test:\n\n");
      nir_print_shader(b->shader, stdout);
   }

   /* Frees lin_ctx, the builder and the shader along with mem_ctx. */
   ralloc_free(mem_ctx);
}
95
96 unsigned
97 nir_vars_test::count_intrinsics(nir_intrinsic_op intrinsic)
98 {
99 unsigned count = 0;
100 nir_foreach_block(block, b->impl) {
101 nir_foreach_instr(instr, block) {
102 if (instr->type != nir_instr_type_intrinsic)
103 continue;
104 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
105 if (intrin->intrinsic == intrinsic)
106 count++;
107 }
108 }
109 return count;
110 }
111
112 nir_intrinsic_instr *
113 nir_vars_test::get_intrinsic(nir_intrinsic_op intrinsic,
114 unsigned index)
115 {
116 nir_foreach_block(block, b->impl) {
117 nir_foreach_instr(instr, block) {
118 if (instr->type != nir_instr_type_intrinsic)
119 continue;
120 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
121 if (intrin->intrinsic == intrinsic) {
122 if (index == 0)
123 return intrin;
124 index--;
125 }
126 }
127 }
128 return NULL;
129 }
130
/* Allow grouping the tests while still sharing the helpers.  Each subclass
 * only provides a distinct gtest fixture name; no behavior is added.
 */
class nir_redundant_load_vars_test : public nir_vars_test {};
class nir_copy_prop_vars_test : public nir_vars_test {};
class nir_dead_write_vars_test : public nir_vars_test {};
135
136 } // namespace
137
TEST_F(nir_redundant_load_vars_test, duplicated_load)
{
   /* Load a variable twice in the same block. One should be removed. */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);
   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   /* Sanity-check the setup: both loads are present before the pass. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The second load was redundant and should be gone. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
159
TEST_F(nir_redundant_load_vars_test, duplicated_load_in_two_blocks)
{
   /* Load a variable twice in different blocks. One should be removed. */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);

   /* Forces the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The pass must see that nothing in the if can modify "in", so the
    * second load is still redundant.
    */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
185
TEST_F(nir_redundant_load_vars_test, invalidate_inside_if_block)
{
   /* Load variables, then write to some of then in different branches of the
    * if statement. They should be invalidated accordingly.
    */

   nir_variable **g = create_many_int(nir_var_shader_temp, "g", 3);
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);

   /* Prime the pass's value tracking with one load of each variable. */
   nir_load_var(b, g[0]);
   nir_load_var(b, g[1]);
   nir_load_var(b, g[2]);

   /* The then-branch clobbers g[0], the else-branch clobbers g[1]; g[2] is
    * untouched by either branch.
    */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, g[0], nir_imm_int(b, 10), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, g[1], nir_imm_int(b, 20), 1);

   nir_pop_if(b, if_stmt);

   nir_store_var(b, out[0], nir_load_var(b, g[0]), 1);
   nir_store_var(b, out[1], nir_load_var(b, g[1]), 1);
   nir_store_var(b, out[2], nir_load_var(b, g[2]), 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   /* There are 3 initial loads, plus 2 loads for the values invalidated
    * inside the if statement.
    */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 5);

   /* We only load g[2] once. */
   unsigned g2_load_count = 0;
   for (int i = 0; i < 5; i++) {
      nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, i);
      if (nir_intrinsic_get_var(load, 0) == g[2])
         g2_load_count++;
   }
   EXPECT_EQ(g2_load_count, 1);
}
230
TEST_F(nir_redundant_load_vars_test, invalidate_live_load_in_the_end_of_loop)
{
   /* Invalidating a load in the end of loop body will apply to the whole loop
    * body.
    */

   nir_variable *v = create_int(nir_var_mem_ssbo, "v");

   nir_load_var(b, v);

   nir_loop *loop = nir_push_loop(b);

   /* Break at the top so the loop is well-formed without an exit condition. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   /* The store at the end of the body invalidates v for the load earlier in
    * the same body (via the loop back-edge), so neither load is removable.
    */
   nir_load_var(b, v);
   nir_store_var(b, v, nir_imm_int(b, 10), 1);

   nir_pop_loop(b, loop);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);
}
255
TEST_F(nir_copy_prop_vars_test, simple_copies)
{
   /* A chain of copy_derefs (in -> temp -> out) should be rewritten so both
    * copies read straight from "in".
    */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable *temp = create_int(nir_var_function_temp, "temp");
   nir_variable *out = create_int(nir_var_shader_out, "out");

   nir_copy_var(b, temp, in);
   nir_copy_var(b, out, temp);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Both copies remain, but the second one's source must now be the same
    * deref as the first one's (i.e. "in"), not "temp".
    */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);

   nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_TRUE(first_copy->src[1].is_ssa);

   nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_TRUE(second_copy->src[1].is_ssa);

   EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
}
282
TEST_F(nir_copy_prop_vars_test, simple_store_load)
{
   /* A load right after a full store of the same variable should be replaced
    * by the stored value.
    */

   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components of the ivec2 */

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Both stores should now write the original immediate directly. */
   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
309
TEST_F(nir_copy_prop_vars_test, store_store_load)
{
   /* Two full stores to the same variable followed by a load: the load must
    * observe the second (most recent) store.
    */

   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components */

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use second_value directly. */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
   EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
}
339
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components)
{
   /* Partial stores to different components must be tracked independently:
    * a later store to .x must not invalidate the known value of .y.
    */

   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);  /* writes only .y (= 20) */

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0); /* writes only .x (= 30) */

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);   /* consumes only .y */

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the vec/swizzle the pass builds so the component value below can
    * be read as a constant.
    */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly. The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
372
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components_in_many_blocks)
{
   /* Same per-component tracking as the previous test, but with the stores
    * and the load separated into different basic blocks.
    */

   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);  /* writes only .y (= 20) */

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0); /* writes only .x (= 30) */

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);   /* consumes only .y */

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the rebuilt vector so the component can be checked as a constant. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly. The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
411
TEST_F(nir_copy_prop_vars_test, memory_barrier_in_two_blocks)
{
   /* A memory barrier must invalidate tracked SSBO values: the load after
    * the barrier cannot be copy-propagated, the load before it can.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);

   /* Build the barrier intrinsic by hand; it has no builder helper here. */
   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);

   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Only the second load will remain after the optimization. */
   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[1]);
}
436
TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
{
   /* Like simple_store_load, but the load lives in a later basic block; the
    * stored value must still propagate across the block boundary.
    */

   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components */

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Both stores should write the original immediate directly. */
   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
466
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_load)
{
   /* A direct array deref into a vector (vec[1]) should reuse a value the
    * pass already knows from an earlier per-component store.
    */

   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *in1 = create_ivec2(nir_var_mem_ssbo, "in1");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);
   nir_store_var(b, vec, nir_load_var(b, in1), 1 << 1);

   /* This load will be dropped, as vec.y (or vec[1]) is already known. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_ssa_def *loaded_from_deref = nir_load_deref(b, deref);

   /* This store should use the value loaded from in1. */
   nir_store_var(b, out, loaded_from_deref, 1 << 0);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(store->src[1].is_ssa);

   /* NOTE: The ALU instruction is how we get the vec.y. */
   ASSERT_TRUE(nir_src_as_alu_instr(&store->src[1]));
}
502
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_copy)
{
   /* After copy_deref(vec, in0), loading vec[1] should be rewritten to read
    * from in0 instead of vec.
    */

   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");

   nir_copy_var(b, vec, in0);

   /* This load will be replaced with one from in0. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);

   /* The remaining load must now reference in0, the copy's source. */
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), in0);
}
526
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
{
   /* A value learned from a per-component (vec[1]) load should later be
    * combined with a per-component store to satisfy a full-vector load.
    */

   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_ivec2(nir_var_mem_ssbo, "out");

   /* Loading "vec[1]" deref will save the information about vec.y. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   /* Store to vec.x. */
   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);

   /* This load will be dropped, since both vec.x and vec.y are known. */
   nir_ssa_def *loaded_from_vec = nir_load_var(b, vec);
   nir_store_var(b, out, loaded_from_vec, 0x3);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* The store to out is fed by an ALU instruction that recombines the two
    * known components.
    */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(store->src[1].is_ssa);
   ASSERT_TRUE(nir_src_as_alu_instr(&store->src[1]));
}
560
TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
{
   /* Direct array-deref stores into a vector's components should make later
    * loads of those components (and of the whole vector) unnecessary.
    */

   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out0 = create_int(nir_var_mem_ssbo, "out0");
   nir_variable *out1 = create_ivec2(nir_var_mem_ssbo, "out1");

   /* Store to "vec[1]" and "vec[0]". */
   nir_deref_instr *store_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_deref(b, store_deref_y, nir_imm_int(b, 20), 1);

   nir_deref_instr *store_deref_x =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 0);
   nir_store_deref(b, store_deref_x, nir_imm_int(b, 10), 1);

   /* Both loads below will be dropped, because the values are already known. */
   nir_deref_instr *load_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_var(b, out0, nir_load_deref(b, load_deref_y), 1);

   nir_store_var(b, out1, nir_load_var(b, vec), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* Third store will just use the value from first store. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(third_store->src[1].is_ssa);
   EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);

   /* Fourth store will compose first and second store values. */
   nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_TRUE(fourth_store->src[1].is_ssa);
   EXPECT_TRUE(nir_src_as_alu_instr(&fourth_store->src[1]));
}
605
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
{
   /* An indirect store (vec[idx]) followed by an indirect load with the same
    * index SSA value should be recognized as reading the stored value.
    */

   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   /* Load the index once so both derefs share the same SSA def. */
   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to vec[idx]. */
   nir_deref_instr *store_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_deref(b, store_deref, nir_imm_int(b, 20), 1);

   /* Load from vec[idx] to store in out. This load should be dropped. */
   nir_deref_instr *load_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_var(b, out, nir_load_deref(b, load_deref), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Store to vec[idx] propagated to out. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(first->src[1].is_ssa);
   ASSERT_TRUE(second->src[1].is_ssa);
   EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
642
TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_vector)
{
   /* After a full-vector store, two identical indirect loads (vec[idx])
    * should be deduplicated even though the index is not constant.
    */

   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable **out = create_many_int(nir_var_mem_ssbo, "out", 2);

   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to vec. */
   nir_store_var(b, vec, nir_imm_ivec2(b, 10, 10), 1 | 2);

   /* Load from vec[idx]. This load is currently not dropped. */
   nir_deref_instr *indirect =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_var(b, out[0], nir_load_deref(b, indirect), 1);

   /* Load from vec[idx] again. This load should be dropped. */
   nir_store_var(b, out[1], nir_load_deref(b, indirect), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to vec[idx] propagated to out. */
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(second->src[1].is_ssa);
   ASSERT_TRUE(third->src[1].is_ssa);
   EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
}
680
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
{
   /* Same indirect store/load pattern as above, but on a real array variable
    * instead of a vector.
    */

   nir_variable *arr = create_var(nir_var_mem_ssbo,
                                  glsl_array_type(glsl_int_type(), 10, 0),
                                  "arr");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   /* Load the index once so both derefs share the same SSA def. */
   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to arr[idx]. */
   nir_deref_instr *store_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), idx_ssa);
   nir_store_deref(b, store_deref, nir_imm_int(b, 20), 1);

   /* Load from arr[idx] to store in out. This load should be dropped. */
   nir_deref_instr *load_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), idx_ssa);
   nir_store_var(b, out, nir_load_deref(b, load_deref), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Store to arr[idx] propagated to out. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(first->src[1].is_ssa);
   ASSERT_TRUE(second->src[1].is_ssa);
   EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
719
720 TEST_F(nir_dead_write_vars_test, no_dead_writes_in_block)
721 {
722 nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);
723
724 nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
725
726 bool progress = nir_opt_dead_write_vars(b->shader);
727 ASSERT_FALSE(progress);
728 }
729
730 TEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
731 {
732 nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);
733
734 nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
735 nir_store_var(b, v[0], nir_load_var(b, v[2]), 1 << 1);
736
737 bool progress = nir_opt_dead_write_vars(b->shader);
738 ASSERT_FALSE(progress);
739 }
740
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
{
   /* A store overwritten in only ONE branch of an if is still live on the
    * other path, so no write may be removed.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);

   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   /* Each arm of the if statement will overwrite one store. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, v[2], nir_load_var(b, v[4]), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, v[3], nir_load_var(b, v[5]), 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
760
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_loop_statement)
{
   /* A write overwritten only inside a loop body is not dead, because the
    * body may execute zero times.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Loop will write other value. Since it might not be executed, it doesn't
    * kill the first write.
    */
   nir_loop *loop = nir_push_loop(b);

   /* Break at the top so the loop is well-formed. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);
   nir_pop_loop(b, loop);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
782
783 TEST_F(nir_dead_write_vars_test, dead_write_in_block)
784 {
785 nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
786
787 nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
788 nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
789 nir_store_var(b, v[0], load_v2, 1);
790
791 bool progress = nir_opt_dead_write_vars(b->shader);
792 ASSERT_TRUE(progress);
793
794 EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
795
796 nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
797 ASSERT_TRUE(store->src[1].is_ssa);
798 EXPECT_EQ(store->src[1].ssa, load_v2);
799 }
800
801 TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
802 {
803 nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);
804
805 nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
806 nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
807 nir_store_var(b, v[0], load_v2, 1 << 0);
808
809 bool progress = nir_opt_dead_write_vars(b->shader);
810 ASSERT_TRUE(progress);
811
812 EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
813
814 nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
815 ASSERT_TRUE(store->src[1].is_ssa);
816 EXPECT_EQ(store->src[1].ssa, load_v2);
817 }
818
819
820 /* TODO: The DISABLED tests below depend on the dead write removal be able to
821 * identify dead writes between multiple blocks. This is still not
822 * implemented.
823 */
824
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
{
   /* The overwriting store is in a later block; cross-block dead-write
    * removal (not yet implemented) should delete the first store.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[0], load_v2, 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* Only the second store (writing load_v2) should remain. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
846
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
{
   /* Same as DISABLED_dead_write_in_two_blocks, but with component-granular
    * writes: both stores write only .x.
    */

   nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1 << 0);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* Only the second store (writing load_v2) should remain. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
868
TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
{
   /* When BOTH branches of an if overwrite a store, the store before the if
    * is dead on every path and should be removed.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);

   /* Both branches will overwrite, making the previous store dead. */
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1);

   nir_push_else(b, if_stmt);
   nir_ssa_def *load_v3 = nir_load_var(b, v[3]);
   nir_store_var(b, v[0], load_v3, 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);
   EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));

   /* The two surviving stores are the branch stores, in order. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(first_store->src[1].is_ssa);
   EXPECT_EQ(first_store->src[1].ssa, load_v2);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(second_store->src[1].is_ssa);
   EXPECT_EQ(second_store->src[1].ssa, load_v3);
}
898
TEST_F(nir_dead_write_vars_test, DISABLED_memory_barrier_in_two_blocks)
{
   /* A memory barrier makes prior SSBO writes observable, so only writes
    * that are overwritten BEFORE the barrier may be removed.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Because it is before the barrier, this will kill the previous store to that target. */
   nir_store_var(b, v[0], nir_imm_int(b, 3), 1);

   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);

   /* After the barrier, the earlier store to v[1] counts as observed, so
    * this one doesn't kill it.
    */
   nir_store_var(b, v[1], nir_imm_int(b, 4), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Of the four stores only the first store to v[0] was dead. */
   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));
}
921
TEST_F(nir_dead_write_vars_test, DISABLED_unrelated_barrier_in_two_blocks)
{
   /* emit_vertex pins writes to shader outputs, but must not keep alive an
    * unrelated SSBO write that is later overwritten.
    */

   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
   nir_variable *out = create_int(nir_var_shader_out, "out");

   nir_store_var(b, out, nir_load_var(b, v[1]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Emit vertex will ensure writes to output variables are considered used,
    * but should not affect other types of variables. */

   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_emit_vertex)->instr);

   nir_store_var(b, out, nir_load_var(b, v[2]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Verify the first write to v[0] was removed. */
   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(nir_intrinsic_get_var(first_store, 0), out);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(nir_intrinsic_get_var(second_store, 0), out);

   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(nir_intrinsic_get_var(third_store, 0), v[0]);
}