nir/load_store_vectorizer: Add unit tests for alignment handling.
author Eric Anholt <eric@anholt.net>
Fri, 21 Aug 2020 23:59:33 +0000 (16:59 -0700)
committer Marge Bot <eric+marge@anholt.net>
Fri, 4 Sep 2020 13:03:50 +0000 (13:03 +0000)
The alignment upgrading logic is pretty tricky, but this gives me good
confidence that it actually works.

Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4710>

src/compiler/nir/tests/load_store_vectorizer_tests.cpp

index 84b154ecb5af13f4d6201b55019a576e5482cb04..d1a494d61e07e15c78a9d288e77d7f575bb3df94 100644 (file)
@@ -1747,3 +1747,83 @@ TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust)
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
+
+/* Basic case: offset = 16*invocation + 4, so the analysis should report
+ * align_mul 16 with align_offset 4.
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4)
+{
+   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   offset = nir_imul_imm(b, offset, 16);
+   offset = nir_iadd_imm(b, offset, 4);
+   nir_intrinsic_instr *load = create_indirect_load(nir_var_mem_ubo, 0, offset,
+                                                    0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 16);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 4);
+}
+
+/* Like ubo_alignment_16_4, but with an add folded in before the multiply:
+ * offset = (invocation + 1) * 16 + 4 = 16*invocation + 20, which still
+ * reduces to align_mul 16 with align_offset 4.
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4_swapped)
+{
+   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   offset = nir_iadd_imm(b, offset, 1);
+   offset = nir_imul_imm(b, offset, 16);
+   offset = nir_iadd_imm(b, offset, 4);
+   nir_intrinsic_instr *load =
+      create_indirect_load(nir_var_mem_ubo, 0, offset, 0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 16);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 4);
+}
+
+/* Check offset % mul != 0: the constant offset (20) exceeds the stride (16),
+ * so it should be reduced mod 16, giving align_offset 4.
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_20)
+{
+   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   offset = nir_imul_imm(b, offset, 16);
+   offset = nir_iadd_imm(b, offset, 20);
+   nir_intrinsic_instr *load = create_indirect_load(nir_var_mem_ubo, 0, offset,
+                                                    0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 16);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 4);
+}
+
+/* Check that we don't upgrade to non-power-of-two alignments: a stride of 24
+ * must fall back to its largest power-of-two divisor, 8 (and 4 % 8 = 4 for
+ * the offset).
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_24_4)
+{
+   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   offset = nir_imul_imm(b, offset, 24);
+   offset = nir_iadd_imm(b, offset, 4);
+   nir_intrinsic_instr *load =
+      create_indirect_load(nir_var_mem_ubo, 0, offset, 0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 8);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 4);
+}
+
+/* Check that summing two strided terms combines their alignments: with
+ * offset = 64*invocation + 16*instance + 8, the common stride is
+ * min(64, 16) = 16 (their GCD), so align_mul 16 with align_offset 8.
+ * (The previous comment here was a copy-paste from ubo_alignment_24_4;
+ * both strides in this test are powers of two.)
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_64_16_8)
+{
+   nir_ssa_def *x = nir_imul_imm(b, nir_load_local_invocation_index(b), 64);
+   nir_ssa_def *y = nir_imul_imm(b, nir_load_instance_id(b), 16);
+   nir_ssa_def *offset = nir_iadd(b, x, y);
+   offset = nir_iadd_imm(b, offset, 8);
+   nir_intrinsic_instr *load =
+      create_indirect_load(nir_var_mem_ubo, 0, offset, 0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 16);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 8);
+}
+
+/* A fully constant offset: align_mul goes to 0x40000000 (presumably the
+ * maximum align_mul NIR represents — confirm against nir_intrinsic_set_align)
+ * with the entire constant, 100, reported as align_offset.
+ */
+TEST_F(nir_load_store_vectorize_test, ubo_alignment_const_100)
+{
+   nir_intrinsic_instr *load =
+      create_indirect_load(nir_var_mem_ubo, 0, nir_imm_int(b, 100), 0x1);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ubo));
+   EXPECT_EQ(nir_intrinsic_align_mul(load), 0x40000000);
+   EXPECT_EQ(nir_intrinsic_align_offset(load), 100);
+}