nir/load_store_vectorizer: Use more imm helpers in the tests.
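
The nir_builder *_imm helpers fold an immediate operand into the ALU call,
e.g. nir_iadd(b, x, nir_imm_int(b, 16)) becomes nir_iadd_imm(b, x, 16), and
nir_imm_intN_t(b, v, 64) becomes nir_imm_int64(b, v).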
diff --git a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
index 0b332d8bc78bacd5cc24c8d24b11cc15d24c197e..84b154ecb5af13f4d6201b55019a576e5482cb04 100644
--- a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
+++ b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
 #include "nir.h"
 #include "nir_builder.h"
 
+/* This is a macro so you get good line numbers */
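+/* The expected swizzle is a string built from "xyzw" (e.g. "xy"), matching
+ * what the swizzle() helper below returns. */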
+#define EXPECT_INSTR_SWIZZLES(instr, load, expected_swizzle)    \
+   EXPECT_EQ((instr)->src[0].src.ssa, &(load)->dest.ssa);       \
+   EXPECT_EQ(swizzle(instr, 0), expected_swizzle);
+
 namespace {
 
 class nir_load_store_vectorize_test : public ::testing::Test {
@@ -38,7 +43,8 @@ protected:
    nir_intrinsic_instr *get_intrinsic(nir_intrinsic_op intrinsic,
                                       unsigned index);
 
-   bool run_vectorizer(nir_variable_mode modes, bool cse=false);
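+   /* robust_modes: modes for which out-of-bounds access behaviour must be
+    * preserved by the vectorizer (see ssbo_offset_overflow_robust). */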
+   bool run_vectorizer(nir_variable_mode modes, bool cse=false,
+                       nir_variable_mode robust_modes = (nir_variable_mode)0);
 
    nir_ssa_def *get_resource(uint32_t binding, bool ssbo);
 
@@ -69,9 +75,12 @@ protected:
                                       nir_intrinsic_instr *low, nir_intrinsic_instr *high);
    static void shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align);
 
+   std::string swizzle(nir_alu_instr *instr, int src);
+
    void *mem_ctx;
 
    nir_builder *b;
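+   /* Map from the id passed to create_*_load() to the mov reading that
+    * load's result, for use with EXPECT_INSTR_SWIZZLES. */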
+   std::map<unsigned, nir_alu_instr*> movs;
    std::map<unsigned, nir_alu_src*> loads;
    std::map<unsigned, nir_ssa_def*> res_map;
 };
@@ -98,6 +107,17 @@ nir_load_store_vectorize_test::~nir_load_store_vectorize_test()
    glsl_type_singleton_decref();
 }
 
+std::string
+nir_load_store_vectorize_test::swizzle(nir_alu_instr *instr, int src)
+{
+   std::string swizzle;
+   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(instr, src); i++) {
+      swizzle += "xyzw"[instr->src[src].swizzle[i]];
+   }
+
+   return swizzle;
+}
+
 unsigned
 nir_load_store_vectorize_test::count_intrinsics(nir_intrinsic_op intrinsic)
 {
@@ -134,11 +154,13 @@ nir_load_store_vectorize_test::get_intrinsic(nir_intrinsic_op intrinsic,
 }
 
 bool
-nir_load_store_vectorize_test::run_vectorizer(nir_variable_mode modes, bool cse)
+nir_load_store_vectorize_test::run_vectorizer(nir_variable_mode modes,
+                                              bool cse,
+                                              nir_variable_mode robust_modes)
 {
    if (modes & nir_var_mem_shared)
       nir_lower_vars_to_explicit_types(b->shader, nir_var_mem_shared, shared_type_info);
-   bool progress = nir_opt_load_store_vectorize(b->shader, modes, mem_vectorize_callback);
+   bool progress = nir_opt_load_store_vectorize(b->shader, modes, mem_vectorize_callback, robust_modes);
    if (progress) {
       nir_validate_shader(b->shader, NULL);
       if (cse)
@@ -206,8 +228,9 @@ nir_load_store_vectorize_test::create_indirect_load(
       nir_intrinsic_set_access(load, (gl_access_qualifier)access);
    }
    nir_builder_instr_insert(b, &load->instr);
-   nir_instr *mov = nir_mov(b, &load->dest.ssa)->parent_instr;
-   loads[id] = &nir_instr_as_alu(mov)->src[0];
+   nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr);
+   movs[id] = mov;
+   loads[id] = &mov->src[0];
 
    return load;
 }
@@ -276,8 +299,9 @@ void nir_load_store_vectorize_test::create_shared_load(
    load->num_components = components;
    load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
    nir_builder_instr_insert(b, &load->instr);
-   nir_instr *mov = nir_mov(b, &load->dest.ssa)->parent_instr;
-   loads[id] = &nir_instr_as_alu(mov)->src[0];
+   nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr);
+   movs[id] = mov;
+   loads[id] = &mov->src[0];
 }
 
 void nir_load_store_vectorize_test::create_shared_store(
@@ -357,10 +381,8 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_adjacent)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ubo_load_intersecting)
@@ -379,12 +401,8 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_intersecting)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 3);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x1]->swizzle[1], 1);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
-   ASSERT_EQ(loads[0x2]->swizzle[1], 2);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "yz");
 }
 
 TEST_F(nir_load_store_vectorize_test, ubo_load_identical)
@@ -405,8 +423,8 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_identical)
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
    ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
    ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ubo_load_large)
@@ -439,10 +457,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(nir_src_as_uint(load->src[0]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_base)
@@ -461,10 +477,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_base)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(nir_src_as_uint(load->src[0]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent)
@@ -483,10 +497,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect)
@@ -506,10 +518,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(load->src[1].ssa, index_base);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_sub)
@@ -530,10 +540,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_sub)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(load->src[1].ssa, index_base_prev);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride)
@@ -555,10 +563,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride)
    nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 
    /* nir_opt_algebraic optimizes the imul */
    ASSERT_TRUE(test_alu(load->src[1].ssa->parent_instr, nir_op_ineg));
@@ -586,10 +592,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_identical_store_adjacent)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_identical_store_intersecting)
@@ -601,7 +605,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_identical_store_intersecting)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
@@ -615,7 +619,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_identical_store_identical)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
@@ -629,7 +633,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_identical_load_identical)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 2);
 }
@@ -658,10 +662,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_store_identical)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent)
@@ -748,7 +750,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_large)
 TEST_F(nir_load_store_vectorize_test, ubo_load_adjacent_memory_barrier)
 {
    create_load(nir_var_mem_ubo, 0, 0, 0x1);
-   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);
+
+   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
+                             nir_var_mem_ssbo);
+
    create_load(nir_var_mem_ubo, 0, 4, 0x2);
 
    nir_validate_shader(b->shader, NULL);
@@ -762,23 +767,27 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_adjacent_memory_barrier)
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier)
 {
    create_load(nir_var_mem_ssbo, 0, 0, 0x1);
-   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);
+
+   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
+                             nir_var_mem_ssbo);
+
    create_load(nir_var_mem_ssbo, 0, 4, 0x2);
 
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
 
-/* nir_intrinsic_barrier only syncs invocations in a workgroup, it doesn't
- * require that loads/stores complete. */
+/* nir_intrinsic_control_barrier only syncs invocations in a workgroup; it
+ * doesn't require that loads/stores complete.
+ */
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_barrier)
 {
    create_load(nir_var_mem_ssbo, 0, 0, 0x1);
-   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_barrier)->instr);
+   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_control_barrier)->instr);
    create_load(nir_var_mem_ssbo, 0, 4, 0x2);
 
    nir_validate_shader(b->shader, NULL);
@@ -792,7 +801,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_barrier)
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier_shared)
 {
    create_load(nir_var_mem_ssbo, 0, 0, 0x1);
-   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier_shared)->instr);
+
+   nir_scoped_memory_barrier(b, NIR_SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
+                             nir_var_mem_shared);
+
    create_load(nir_var_mem_ssbo, 0, 4, 0x2);
 
    nir_validate_shader(b->shader, NULL);
@@ -820,10 +832,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_8_8_16)
    ASSERT_EQ(load->dest.ssa.bit_size, 8);
    ASSERT_EQ(load->dest.ssa.num_components, 4);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 
    nir_ssa_def *val = loads[0x3]->src.ssa;
    ASSERT_EQ(val->bit_size, 16);
@@ -855,18 +865,14 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 4);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x1]->swizzle[1], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
 
    nir_ssa_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
    nir_alu_instr *pack = nir_instr_as_alu(val->parent_instr);
-   ASSERT_EQ(pack->src[0].src.ssa, &load->dest.ssa);
-   ASSERT_EQ(pack->src[0].swizzle[0], 2);
-   ASSERT_EQ(pack->src[0].swizzle[1], 3);
+   EXPECT_INSTR_SWIZZLES(pack, load, "zw");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64_64)
@@ -886,28 +892,21 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64_64)
    ASSERT_EQ(load->dest.ssa.bit_size, 64);
    ASSERT_EQ(load->dest.ssa.num_components, 3);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 2);
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "z");
 
-   /* pack_64_2x32(unpack_64_2x32()) is created because the 32-bit and first
-    * 64-bit loads are combined before the second 64-bit load is even considered. */
    nir_ssa_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
-   ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
-   nir_alu_instr *pack = nir_instr_as_alu(val->parent_instr);
-   ASSERT_TRUE(test_alu(pack->src[0].src.ssa->parent_instr, nir_op_unpack_64_2x32));
-   nir_alu_instr *unpack = nir_instr_as_alu(pack->src[0].src.ssa->parent_instr);
-   ASSERT_EQ(unpack->src[0].src.ssa, &load->dest.ssa);
-   ASSERT_EQ(unpack->src[0].swizzle[0], 1);
+   ASSERT_TRUE(test_alu(val->parent_instr, nir_op_mov));
+   nir_alu_instr *mov = nir_instr_as_alu(val->parent_instr);
+   EXPECT_INSTR_SWIZZLES(mov, load, "y");
 
    val = loads[0x1]->src.ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 2);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_unpack_64_2x32));
-   unpack = nir_instr_as_alu(val->parent_instr);
-   ASSERT_EQ(unpack->src[0].src.ssa, &load->dest.ssa);
-   ASSERT_EQ(unpack->src[0].swizzle[0], 0);
+   nir_alu_instr *unpack = nir_instr_as_alu(val->parent_instr);
+   EXPECT_INSTR_SWIZZLES(unpack, load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_intersecting_32_32_64)
@@ -926,18 +925,14 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_intersecting_32_32_64)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 3);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 4);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x1]->swizzle[1], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
 
    nir_ssa_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
    nir_alu_instr *pack = nir_instr_as_alu(val->parent_instr);
-   ASSERT_EQ(pack->src[0].src.ssa, &load->dest.ssa);
-   ASSERT_EQ(pack->src[0].swizzle[0], 1);
-   ASSERT_EQ(pack->src[0].swizzle[1], 2);
+   EXPECT_INSTR_SWIZZLES(pack, load, "yz");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_8_8_16)
@@ -960,10 +955,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_8_8_16)
    ASSERT_EQ(val->bit_size, 8);
    ASSERT_EQ(val->num_components, 4);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
-   ASSERT_EQ(nir_const_value_as_uint(cv[0], 32), 0x10);
-   ASSERT_EQ(nir_const_value_as_uint(cv[1], 32), 0x20);
-   ASSERT_EQ(nir_const_value_as_uint(cv[2], 32), 0x30);
-   ASSERT_EQ(nir_const_value_as_uint(cv[3], 32), 0x0);
+   ASSERT_EQ(nir_const_value_as_uint(cv[0], 8), 0x10);
+   ASSERT_EQ(nir_const_value_as_uint(cv[1], 8), 0x20);
+   ASSERT_EQ(nir_const_value_as_uint(cv[2], 8), 0x30);
+   ASSERT_EQ(nir_const_value_as_uint(cv[3], 8), 0x0);
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_32_32_64)
@@ -1048,7 +1043,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_32_64)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 2);
 }
@@ -1108,10 +1103,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_distant_64bit)
@@ -1162,10 +1155,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect_sub)
@@ -1200,10 +1191,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect_sub)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_struct)
@@ -1239,10 +1228,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_struct)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_identical_store_adjacent)
@@ -1275,10 +1262,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_identical_store_adjacent)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_identical_store_identical)
@@ -1331,10 +1316,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_store_identical)
    ASSERT_EQ(deref->deref_type, nir_deref_type_var);
    ASSERT_EQ(deref->var, var);
 
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_load_bool)
@@ -1408,8 +1391,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed)
 
    ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_i2b1));
    ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->dest.ssa, 0));
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_store_adjacent)
@@ -1492,9 +1475,9 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_separate_indirect_indirect
 {
    nir_ssa_def *index_base = nir_load_local_invocation_index(b);
    create_indirect_load(nir_var_mem_push_const, 0,
-      nir_iadd(b, nir_imul(b, nir_iadd(b, index_base, nir_imm_int(b, 2)), nir_imm_int(b, 16)), nir_imm_int(b, 32)), 0x1);
+      nir_iadd_imm(b, nir_imul_imm(b, nir_iadd_imm(b, index_base, 2), 16), 32), 0x1);
    create_indirect_load(nir_var_mem_push_const, 0,
-      nir_iadd(b, nir_imul(b, nir_iadd(b, index_base, nir_imm_int(b, 3)), nir_imm_int(b, 16)), nir_imm_int(b, 32)), 0x2);
+      nir_iadd_imm(b, nir_imul_imm(b, nir_iadd_imm(b, index_base, 3), 16), 32), 0x2);
 
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 2);
@@ -1508,8 +1491,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_complex_indirect)
 {
    nir_ssa_def *index_base = nir_load_local_invocation_index(b);
    //vec4 pc[]; pc[gl_LocalInvocationIndex].w; pc[gl_LocalInvocationIndex+1].x;
-   nir_ssa_def *low = nir_iadd(b, nir_imul(b, index_base, nir_imm_int(b, 16)), nir_imm_int(b, 12));
-   nir_ssa_def *high = nir_imul(b, nir_iadd(b, index_base, nir_imm_int(b, 1)), nir_imm_int(b, 16));
+   nir_ssa_def *low = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 12);
+   nir_ssa_def *high = nir_imul_imm(b, nir_iadd_imm(b, index_base, 1), 16);
    create_indirect_load(nir_var_mem_push_const, 0, low, 0x1);
    create_indirect_load(nir_var_mem_push_const, 0, high, 0x2);
 
@@ -1524,10 +1507,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_complex_indirect)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 2);
    ASSERT_EQ(load->src[0].ssa, low);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x2]->swizzle[0], 1);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_alias0)
@@ -1540,7 +1521,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias0)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
@@ -1565,7 +1546,7 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias2)
 {
    /* TODO: try to combine these loads */
    nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd(b, nir_imul(b, index_base, nir_imm_int(b, 16)), nir_imm_int(b, 4));
+   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 4);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x3);
@@ -1581,10 +1562,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias2)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(load->src[1].ssa, offset);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_alias3)
@@ -1593,7 +1572,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias3)
     * these loads can't be combined because if index_base == 268435455, then
     * offset == 0 because the addition would wrap around */
    nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd(b, nir_imul(b, index_base, nir_imm_int(b, 16)), nir_imm_int(b, 16));
+   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x3);
@@ -1610,7 +1589,7 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias4)
 {
    /* TODO: try to combine these loads */
    nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd(b, nir_imul(b, index_base, nir_imm_int(b, 16)), nir_imm_int(b, 16));
+   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
    nir_instr_as_alu(offset->parent_instr)->no_unsigned_wrap = true;
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
@@ -1627,10 +1606,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias4)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(load->src[1].ssa, offset);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_alias5)
@@ -1642,7 +1619,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias5)
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 
-   EXPECT_FALSE(run_vectorizer(nir_var_mem_ssbo));
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo));
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
@@ -1664,10 +1641,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias6)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, DISABLED_shared_alias0)
@@ -1702,10 +1677,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_shared_alias0)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, shared_alias1)
@@ -1729,16 +1702,14 @@ TEST_F(nir_load_store_vectorize_test, shared_alias1)
    ASSERT_EQ(load->dest.ssa.bit_size, 32);
    ASSERT_EQ(load->dest.ssa.num_components, 1);
    ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa);
-   ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x3]->src.ssa, &load->dest.ssa);
-   ASSERT_EQ(loads[0x1]->swizzle[0], 0);
-   ASSERT_EQ(loads[0x3]->swizzle[0], 0);
+   EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
+   EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
 }
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_distant_64bit)
 {
-   create_indirect_load(nir_var_mem_ssbo, 0, nir_imm_intN_t(b, 0x100000000, 64), 0x1);
-   create_indirect_load(nir_var_mem_ssbo, 0, nir_imm_intN_t(b, 0x200000004, 64), 0x2);
+   create_indirect_load(nir_var_mem_ssbo, 0, nir_imm_int64(b, 0x100000000), 0x1);
+   create_indirect_load(nir_var_mem_ssbo, 0, nir_imm_int64(b, 0x200000004), 0x2);
 
    nir_validate_shader(b->shader, NULL);
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
@@ -1763,3 +1734,16 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_distant_indirect_64bit)
 
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
 }
+
+TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust)
+{
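+   /* The 4-byte load at 0xfffffffc and the load at 0x0 are only adjacent
+    * modulo 2^32 (0xfffffffc + 4 wraps to 0), so combining them could change
+    * out-of-bounds behaviour and they must be left separate. */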
+   create_load(nir_var_mem_ssbo, 0, 0xfffffffc, 0x1);
+   create_load(nir_var_mem_ssbo, 0, 0x0, 0x2);
+
+   nir_validate_shader(b->shader, NULL);
+   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
+
+   EXPECT_TRUE(run_vectorizer(nir_var_mem_ssbo, false, nir_var_mem_ssbo));
+
+   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
+}