+/* Verify that a volatile store acts as a full barrier for store combining:
+ * after nir_opt_copy_prop_vars, all three stores to v[0] must survive
+ * (none may be merged across or into the volatile one), while the load of
+ * v[0] is still forwarded from the last non-volatile store.
+ */
+TEST_F(nir_copy_prop_vars_test, store_volatile)
+{
+ nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
+ /* Write mask covering both components (x and y) of the ivec2. */
+ unsigned mask = 1 | 2;
+
+ nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
+ nir_store_var(b, v[0], first_value, mask);
+
+ /* The middle store is volatile; it must not be combined with either
+  * neighbour.
+  */
+ nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
+ nir_store_var_volatile(b, v[0], second_value, mask);
+
+ nir_ssa_def *third_value = nir_imm_ivec2(b, 50, 60);
+ nir_store_var(b, v[0], third_value, mask);
+
+ /* Read v[0] back and store into v[1]; copy-prop should rewrite this to
+  * use third_value directly.
+  */
+ nir_ssa_def *read_value = nir_load_var(b, v[0]);
+ nir_store_var(b, v[1], read_value, mask);
+
+ nir_validate_shader(b->shader, NULL);
+
+ bool progress = nir_opt_copy_prop_vars(b->shader);
+ EXPECT_TRUE(progress);
+
+ nir_validate_shader(b->shader, NULL);
+
+ /* All four stores remain: 3x to v[0] (volatile barrier) + 1x to v[1]. */
+ ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);
+
+ /* Our approach here is a bit scorched-earth. We expect the volatile store
+ * in the middle to cause both that store and the one before it to be kept.
+ * Technically, volatile only prevents combining the volatile store with
+ * another store and one could argue that the store before the volatile and
+ * the one after it could be combined. However, it seems safer to just
+ * treat a volatile store like an atomic and prevent any combining across
+ * it.
+ */
+ nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
+ ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
+ ASSERT_TRUE(store_to_v1->src[1].is_ssa);
+ EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
+}
+
+/* A self-copy (copy_deref with identical source and destination) is normally
+ * dead and removable, but a self-copy marked volatile on either side must be
+ * preserved with its access qualifiers intact.
+ */
+TEST_F(nir_copy_prop_vars_test, self_copy_volatile)
+{
+ nir_variable *v = create_int(nir_var_mem_ssbo, "v");
+
+ /* Two plain self-copies (removable) bracketing two volatile ones: the
+  * first is volatile on the source, the second on the destination.
+  */
+ nir_copy_var(b, v, v);
+ nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
+ nir_build_deref_var(b, v),
+ (gl_access_qualifier)0, ACCESS_VOLATILE);
+ nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
+ nir_build_deref_var(b, v),
+ ACCESS_VOLATILE, (gl_access_qualifier)0);
+ nir_copy_var(b, v, v);
+
+ nir_validate_shader(b->shader, NULL);
+
+ bool progress = nir_opt_copy_prop_vars(b->shader);
+ EXPECT_TRUE(progress);
+
+ nir_validate_shader(b->shader, NULL);
+
+ ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);
+
+ /* Only the two volatile copies survive, in their original order, with
+  * their access qualifiers preserved: src-volatile first, dst-volatile
+  * second.
+  */
+ nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_copy_deref, 0);
+ nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_copy_deref, 1);
+ ASSERT_EQ(nir_intrinsic_src_access(first), ACCESS_VOLATILE);
+ ASSERT_EQ(nir_intrinsic_dst_access(first), (gl_access_qualifier)0);
+ ASSERT_EQ(nir_intrinsic_src_access(second), (gl_access_qualifier)0);
+ ASSERT_EQ(nir_intrinsic_dst_access(second), ACCESS_VOLATILE);
+}
+