case nir_intrinsic_store_deref: {
nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
+ /* Consider a volatile write to also be a sort of read. This
+ * prevents us from deleting a non-volatile write just before a
+ * volatile write thanks to a non-volatile write afterwards. It's
+ * quite the corner case, but this should be safer and more
+ * predictable for the programmer than allowing two non-volatile
+ * writes to be combined with a volatile write between them.
+ */
+ clear_unused_for_read(&unused_writes, dst);
+ break;
+ }
+
nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
break;
nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+ if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
+ clear_unused_for_read(&unused_writes, src);
+ clear_unused_for_read(&unused_writes, dst);
+ break;
+ }
+
/* Self-copy is removed. */
if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
nir_instr_remove(instr);
ASSERT_FALSE(progress);
}
+TEST_F(nir_dead_write_vars_test, volatile_write)
+{
+   nir_variable *v = create_int(nir_var_mem_ssbo, "v");
+
+   /* A non-volatile write that would normally be dead (overwritten below),
+    * followed by a volatile write, followed by another non-volatile write.
+    */
+   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);
+   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);
+   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);
+
+   /* Our approach here is a bit scorched-earth. We expect the volatile store
+    * in the middle to cause both that store and the one before it to be kept.
+    * Technically, volatile only prevents combining the volatile store with
+    * another store and one could argue that the store before the volatile and
+    * the one after it could be combined. However, it seems safer to just
+    * treat a volatile store like an atomic and prevent any combining across
+    * it.
+    */
+   bool progress = nir_opt_dead_write_vars(b->shader);
+   ASSERT_FALSE(progress);
+}
+
+TEST_F(nir_dead_write_vars_test, volatile_copies)
+{
+   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);
+
+   /* A non-volatile copy that would normally be dead (overwritten below),
+    * followed by a volatile copy, followed by another non-volatile copy.
+    */
+   nir_copy_var(b, v[0], v[1]);
+   nir_copy_deref_with_access(b, nir_build_deref_var(b, v[0]),
+                                 nir_build_deref_var(b, v[1]),
+                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
+   nir_copy_var(b, v[0], v[1]);
+
+   /* Our approach here is a bit scorched-earth. We expect the volatile copy
+    * in the middle to cause both that copy and the one before it to be kept.
+    * Technically, volatile only prevents combining the volatile copy with
+    * another access and one could argue that the copy before the volatile and
+    * the one after it could be combined. However, it seems safer to just
+    * treat a volatile copy like an atomic and prevent any combining across
+    * it.
+    */
+   bool progress = nir_opt_dead_write_vars(b->shader);
+   ASSERT_FALSE(progress);
+}
+
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
{
nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);