From bd3ab75aef95d062cedaa92504fede9887a2c370 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Tue, 7 Jan 2020 14:58:45 -0600 Subject: [PATCH] intel/nir: Stop adding redundant barriers Now that both GLSL and SPIR-V are adding shared and tcs_patch barriers (as appropriate) prior to the nir_intrinsic_barrier, we don't need to do it ourselves in the back-end. This reverts commit 26e950a5de01564e3b5f2148ae994454ae5205fe. Reviewed-by: Caio Marcelo de Oliveira Filho Part-of: --- src/intel/compiler/brw_nir_lower_cs_intrinsics.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c index 3f48a3c5dda..434ad005281 100644 --- a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c +++ b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c @@ -55,20 +55,6 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state, nir_ssa_def *sysval; switch (intrinsic->intrinsic) { - case nir_intrinsic_barrier: { - /* Our HW barrier instruction doesn't do a memory barrier for us but - * the GLSL barrier() intrinsic does for shared memory. Insert a - * shared memory barrier before every barrier(). - */ - b->cursor = nir_before_instr(&intrinsic->instr); - - nir_intrinsic_instr *shared_barrier = - nir_intrinsic_instr_create(b->shader, - nir_intrinsic_memory_barrier_shared); - nir_builder_instr_insert(b, &shared_barrier->instr); - continue; - } - case nir_intrinsic_load_local_invocation_index: case nir_intrinsic_load_local_invocation_id: { /* First time we are using those, so let's calculate them. */ -- 2.30.2