unsigned num_channels,
bool header_present);
-void
+unsigned
brw_memory_fence(struct brw_codegen *p,
struct brw_reg dst,
struct brw_reg src,
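[Note: the return type of brw_memory_fence() changes from void to unsigned, in the header prototype above and in the definition below. The hunks cut off the parameter list; judging from the call sites in the generators further down, the full contract is roughly the sketch that follows. The send_op and stall parameter names are inferred from those call sites (BRW_OPCODE_SEND, src[1].ud) and are not confirmed by the diff:

   /* Emit the fence message(s) and report how many were sent: 1 on most
    * hardware, 2 on Ivybridge (gen == 7 && !is_haswell), which fences the
    * render cache as well as the data cache. */
   unsigned
   brw_memory_fence(struct brw_codegen *p,
                    struct brw_reg dst,
                    struct brw_reg src,
                    enum opcode send_op, /* e.g. BRW_OPCODE_SEND */
                    bool stall,          /* folded into commit_enable */
                    unsigned bti);
]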
brw_inst_set_binding_table_index(devinfo, insn, bti);
}
-void
+unsigned
brw_memory_fence(struct brw_codegen *p,
struct brw_reg dst,
struct brw_reg src,
(devinfo->gen == 7 && !devinfo->is_haswell);
struct brw_inst *insn;
+ unsigned fences = 0;
+
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_exec_size(p, BRW_EXECUTE_1);
brw_set_src0(p, insn, src);
brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
commit_enable, bti);
+ fences++;
if (devinfo->gen == 7 && !devinfo->is_haswell) {
/* IVB does typed surface access through the render cache, so we need to
brw_set_src0(p, insn, src);
brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
commit_enable, bti);
+ fences++;
/* Now write the response of the second message into the response of the
* first to trigger a pipeline stall -- This way future render and data
}
brw_pop_insn_state(p);
+
+ return fences;
}
void
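[Note: the change above makes the emitter report how many SEND messages it actually produced, so the generators below can stop assuming one send per fence. A minimal standalone C illustration of the pattern -- this is not Mesa code; emit_fence() and its printf bodies are stand-ins:

   #include <stdbool.h>
   #include <stdio.h>

   /* Stand-in for brw_memory_fence(): emits one or two messages and
    * returns the count instead of letting the caller guess. */
   static unsigned
   emit_fence(bool fence_render_cache_too)
   {
      unsigned fences = 0;

      printf("SEND fence (data cache)\n");
      fences++;

      /* Mirrors the gen == 7 && !is_haswell path above. */
      if (fence_render_cache_too) {
         printf("SEND fence (render cache)\n");
         fences++;
      }

      return fences;
   }

   int
   main(void)
   {
      unsigned send_count = 0;

      send_count += emit_fence(false); /* +1 */
      send_count += emit_fence(true);  /* +2, the Ivybridge case */

      printf("sends: %u\n", send_count); /* prints "sends: 3" */
      return 0;
   }
]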
generate_shader_time_add(inst, src[0], src[1], src[2]);
break;
- case SHADER_OPCODE_MEMORY_FENCE:
+ case SHADER_OPCODE_MEMORY_FENCE: {
assert(src[1].file == BRW_IMMEDIATE_VALUE);
assert(src[2].file == BRW_IMMEDIATE_VALUE);
- brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND, src[1].ud, src[2].ud);
- send_count++;
+ const unsigned sends =
+ brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND, src[1].ud,
+ src[2].ud);
+ send_count += sends;
break;
+ }
case FS_OPCODE_SCHEDULING_FENCE:
if (unlikely(debug_flag))
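[Note: the vec4 generator below gets the same treatment as the fs generator above. Keeping the accounting in the callers, rather than having brw_memory_fence() bump a counter itself, is presumably deliberate: the emitter lives in the shared EU layer, while send_count is a per-generator statistic, so returning the count keeps the layering clean.]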
send_count++;
break;
- case SHADER_OPCODE_MEMORY_FENCE:
- brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND, false, /* bti */ 0);
- send_count++;
+ case SHADER_OPCODE_MEMORY_FENCE: {
+ const unsigned sends =
+ brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND, false,
+ /* bti */ 0);
+ send_count += sends;
break;
+ }
case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
const struct brw_reg mask =