#define TAG_TEXTURE_4_VTX 0x2
#define TAG_TEXTURE_4 0x3
+#define TAG_TEXTURE_4_BARRIER 0x4 /* texture word carrying TEXTURE_OP_BARRIER; distinct tag so the scheduler/packer can special-case it */
#define TAG_LOAD_STORE_4 0x5
#define TAG_ALU_4 0x8
#define TAG_ALU_8 0x9
emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
}
+/* Emit a control (execution) barrier. Midgard has no dedicated barrier
+ * pipeline: the barrier is encoded as a texture-word instruction whose op
+ * is TEXTURE_OP_BARRIER, with all four sources marked unused (~0).
+ * NOTE(review): unknown4 = 3 appears to select the buffer | shared memory
+ * scopes, with the control scope implied by the op itself -- confirm
+ * against hardware documentation. */
+static void
+emit_control_barrier(compiler_context *ctx)
+{
+ midgard_instruction ins = {
+ .type = TAG_TEXTURE_4,
+ .src = { ~0, ~0, ~0, ~0 },
+ .texture = {
+ .op = TEXTURE_OP_BARRIER,
+ .unknown4 = 3 /* (control |) buffers | shared */
+ }
+ };
+
+ emit_mir_instruction(ctx, ins);
+}
+
static const nir_variable *
search_var(struct exec_list *vars, unsigned driver_loc)
{
emit_vertex_builtin(ctx, instr);
break;
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_shared:
+ break;
+
+ case nir_intrinsic_control_barrier:
+ schedule_barrier(ctx);
+ emit_control_barrier(ctx);
+ schedule_barrier(ctx);
+ break;
+
default:
printf ("Unhandled intrinsic %s\n", nir_intrinsic_infos[instr->intrinsic].name);
assert(0);
}
case TAG_TEXTURE_4:
- case TAG_TEXTURE_4_VTX: {
+ case TAG_TEXTURE_4_VTX:
+ case TAG_TEXTURE_4_BARRIER: {
/* Texture instructions are easy, since there is no pipelining
* nor VLIW to worry about. We may need to set .cont/.last
* flags. */
}
case TAG_TEXTURE_4: {
+ if (ins->texture.op == TEXTURE_OP_BARRIER)
+ break;
+
/* Grab RA results */
struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
mir_update_worklist(worklist, len, instructions, ins);
struct midgard_bundle out = {
- .tag = TAG_TEXTURE_4,
+ .tag = ins->texture.op == TEXTURE_OP_BARRIER ?
+ TAG_TEXTURE_4_BARRIER : TAG_TEXTURE_4,
.instruction_count = 1,
.instructions = { ins }
};