return ins;
}
+midgard_instruction
+v_load_store_scratch(unsigned srcdest, unsigned index, bool is_store);
+
/* Scheduling */
void schedule_program(compiler_context *ctx);
#define OP_IS_STORE_VARY(op) (\
op == midgard_op_st_vary_16 || \
- op == midgard_op_st_vary_32 \
+ op == midgard_op_st_vary_32 || \
+ op == midgard_op_st_vary_32u || \
+ op == midgard_op_st_vary_32i \
)
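+
+/* Stores that take their data through the r26/r27 store registers */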
+#define OP_IS_STORE_R26(op) (\
+ OP_IS_STORE_VARY(op) || \
+ op == midgard_op_st_char || \
+ op == midgard_op_st_char2 || \
+ op == midgard_op_st_char4 || \
+ op == midgard_op_st_short4 || \
+ op == midgard_op_st_int4 \
+ )
+
#define OP_IS_STORE(op) (\
OP_IS_STORE_VARY(op) || \
op == midgard_op_st_cubemap_coords \
}
case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ if (OP_IS_STORE_R26(ins->load_store.op)) {
/* TODO: use ssa_args for st_vary */
ins->load_store.reg = 0;
} else {
}
}
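+
+/* Create a load or store to the thread local storage scratchpad, used here
+ * for register spilling. `index` is in units of 32-bit vec4s (16 bytes per
+ * slot); stores take their value from r26/r27, loads write to srcdest.
+ *
+ * Illustrative usage (slot/index names below are hypothetical):
+ *
+ *   midgard_instruction st = v_load_store_scratch(26, spill_slot, true);
+ *   midgard_instruction ld = v_load_store_scratch(fill_idx, spill_slot, false);
+ */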
+midgard_instruction
+v_load_store_scratch(unsigned srcdest, unsigned index, bool is_store)
+{
+        /* We index by 32-bit vec4s, so each slot is 4 * 4 = 16 bytes */
+        unsigned byte = (index * 4 * 4);
+
+ midgard_instruction ins = {
+ .type = TAG_LOAD_STORE_4,
+ .mask = 0xF,
+ .ssa_args = {
+ .dest = -1,
+ .src0 = -1,
+ .src1 = -1
+ },
+ .load_store = {
+ .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
+ .swizzle = SWIZZLE_XYZW,
+
+ /* For register spilling - to thread local storage */
+ .unknown = 0x1EEA,
+
+                        /* The byte offset is splattered across two fields:
+                         * the low 9 bits go in varying_parameters (shifted up
+                         * by one), the rest in address. TODO: combine them
+                         * logically */
+                        .varying_parameters = (byte & 0x1FF) << 1,
+                        .address = (byte >> 9)
+ }
+ };
+
+ if (is_store) {
+ /* r0 = r26, r1 = r27 */
+ assert(srcdest == 26 || srcdest == 27);
+ ins.ssa_args.src0 = SSA_FIXED_REGISTER(srcdest - 26);
+ } else {
+ ins.ssa_args.dest = srcdest;
+ }
+ return ins;
+}
+
void
schedule_program(compiler_context *ctx)