} else {
brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
prim->indirect_offset + 12);
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
- OUT_BATCH(0);
- ADVANCE_BATCH();
+ brw_load_register_imm32(brw, GEN7_3DPRIM_BASE_VERTEX, 0);
}
} else {
indirect_flag = 0;
/* Recommended optimizations for Victim Cache eviction and floating
* point blending.
*/
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_CACHE_MODE_1);
- OUT_BATCH(REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
- REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
- GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
- GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
- ADVANCE_BATCH();
+ brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
+ REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
+ REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
+ GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
+ GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
if (gen_device_info_is_9lp(devinfo)) {
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_GT_MODE);
- OUT_BATCH(GEN9_SUBSLICE_HASHING_MASK_BITS |
- GEN9_SUBSLICE_HASHING_16x16);
- ADVANCE_BATCH();
+ brw_load_register_imm32(brw, GEN7_GT_MODE,
+ GEN9_SUBSLICE_HASHING_MASK_BITS |
+ GEN9_SUBSLICE_HASHING_16x16);
}
}
if (devinfo->gen >= 8) {
assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
+ const unsigned imm_data = ((has_slm ? GEN8_L3CNTLREG_SLM_ENABLE : 0) |
+ SET_FIELD(cfg->n[GEN_L3P_URB], GEN8_L3CNTLREG_URB_ALLOC) |
+ SET_FIELD(cfg->n[GEN_L3P_RO], GEN8_L3CNTLREG_RO_ALLOC) |
+ SET_FIELD(cfg->n[GEN_L3P_DC], GEN8_L3CNTLREG_DC_ALLOC) |
+ SET_FIELD(cfg->n[GEN_L3P_ALL], GEN8_L3CNTLREG_ALL_ALLOC));
/* Set up the L3 partitioning. */
- OUT_BATCH(GEN8_L3CNTLREG);
- OUT_BATCH((has_slm ? GEN8_L3CNTLREG_SLM_ENABLE : 0) |
- SET_FIELD(cfg->n[GEN_L3P_URB], GEN8_L3CNTLREG_URB_ALLOC) |
- SET_FIELD(cfg->n[GEN_L3P_RO], GEN8_L3CNTLREG_RO_ALLOC) |
- SET_FIELD(cfg->n[GEN_L3P_DC], GEN8_L3CNTLREG_DC_ALLOC) |
- SET_FIELD(cfg->n[GEN_L3P_ALL], GEN8_L3CNTLREG_ALL_ALLOC));
-
- ADVANCE_BATCH();
-
+ brw_load_register_imm32(brw, GEN8_L3CNTLREG, imm_data);
} else {
assert(!cfg->n[GEN_L3P_ALL]);
brw->batch.needs_sol_reset = true;
} else {
for (int i = 0; i < 4; i++) {
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_SO_WRITE_OFFSET(i));
- OUT_BATCH(0);
- ADVANCE_BATCH();
+ brw_load_register_imm32(brw, GEN7_SO_WRITE_OFFSET(i), 0);
}
}
render_cache_flush);
/* CACHE_MODE_1 is a non-privileged register. */
- BEGIN_BATCH(3);
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(GEN7_CACHE_MODE_1);
- OUT_BATCH(GEN8_HIZ_PMA_MASK_BITS | pma_stall_bits);
- ADVANCE_BATCH();
+ brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
+ GEN8_HIZ_PMA_MASK_BITS |
+ pma_stall_bits);
/* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
* Flush bits is often necessary. We do it regardless because it's easier.