i965: Move brw->no_batch_wrap into intel_batchbuffer as batch.no_wrap — this flag guards batchbuffer wrapping, so it really makes more sense in the intel_batchbuffer struct.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
intel_batchbuffer_save_state(brw);
retry:
- brw->no_batch_wrap = true;
+ brw->batch.no_wrap = true;
brw_upload_compute_state(brw);
brw_emit_gpgpu_walker(brw);
- brw->no_batch_wrap = false;
+ brw->batch.no_wrap = false;
if (!brw_batch_has_aperture_space(brw, 0)) {
if (!fail_next) {
bool use_batch_first;
bool needs_sol_reset;
bool state_base_address_emitted;
+ bool no_wrap;
struct brw_reloc_list batch_relocs;
struct brw_reloc_list state_relocs;
uint32_t reset_count;
struct intel_batchbuffer batch;
- bool no_batch_wrap;
struct {
struct brw_bo *bo;
* brw->ctx.NewDriverState.
*/
if (brw->ctx.NewDriverState) {
- brw->no_batch_wrap = true;
+ brw->batch.no_wrap = true;
brw_upload_render_state(brw);
}
brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);
- brw->no_batch_wrap = false;
+ brw->batch.no_wrap = false;
if (!brw_batch_has_aperture_space(brw, 0)) {
if (!fail_next) {
intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
brw_require_statebuffer_space(brw, 600);
intel_batchbuffer_save_state(brw);
- brw->no_batch_wrap = true;
+ brw->batch.no_wrap = true;
#if GEN_GEN == 6
/* Emit workaround flushes when we switch from drawing to blorping. */
blorp_exec(batch, params);
- brw->no_batch_wrap = false;
+ brw->batch.no_wrap = false;
/* Check if the blorp op we just did would make our batch likely to fail to
* map all the BOs into the GPU at batch exec time later. If so, flush the
const unsigned batch_used = USED_BATCH(*batch) * 4;
if (batch_used + sz >= BATCH_SZ) {
- if (!brw->no_batch_wrap) {
+ if (!batch->no_wrap) {
intel_batchbuffer_flush(brw);
} else {
const unsigned new_size =
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- brw->no_batch_wrap = true;
+ brw->batch.no_wrap = true;
/* Capture the closing pipeline statistics register values necessary to
* support query objects (in the non-hardware context world).
intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
}
- brw->no_batch_wrap = false;
+ brw->batch.no_wrap = false;
}
static void
return 0;
/* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!brw->no_batch_wrap);
+ assert(!brw->batch.no_wrap);
brw_finish_batch(brw);
intel_upload_finish(brw);
uint32_t offset = ALIGN(batch->state_used, alignment);
if (offset + size >= STATE_SZ) {
- if (!brw->no_batch_wrap) {
+ if (!batch->no_wrap) {
intel_batchbuffer_flush(brw);
offset = ALIGN(batch->state_used, alignment);
} else {