 brw_dispatch_compute_common(struct gl_context *ctx)
 {
    struct brw_context *brw = brw_context(ctx);
-   bool fail_next = false;
+   bool fail_next;
    if (!_mesa_check_conditional_render(ctx))
       return;
    intel_batchbuffer_require_space(brw, 600);
    brw_require_statebuffer_space(brw, 2500);
    intel_batchbuffer_save_state(brw);
+   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
 retry:
    brw->batch.no_wrap = true;
 {
    struct brw_context *brw = brw_context(ctx);
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   bool fail_next = false;
+   bool fail_next;
    /* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
     * atoms that happen on every draw call.
    intel_batchbuffer_require_space(brw, 1500);
    brw_require_statebuffer_space(brw, 2400);
    intel_batchbuffer_save_state(brw);
+   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
    if (brw->num_instances != prim->num_instances ||
        brw->basevertex != prim->basevertex ||
    assert(batch->blorp->driver_ctx == batch->driver_batch);
    struct brw_context *brw = batch->driver_batch;
    struct gl_context *ctx = &brw->ctx;
-   bool check_aperture_failed_once = false;
+   bool check_aperture_failed_once;
 #if GEN_GEN >= 11
    /* The PIPE_CONTROL command description says:
    intel_batchbuffer_require_space(brw, 1400);
    brw_require_statebuffer_space(brw, 600);
    intel_batchbuffer_save_state(brw);
+   check_aperture_failed_once = intel_batchbuffer_saved_state_is_empty(brw);
    brw->batch.no_wrap = true;
 #if GEN_GEN == 6
    brw->batch.saved.exec_count = brw->batch.exec_count;
 }
+bool
+intel_batchbuffer_saved_state_is_empty(struct brw_context *brw)
+{
+   struct intel_batchbuffer *batch = &brw->batch;
+   return (batch->saved.map_next == batch->batch.map);
+}
+
 void
 intel_batchbuffer_reset_to_saved(struct brw_context *brw)
 {
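The callers patched above all share the same save/retry shape around the batch
aperture check. What follows is an illustrative sketch, not part of the patch:
a self-contained model of that shape in which every name (fake_batch,
emit_draw, has_aperture_space, and so on) is invented for the example rather
than taken from the i965 API, and the reset-and-retry branch only mirrors the
shape of the surrounding driver code these hunks feed into. The change made by
the patch itself is visible in the hunks: the retry flag is now seeded from
whether the batch was already empty when its state was saved, instead of
starting out false.

/* Editor's sketch, not driver code: all names below are invented to
 * illustrate the save/retry pattern. */
#include <stdbool.h>
#include <stdio.h>

struct fake_batch {
   int used;        /* bytes emitted since the last flush */
   int saved_used;  /* snapshot taken by save_state()     */
   int aperture;    /* pretend aperture limit             */
};

static void save_state(struct fake_batch *b)     { b->saved_used = b->used; }
static void reset_to_saved(struct fake_batch *b) { b->used = b->saved_used; }
static void flush(struct fake_batch *b)          { b->used = 0; b->saved_used = 0; }

static bool saved_state_is_empty(const struct fake_batch *b)
{
   return b->saved_used == 0;
}

static bool has_aperture_space(const struct fake_batch *b)
{
   return b->used <= b->aperture;
}

static void
emit_draw(struct fake_batch *b, int size)
{
   bool fail_next;

   save_state(b);
   /* Seed the flag from the saved state: if the batch was empty at save
    * time, resetting to the saved state and flushing cannot free any space,
    * so an aperture failure goes straight to the report-and-flush path. */
   fail_next = saved_state_is_empty(b);

retry:
   b->used += size;   /* stand-in for emitting state and commands */

   if (!has_aperture_space(b)) {
      if (!fail_next) {
         /* Undo this draw, flush the earlier work, and try once more. */
         reset_to_saved(b);
         flush(b);
         fail_next = true;
         goto retry;
      }
      fprintf(stderr, "draw exceeded available aperture space\n");
      flush(b);
   }
}

int
main(void)
{
   struct fake_batch b = { .used = 0, .saved_used = 0, .aperture = 100 };

   emit_draw(&b, 60);    /* fits */
   emit_draw(&b, 60);    /* overflows; earlier work is flushed, then the draw is retried */

   struct fake_batch c = { .used = 0, .saved_used = 0, .aperture = 100 };
   emit_draw(&c, 200);   /* empty at save time: the flag starts true, so no retry pass */
   return 0;
}

In the model, a draw recorded into an already-empty batch that still fails the
aperture check is reported directly instead of taking a pointless
reset-and-retry pass, which is the behaviour the new
intel_batchbuffer_saved_state_is_empty() helper appears intended to give the
real callers.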
 void intel_batchbuffer_init(struct brw_context *brw);
 void intel_batchbuffer_free(struct intel_batchbuffer *batch);
 void intel_batchbuffer_save_state(struct brw_context *brw);
+bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
 void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
 void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
 int _intel_batchbuffer_flush_fence(struct brw_context *brw,