}
}
+ if (brw_init_pipe_control(brw, devinfo)) {
+ *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
+ intelDestroyContext(driContextPriv);
+ return false;
+ }
+
brw_init_state(brw);
intelInitExtensions(ctx);
if (ctx->swrast_context)
_swrast_DestroyContext(&brw->ctx);
+ brw_fini_pipe_control(brw);
intel_batchbuffer_free(brw);
drm_intel_bo_unreference(brw->throttle_batch[1]);
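For orientation: the two hunks above are the matching halves of the new setup/teardown pair. Condensed, with unrelated context elided (an illustrative summary of the patch's structure, not literal Mesa code):

   /* intelCreateContext(), abridged: */
   if (brw_init_pipe_control(brw, devinfo)) {   /* allocates workaround_bo on gen6+ */
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);      /* runs the teardown below */
      return false;
   }
   brw_init_state(brw);

   /* intelDestroyContext(), abridged: */
   brw_fini_pipe_control(brw);                  /* unreferences workaround_bo */
   intel_batchbuffer_free(brw);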
drm_intel_bo *bo;
/** Last BO submitted to the hardware. Used for glFinish(). */
drm_intel_bo *last_bo;
- /** BO for post-sync nonzero writes for gen6 workaround. */
- drm_intel_bo *workaround_bo;
uint16_t emit, total;
uint16_t used, reserved_space;
enum brw_gpu_ring ring;
bool needs_sol_reset;
- uint8_t pipe_controls_since_last_cs_stall;
-
struct {
uint16_t used;
int reloc_count;
drm_intel_context *hw_ctx;
+ /** BO for post-sync nonzero writes for gen6 workaround. */
+ drm_intel_bo *workaround_bo;
+ uint8_t pipe_controls_since_last_cs_stall;
+
/**
* Set of drm_intel_bo * that have been rendered to within this batchbuffer
* and would need flushing before being used from another cache domain that
const struct intel_mipmap_tree *mt);
/* brw_pipe_control.c */
+int brw_init_pipe_control(struct brw_context *brw,
+ const struct brw_device_info *info);
+void brw_fini_pipe_control(struct brw_context *brw);
+
void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
drm_intel_bo *bo, uint32_t offset,
if (brw->gen == 7 && !brw->is_haswell) {
if (flags & PIPE_CONTROL_CS_STALL) {
/* If we're doing a CS stall, reset the counter and carry on. */
- brw->batch.pipe_controls_since_last_cs_stall = 0;
+ brw->pipe_controls_since_last_cs_stall = 0;
return 0;
}
/* If this is the fourth pipe control without a CS stall, do one now. */
- if (++brw->batch.pipe_controls_since_last_cs_stall == 4) {
- brw->batch.pipe_controls_since_last_cs_stall = 0;
+ if (++brw->pipe_controls_since_last_cs_stall == 4) {
+ brw->pipe_controls_since_last_cs_stall = 0;
return PIPE_CONTROL_CS_STALL;
}
}
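Nothing in this hunk changes the workaround's logic; only the counter's home moves from the batch struct to brw_context. An illustrative trace of four consecutive PIPE_CONTROLs without PIPE_CONTROL_CS_STALL set, assuming the containing helper (not shown in the hunk; `workaround` is a placeholder name) returns extra flags for the caller to OR in, as the return statements above suggest:

   uint32_t extra;
   extra = workaround(brw, flags);   /* 1st: counter 0 -> 1, extra == 0 */
   extra = workaround(brw, flags);   /* 2nd: counter 1 -> 2, extra == 0 */
   extra = workaround(brw, flags);   /* 3rd: counter 2 -> 3, extra == 0 */
   extra = workaround(brw, flags);   /* 4th: counter resets to 0,
                                      *      extra == PIPE_CONTROL_CS_STALL */

A side effect of the move: the counter now survives intel_batchbuffer_reset(), whose per-batch clear is deleted later in this patch, so the every-fourth accounting spans batch boundaries.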
brw_emit_pipe_control_write(brw,
PIPE_CONTROL_WRITE_IMMEDIATE
| PIPE_CONTROL_DEPTH_STALL,
- brw->batch.workaround_bo, 0,
+ brw->workaround_bo, 0,
0, 0);
}
brw_emit_pipe_control_write(brw,
PIPE_CONTROL_CS_STALL
| PIPE_CONTROL_WRITE_IMMEDIATE,
- brw->batch.workaround_bo, 0,
+ brw->workaround_bo, 0,
0, 0);
}
PIPE_CONTROL_STALL_AT_SCOREBOARD);
brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
- brw->batch.workaround_bo, 0, 0, 0);
+ brw->workaround_bo, 0, 0, 0);
}
/* Emit a pipelined flush to either flush render and texture cache for
brw_render_cache_set_clear(brw);
}
+
+int
+brw_init_pipe_control(struct brw_context *brw,
+ const struct brw_device_info *devinfo)
+{
+ if (devinfo->gen < 6)
+ return 0;
+
+ /* We can't just use brw_state_batch to get a chunk of space for
+ * the gen6 workaround because it involves actually writing to
+ * the buffer, and the kernel doesn't let us write to the batch.
+ */
+ brw->workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
+ "pipe_control workaround",
+ 4096, 4096);
+ if (brw->workaround_bo == NULL)
+ return -ENOMEM;
+
+ brw->pipe_controls_since_last_cs_stall = 0;
+
+ return 0;
+}
+
+void
+brw_fini_pipe_control(struct brw_context *brw)
+{
+ drm_intel_bo_unreference(brw->workaround_bo);
+}
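brw_fini_pipe_control() is called unconditionally from intelDestroyContext(), including on gen < 6 (where init returned before allocating anything) and on the early-failure path in the first hunk. That relies on two properties: brw_context is zero-allocated, and libdrm's drm_intel_bo_unreference() is a no-op for a NULL bo. A minimal standalone sketch of the invariant (toy types, for illustration only; not Mesa or libdrm code):

   #include <stddef.h>

   struct toy_bo { int refcount; };
   struct toy_ctx { struct toy_bo *workaround_bo; };   /* calloc'd, so NULL */

   static void toy_bo_unreference(struct toy_bo *bo)
   {
      if (bo == NULL)   /* mirrors libdrm's NULL check */
         return;
      bo->refcount--;
   }

   static void toy_fini_pipe_control(struct toy_ctx *ctx)
   {
      /* Safe whether init succeeded, failed, or never ran. */
      toy_bo_unreference(ctx->workaround_bo);
   }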
*/
brw_emit_pipe_control_write(brw,
PIPE_CONTROL_WRITE_IMMEDIATE,
- brw->batch.workaround_bo, 0, 0, 0);
+ brw->workaround_bo, 0, 0, 0);
/* Emit 3DSTATE_WM_HZ_OP again to disable the state overrides. */
BEGIN_BATCH(5);
{
intel_batchbuffer_reset(brw);
- if (brw->gen >= 6) {
- /* We can't just use brw_state_batch to get a chunk of space for
- * the gen6 workaround because it involves actually writing to
- * the buffer, and the kernel doesn't let us write to the batch.
- */
- brw->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
- "pipe_control workaround",
- 4096, 4096);
- }
-
if (!brw->has_llc) {
brw->batch.cpu_map = malloc(BATCH_SZ);
brw->batch.map = brw->batch.cpu_map;
brw->batch.state_batch_offset = brw->batch.bo->size;
brw->batch.used = 0;
brw->batch.needs_sol_reset = false;
- brw->batch.pipe_controls_since_last_cs_stall = 0;
/* We don't know what ring the new batch will be sent to until we see the
* first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
free(brw->batch.cpu_map);
drm_intel_bo_unreference(brw->batch.last_bo);
drm_intel_bo_unreference(brw->batch.bo);
- drm_intel_bo_unreference(brw->batch.workaround_bo);
}
static void
/* Set a value in a BO to a known quantity. The workaround BO already
* exists and doesn't contain anything important, so we may as well use it.
*/
- drm_intel_bo_map(brw->batch.workaround_bo, true);
- data = brw->batch.workaround_bo->virtual;
+ drm_intel_bo_map(brw->workaround_bo, true);
+ data = brw->workaround_bo->virtual;
data[offset] = 0xffffffff;
- drm_intel_bo_unmap(brw->batch.workaround_bo);
+ drm_intel_bo_unmap(brw->workaround_bo);
/* Write the register. */
BEGIN_BATCH(3);
BEGIN_BATCH(3);
OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
OUT_BATCH(reg);
- OUT_RELOC(brw->batch.workaround_bo,
+ OUT_RELOC(brw->workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset * sizeof(uint32_t));
ADVANCE_BATCH();
intel_batchbuffer_flush(brw);
/* Check whether the value got written. */
- drm_intel_bo_map(brw->batch.workaround_bo, false);
- data = brw->batch.workaround_bo->virtual;
+ drm_intel_bo_map(brw->workaround_bo, false);
+ data = brw->workaround_bo->virtual;
bool success = data[offset] == expected_value;
- drm_intel_bo_unmap(brw->batch.workaround_bo);
+ drm_intel_bo_unmap(brw->workaround_bo);
result = success;
/* Set a value in a BO to a known quantity. The workaround BO already
* exists and doesn't contain anything important, so we may as well use it.
*/
- drm_intel_bo_map(brw->batch.workaround_bo, true);
- data = brw->batch.workaround_bo->virtual;
+ drm_intel_bo_map(brw->workaround_bo, true);
+ data = brw->workaround_bo->virtual;
data[offset] = 0xffffffff;
- drm_intel_bo_unmap(brw->batch.workaround_bo);
+ drm_intel_bo_unmap(brw->workaround_bo);
/* Write OACONTROL. */
BEGIN_BATCH(3);
BEGIN_BATCH(3);
OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
OUT_BATCH(OACONTROL);
- OUT_RELOC(brw->batch.workaround_bo,
+ OUT_RELOC(brw->workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset * sizeof(uint32_t));
ADVANCE_BATCH();
intel_batchbuffer_flush(brw);
/* Check whether the value got written. */
- drm_intel_bo_map(brw->batch.workaround_bo, false);
- data = brw->batch.workaround_bo->virtual;
+ drm_intel_bo_map(brw->workaround_bo, false);
+ data = brw->workaround_bo->virtual;
bool success = data[offset] == expected_value;
- drm_intel_bo_unmap(brw->batch.workaround_bo);
+ drm_intel_bo_unmap(brw->workaround_bo);
result = success;