intel_fbo_init(brw);
- intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
+ intel_batchbuffer_init(screen, &brw->batch);
if (brw->gen >= 6) {
/* Create a new hardware context. Using a hardware context means that
}
+/* Initialize the batchbuffer.  Takes the intel_screen so it can pull the
+ * bufmgr, device info, and kernel feature flags from one place, rather than
+ * reaching back into the enclosing brw_context via container_of(). */
void
-intel_batchbuffer_init(struct intel_batchbuffer *batch,
- struct brw_bufmgr *bufmgr,
- bool has_llc)
+intel_batchbuffer_init(struct intel_screen *screen,
+ struct intel_batchbuffer *batch)
{
- struct brw_context *brw = container_of(batch, brw, batch);
+ struct brw_bufmgr *bufmgr = screen->bufmgr;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+ /* On non-LLC platforms, point the batch map at a malloc'd CPU copy. */
- if (!has_llc) {
+ if (!devinfo->has_llc) {
+ /* NOTE(review): malloc result is unchecked here -- confirm the driver's
+  * OOM policy before relying on cpu_map being non-NULL. */
batch->cpu_map = malloc(BATCH_SZ);
batch->map = batch->cpu_map;
batch->map_next = batch->cpu_map;
}
+ /* Honor the kernel's EXEC_BATCH_FIRST capability when advertised. */
batch->use_batch_first =
- brw->screen->kernel_features & KERNEL_ALLOWS_EXEC_BATCH_FIRST;
+ screen->kernel_features & KERNEL_ALLOWS_EXEC_BATCH_FIRST;
/* PIPE_CONTROL needs a w/a but only on gen6 */
batch->valid_reloc_flags = EXEC_OBJECT_WRITE;
- if (brw->gen == 6)
+ if (devinfo->gen == 6)
batch->valid_reloc_flags |= EXEC_OBJECT_NEEDS_GTT;
- intel_batchbuffer_reset(batch, bufmgr, has_llc);
+ intel_batchbuffer_reset(batch, bufmgr, devinfo->has_llc);
}
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
struct intel_batchbuffer;
-void intel_batchbuffer_init(struct intel_batchbuffer *batch,
- struct brw_bufmgr *bufmgr,
- bool has_llc);
+/* Initialize @batch; @screen supplies the bufmgr, device info, and kernel
+ * feature flags that were formerly passed as individual parameters. */
+void intel_batchbuffer_init(struct intel_screen *screen,
+ struct intel_batchbuffer *batch);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);