* This is required for transform feedback buffer offsets, query objects,
* and also allows us to reduce how much state we have to emit.
*/
- intel->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
+ brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
- if (!intel->hw_ctx) {
+ if (!brw->hw_ctx) {
fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
ralloc_free(brw);
return false;
dri_bufmgr *bufmgr;
+ drm_intel_context *hw_ctx;
+
/**
* Set if rendering has occurred to the drawable's front buffer.
*
struct gl_context *ctx = &intel->ctx;
struct brw_query_object *query = brw->query.obj;
- if (intel->hw_ctx)
+ if (brw->hw_ctx)
return;
/* Skip if we're not doing any queries, or we've already recorded the
void
brw_emit_query_end(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
struct brw_query_object *query = brw->query.obj;
- if (intel->hw_ctx)
+ if (brw->hw_ctx)
return;
if (!brw->query.begin_emitted)
* right away rather than doing it via state atoms. This saves a small
* amount of overhead on every draw call.
*/
- if (!intel->hw_ctx)
+ if (!brw->hw_ctx)
return;
brw_upload_invariant_state(brw);
static void
brw_destroy_context(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
/* Force a report. */
brw->shader_time.report_time = 0;
free(brw->curbe.last_buf);
free(brw->curbe.next_buf);
- drm_intel_gem_context_destroy(intel->hw_ctx);
+ drm_intel_gem_context_destroy(brw->hw_ctx);
}
/**
* would otherwise be stored in the context (which for all intents and
* purposes means everything).
*/
- if (intel->hw_ctx == NULL)
+ if (brw->hw_ctx == NULL)
brw->state.dirty.brw |= BRW_NEW_CONTEXT;
brw->state.dirty.brw |= BRW_NEW_BATCH;
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB))
brw_annotate_aub(brw);
- if (intel->hw_ctx == NULL || batch->is_blit) {
+ if (brw->hw_ctx == NULL || batch->is_blit) {
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
flags);
} else {
- ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+ ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
4 * batch->used, flags);
}
}
bool has_llc;
bool has_swizzling;
- drm_intel_context *hw_ctx;
-
struct intel_batchbuffer batch;
drm_intel_bo *first_post_swapbuffers_batch;