i965: Create a helper function for emitting PIPE_CONTROL writes.
[mesa.git] src/mesa/drivers/dri/i965/brw_queryobj.c
index 660268858843694bc422f3caf124304f651eae43..dc26c0864e17a168a1269cbe9ada63d20c58a7dc 100644
 /**
  * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
  */
-static void
-write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
+void
+brw_write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
-   struct intel_context *intel = &brw->intel;
-   if (intel->gen >= 6) {
-      /* Emit workaround flushes: */
-      if (intel->gen == 6) {
-         /* The timestamp write below is a non-zero post-sync op, which on
-          * Gen6 necessitates a CS stall.  CS stalls need stall at scoreboard
-          * set.  See the comments for intel_emit_post_sync_nonzero_flush().
-          */
-         BEGIN_BATCH(4);
-         OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-         OUT_BATCH(PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD);
-         OUT_BATCH(0);
-         OUT_BATCH(0);
-         ADVANCE_BATCH();
-      }
-
-      BEGIN_BATCH(5);
-      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
-      OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
-      OUT_RELOC(query_bo,
-                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
-                PIPE_CONTROL_GLOBAL_GTT_WRITE |
-                idx * sizeof(uint64_t));
-      OUT_BATCH(0);
-      OUT_BATCH(0);
-      ADVANCE_BATCH();
-   } else {
-      BEGIN_BATCH(4);
-      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
-                PIPE_CONTROL_WRITE_TIMESTAMP);
-      OUT_RELOC(query_bo,
-                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
-                PIPE_CONTROL_GLOBAL_GTT_WRITE |
-                idx * sizeof(uint64_t));
-      OUT_BATCH(0);
-      OUT_BATCH(0);
-      ADVANCE_BATCH();
+   if (brw->gen == 6) {
+      /* Emit Sandybridge workaround flush: */
+      brw_emit_pipe_control_flush(brw,
+                                  PIPE_CONTROL_CS_STALL |
+                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
    }
+
+   brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_TIMESTAMP,
+                               query_bo, idx * sizeof(uint64_t), 0, 0);
 }
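
For reference, a minimal sketch of the two helpers this commit calls, reconstructed from the open-coded PIPE_CONTROL sequences removed above. The names and the (brw, flags, bo, offset, imm_lower, imm_upper) signature are inferred from the call sites; the actual definitions land in a different file and may handle more cases:

/* Sketch only, not the commit's actual definitions: reconstructed from
 * the open-coded PIPE_CONTROL batches this patch removes.
 */
void
brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
{
   /* Matches the removed Gen6 workaround sequence: a 4-DWord PIPE_CONTROL
    * with no post-sync write, just the given stall/flush bits.
    */
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(flags);
   OUT_BATCH(0); /* no address */
   OUT_BATCH(0); /* no immediate data */
   ADVANCE_BATCH();
}

void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                            drm_intel_bo *bo, uint32_t offset,
                            uint32_t imm_lower, uint32_t imm_upper)
{
   if (brw->gen >= 6) {
      /* 5-DWord form: the flags move out of the header into DWord 1. */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else {
      /* Gen4-5: the flags live in the header DWord itself. */
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) | flags);
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   }
}

Note that the Gen6 "non-zero post-sync op" workaround stays at the call site (the CS-stall flush in brw_write_timestamp above) rather than being folded into the write helper.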
 
 /**
@@ -95,24 +66,12 @@ write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 static void
 write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
 {
-   struct intel_context *intel = &brw->intel;
-   assert(intel->gen < 6);
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
-             PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_DEPTH_COUNT);
-   /* This object could be mapped cacheable, but we don't have an exposed
-    * mechanism to support that.  Since it's going uncached, tell GEM that
-    * we're writing to it.  The usual clflush should be all that's required
-    * to pick up the results.
-    */
-   OUT_RELOC(query_bo,
-             I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
-             PIPE_CONTROL_GLOBAL_GTT_WRITE |
-             (idx * sizeof(uint64_t)));
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
+   assert(brw->gen < 6);
+
+   brw_emit_pipe_control_write(brw,
+                               PIPE_CONTROL_WRITE_DEPTH_COUNT
+                               | PIPE_CONTROL_DEPTH_STALL,
+                               query_bo, idx * sizeof(uint64_t), 0, 0);
 }
 
 /**
@@ -123,12 +82,11 @@ brw_queryobj_get_results(struct gl_context *ctx,
                         struct brw_query_object *query)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
 
    int i;
    uint64_t *results;
 
-   assert(intel->gen < 6);
+   assert(brw->gen < 6);
 
    if (query->bo == NULL)
       return;
@@ -140,7 +98,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
    if (drm_intel_bo_references(brw->batch.bo, query->bo))
       intel_batchbuffer_flush(brw);
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       if (drm_intel_bo_busy(query->bo)) {
          perf_debug("Stalling on the GPU waiting for a query object.\n");
       }
@@ -245,10 +203,9 @@ static void
 brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   assert(intel->gen < 6);
+   assert(brw->gen < 6);
 
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED_EXT:
@@ -273,7 +230,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        */
       drm_intel_bo_unreference(query->bo);
       query->bo = drm_intel_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
-      write_timestamp(brw, query->bo, 0);
+      brw_write_timestamp(brw, query->bo, 0);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
@@ -296,7 +253,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * avoid them when necessary.  They're required for occlusion queries,
        * so turn them on now.
        */
-      intel->stats_wm++;
+      brw->stats_wm++;
       brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
@@ -318,15 +275,14 @@ static void
 brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   assert(intel->gen < 6);
+   assert(brw->gen < 6);
 
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED_EXT:
       /* Write the final timestamp. */
-      write_timestamp(brw, query->bo, 1);
+      brw_write_timestamp(brw, query->bo, 1);
       break;
 
    case GL_ANY_SAMPLES_PASSED:
@@ -355,7 +311,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
 
       brw->query.obj = NULL;
 
-      intel->stats_wm--;
+      brw->stats_wm--;
       brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
@@ -375,7 +331,7 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   assert(intel_context(ctx)->gen < 6);
+   assert(brw_context(ctx)->gen < 6);
 
    brw_queryobj_get_results(ctx, query);
    query->Base.Ready = true;
@@ -390,10 +346,9 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
 static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   assert(intel->gen < 6);
+   assert(brw->gen < 6);
 
    /* From the GL_ARB_occlusion_query spec:
     *
@@ -421,9 +376,8 @@ static void
 ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
 
-   assert(intel->gen < 6);
+   assert(brw->gen < 6);
 
    if (!query->bo || query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
 
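A note on the space check above: a 4096-byte query BO holds 4096 / sizeof(uint64_t) = 512 slots, and the begin/end depth-count pair for index i lands in slots 2*i and 2*i + 1, so once last_index * 2 + 1 would reach slot 512 a fresh BO is allocated. One BO therefore covers 256 snapshot pairs.
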
@@ -463,8 +417,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
 void
 brw_emit_query_begin(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   struct gl_context *ctx = &intel->ctx;
+   struct gl_context *ctx = &brw->ctx;
    struct brw_query_object *query = brw->query.obj;
 
    if (brw->hw_ctx)
@@ -523,7 +476,7 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
 
    drm_intel_bo_unreference(query->bo);
    query->bo = drm_intel_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
-   write_timestamp(brw, query->bo, 0);
+   brw_write_timestamp(brw, query->bo, 0);
 }
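
brw_query_counter is the driver hook behind glQueryCounter (ARB_timer_query). For orientation, a typical application-side use of the path it implements; this is standard GL API, not part of this patch:

   GLuint q;
   GLint64 ns;
   glGenQueries(1, &q);
   glQueryCounter(q, GL_TIMESTAMP);   /* reaches brw_query_counter() */
   /* ... other rendering ... */
   glGetQueryObjecti64v(q, GL_QUERY_RESULT, &ns); /* GPU timestamp, in ns */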
 
 /**