Drop GLcontext typedef and use struct gl_context instead
[mesa.git] / src / mesa / drivers / dri / i965 / brw_queryobj.c
index 3f47a68049fd299d04bbe7db78e152c2c04b8de8..f28f28663ea728cce8ce33c2abfdf482ea1ed20f 100644 (file)
@@ -55,20 +55,24 @@ brw_queryobj_get_results(struct brw_query_object *query)
    if (query->bo == NULL)
       return;
 
-   /* Map and count the pixels from the current query BO */
-   dri_bo_map(query->bo, GL_FALSE);
+   drm_intel_bo_map(query->bo, GL_FALSE);
    results = query->bo->virtual;
-   for (i = query->first_index; i <= query->last_index; i++) {
-      query->Base.Result += results[i * 2 + 1] - results[i * 2];
+   if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
+      query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
+   } else {
+      /* Map and count the pixels from the current query BO */
+      for (i = query->first_index; i <= query->last_index; i++) {
+        query->Base.Result += results[i * 2 + 1] - results[i * 2];
+      }
    }
-   dri_bo_unmap(query->bo);
+   drm_intel_bo_unmap(query->bo);
 
-   dri_bo_unreference(query->bo);
+   drm_intel_bo_unreference(query->bo);
    query->bo = NULL;
 }
 
 static struct gl_query_object *
-brw_new_query_object(GLcontext *ctx, GLuint id)
+brw_new_query_object(struct gl_context *ctx, GLuint id)
 {
    struct brw_query_object *query;
 
@@ -83,59 +87,117 @@ brw_new_query_object(GLcontext *ctx, GLuint id)
 }
 
 static void
-brw_delete_query(GLcontext *ctx, struct gl_query_object *q)
+brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   dri_bo_unreference(query->bo);
+   drm_intel_bo_unreference(query->bo);
    free(query);
 }
 
 static void
-brw_begin_query(GLcontext *ctx, struct gl_query_object *q)
+brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   /* Reset our driver's tracking of query state. */
-   dri_bo_unreference(query->bo);
-   query->bo = NULL;
-   query->first_index = -1;
-   query->last_index = -1;
-
-   brw->query.obj = query;
-   intel->stats_wm++;
+   if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
+      drm_intel_bo_unreference(query->bo);
+      query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query",
+                                    4096, 4096);
+
+      if (intel->gen >= 6) {
+         BEGIN_BATCH(4);
+         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+         OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
+         OUT_RELOC(query->bo,
+                 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                 PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                 0);
+         OUT_BATCH(0);
+         ADVANCE_BATCH();
+
+      } else {
+         BEGIN_BATCH(4);
+         OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+                 PIPE_CONTROL_WRITE_TIMESTAMP);
+         OUT_RELOC(query->bo,
+                 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                 PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                 0);
+         OUT_BATCH(0);
+         OUT_BATCH(0);
+         ADVANCE_BATCH();
+      }
+   } else {
+      /* Reset our driver's tracking of query state. */
+      drm_intel_bo_unreference(query->bo);
+      query->bo = NULL;
+      query->first_index = -1;
+      query->last_index = -1;
+
+      brw->query.obj = query;
+      intel->stats_wm++;
+   }
 }
 
 /**
  * Begin the ARB_occlusion_query query on a query object.
  */
 static void
-brw_end_query(GLcontext *ctx, struct gl_query_object *q)
+brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   /* Flush the batchbuffer in case it has writes to our query BO.
-    * Have later queries write to a new query BO so that further rendering
-    * doesn't delay the collection of our results.
-    */
-   if (query->bo) {
-      brw_emit_query_end(brw);
-      intel_batchbuffer_flush(intel->batch);
+   if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
+      if (intel->gen >= 6) {
+         BEGIN_BATCH(4);
+         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+         OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
+         OUT_RELOC(query->bo,
+                 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                 PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                 8);
+         OUT_BATCH(0);
+         ADVANCE_BATCH();
+
+      } else {
+         BEGIN_BATCH(4);
+         OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+                 PIPE_CONTROL_WRITE_TIMESTAMP);
+         OUT_RELOC(query->bo,
+                 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                 PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                 8);
+         OUT_BATCH(0);
+         OUT_BATCH(0);
+         ADVANCE_BATCH();
+      }
 
-      dri_bo_unreference(brw->query.bo);
-      brw->query.bo = NULL;
+      intel_batchbuffer_flush(intel->batch);
+   } else {
+      /* Flush the batchbuffer in case it has writes to our query BO.
+       * Have later queries write to a new query BO so that further rendering
+       * doesn't delay the collection of our results.
+       */
+      if (query->bo) {
+        brw_emit_query_end(brw);
+        intel_batchbuffer_flush(intel->batch);
+
+        drm_intel_bo_unreference(brw->query.bo);
+        brw->query.bo = NULL;
+      }
+
+      brw->query.obj = NULL;
+
+      intel->stats_wm--;
    }
-
-   brw->query.obj = NULL;
-
-   intel->stats_wm--;
 }
 
-static void brw_wait_query(GLcontext *ctx, struct gl_query_object *q)
+static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_query_object *query = (struct brw_query_object *)q;
 
@@ -143,7 +205,7 @@ static void brw_wait_query(GLcontext *ctx, struct gl_query_object *q)
    query->Base.Ready = GL_TRUE;
 }
 
-static void brw_check_query(GLcontext *ctx, struct gl_query_object *q)
+static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_query_object *query = (struct brw_query_object *)q;
 
@@ -166,10 +228,10 @@ brw_prepare_query_begin(struct brw_context *brw)
    /* Get a new query BO if we're going to need it. */
    if (brw->query.bo == NULL ||
        brw->query.index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
-      dri_bo_unreference(brw->query.bo);
+      drm_intel_bo_unreference(brw->query.bo);
       brw->query.bo = NULL;
 
-      brw->query.bo = dri_bo_alloc(intel->bufmgr, "query", 4096, 1);
+      brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
       brw->query.index = 0;
    }
 
@@ -187,27 +249,48 @@ brw_emit_query_begin(struct brw_context *brw)
    if (!query || brw->query.active)
       return;
 
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL |
-            PIPE_CONTROL_DEPTH_STALL |
-            PIPE_CONTROL_WRITE_DEPTH_COUNT);
-   /* This object could be mapped cacheable, but we don't have an exposed
-    * mechanism to support that.  Since it's going uncached, tell GEM that
-    * we're writing to it.  The usual clflush should be all that's required
-    * to pick up the results.
-    */
-   OUT_RELOC(brw->query.bo,
-            I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
-            PIPE_CONTROL_GLOBAL_GTT_WRITE |
-            ((brw->query.index * 2) * sizeof(uint64_t)));
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
+   if (intel->gen >= 6) {
+       BEGIN_BATCH(8);
+
+       /* workaround: CS stall required before depth stall. */
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+       OUT_BATCH(PIPE_CONTROL_CS_STALL);
+       OUT_BATCH(0); /* write address */
+       OUT_BATCH(0); /* write data */
+
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+       OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
+                PIPE_CONTROL_WRITE_DEPTH_COUNT);
+       OUT_RELOC(brw->query.bo,
+                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                ((brw->query.index * 2) * sizeof(uint64_t)));
+       OUT_BATCH(0);
+       ADVANCE_BATCH();
+
+   } else {
+       BEGIN_BATCH(4);
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+              PIPE_CONTROL_DEPTH_STALL |
+              PIPE_CONTROL_WRITE_DEPTH_COUNT);
+       /* This object could be mapped cacheable, but we don't have an exposed
+       * mechanism to support that.  Since it's going uncached, tell GEM that
+       * we're writing to it.  The usual clflush should be all that's required
+       * to pick up the results.
+       */
+       OUT_RELOC(brw->query.bo,
+              I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+              PIPE_CONTROL_GLOBAL_GTT_WRITE |
+              ((brw->query.index * 2) * sizeof(uint64_t)));
+       OUT_BATCH(0);
+       OUT_BATCH(0);
+       ADVANCE_BATCH();
+   }
 
    if (query->bo != brw->query.bo) {
       if (query->bo != NULL)
         brw_queryobj_get_results(query);
-      dri_bo_reference(brw->query.bo);
+      drm_intel_bo_reference(brw->query.bo);
       query->bo = brw->query.bo;
       query->first_index = brw->query.index;
    }
@@ -224,17 +307,37 @@ brw_emit_query_end(struct brw_context *brw)
    if (!brw->query.active)
       return;
 
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL |
-            PIPE_CONTROL_DEPTH_STALL |
-            PIPE_CONTROL_WRITE_DEPTH_COUNT);
-   OUT_RELOC(brw->query.bo,
-            I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
-            PIPE_CONTROL_GLOBAL_GTT_WRITE |
-            ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
+   if (intel->gen >= 6) {
+       BEGIN_BATCH(8);
+       /* workaround: CS stall required before depth stall. */
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+       OUT_BATCH(PIPE_CONTROL_CS_STALL);
+       OUT_BATCH(0); /* write address */
+       OUT_BATCH(0); /* write data */
+
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+       OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
+                PIPE_CONTROL_WRITE_DEPTH_COUNT);
+       OUT_RELOC(brw->query.bo,
+                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                PIPE_CONTROL_GLOBAL_GTT_WRITE |
+                ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
+       OUT_BATCH(0);
+       ADVANCE_BATCH();
+
+   } else {
+       BEGIN_BATCH(4);
+       OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+              PIPE_CONTROL_DEPTH_STALL |
+              PIPE_CONTROL_WRITE_DEPTH_COUNT);
+       OUT_RELOC(brw->query.bo,
+              I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+              PIPE_CONTROL_GLOBAL_GTT_WRITE |
+              ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
+       OUT_BATCH(0);
+       OUT_BATCH(0);
+       ADVANCE_BATCH();
+   }
 
    brw->query.active = GL_FALSE;
    brw->query.index++;