i965/program_cache: Cast the key to char * before adding key_size
[mesa.git] / src / mesa / drivers / dri / i965 / gen6_queryobj.c
index bbd3c44fb0f89d0430147fa9f14d08c3342a5810..ada28a4a8241bf672c094472f2b78f55564726a2 100644 (file)
@@ -60,20 +60,31 @@ set_query_availability(struct brw_context *brw, struct brw_query_object *query,
     */
    if (brw->ctx.Extensions.ARB_query_buffer_object &&
        brw_is_query_pipelined(query)) {
-      brw_emit_pipe_control_write(brw,
-                                  PIPE_CONTROL_WRITE_IMMEDIATE,
+      unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;
+
+      if (available) {
+         /* Order available *after* the query results. */
+         flags |= PIPE_CONTROL_FLUSH_ENABLE;
+      } else {
+         /* Make it unavailable *before* any pipelined reads. */
+         flags |= PIPE_CONTROL_CS_STALL;
+      }
+
+      brw_emit_pipe_control_write(brw, flags,
                                   query->bo, 2 * sizeof(uint64_t),
-                                  available, 0);
+                                  available);
    }
 }
 
 static void
 write_primitives_generated(struct brw_context *brw,
-                           drm_intel_bo *query_bo, int stream, int idx)
+                           struct brw_bo *query_bo, int stream, int idx)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    brw_emit_mi_flush(brw);
 
-   if (brw->gen >= 7 && stream > 0) {
+   if (devinfo->gen >= 7 && stream > 0) {
       brw_store_register_mem64(brw, query_bo,
                                GEN7_SO_PRIM_STORAGE_NEEDED(stream),
                                idx * sizeof(uint64_t));
@@ -85,11 +96,13 @@ write_primitives_generated(struct brw_context *brw,
 
 static void
 write_xfb_primitives_written(struct brw_context *brw,
-                             drm_intel_bo *bo, int stream, int idx)
+                             struct brw_bo *bo, int stream, int idx)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    brw_emit_mi_flush(brw);
 
-   if (brw->gen >= 7) {
+   if (devinfo->gen >= 7) {
       brw_store_register_mem64(brw, bo, GEN7_SO_NUM_PRIMS_WRITTEN(stream),
                                idx * sizeof(uint64_t));
    } else {
@@ -98,6 +111,55 @@ write_xfb_primitives_written(struct brw_context *brw,
    }
 }
 
+static void
+write_xfb_overflow_streams(struct gl_context *ctx,
+                           struct brw_bo *bo, int stream, int count,
+                           int idx)
+{
+   struct brw_context *brw = brw_context(ctx);
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   brw_emit_mi_flush(brw);
+
+   for (int i = 0; i < count; i++) {
+      int w_idx = 4 * i + idx;
+      int g_idx = 4 * i + idx + 2;
+
+      if (devinfo->gen >= 7) {
+         brw_store_register_mem64(brw, bo,
+                                  GEN7_SO_NUM_PRIMS_WRITTEN(stream + i),
+                                  g_idx * sizeof(uint64_t));
+         brw_store_register_mem64(brw, bo,
+                                  GEN7_SO_PRIM_STORAGE_NEEDED(stream + i),
+                                  w_idx * sizeof(uint64_t));
+      } else {
+         brw_store_register_mem64(brw, bo,
+                                  GEN6_SO_NUM_PRIMS_WRITTEN,
+                                  g_idx * sizeof(uint64_t));
+         brw_store_register_mem64(brw, bo,
+                                  GEN6_SO_PRIM_STORAGE_NEEDED,
+                                  w_idx * sizeof(uint64_t));
+      }
+   }
+}
+
+static bool
+check_xfb_overflow_streams(uint64_t *results, int count)
+{
+   bool overflow = false;
+
+   for (int i = 0; i < count; i++) {
+      uint64_t *result_i = &results[4 * i];
+
+      if ((result_i[3] - result_i[2]) != (result_i[1] - result_i[0])) {
+         overflow = true;
+         break;
+      }
+   }
+
+   return overflow;
+}
+
 static inline int
 pipeline_target_to_index(int target)
 {
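
Taken together, write_xfb_overflow_streams() and check_xfb_overflow_streams() define the query BO layout for the overflow targets: per stream i, slots 4*i+0/1 hold SO_PRIM_STORAGE_NEEDED at Begin/EndQuery and slots 4*i+2/3 hold SO_NUM_PRIMS_WRITTEN at Begin/EndQuery; a stream overflowed when the two deltas differ (in practice, when fewer primitives were written than needed storage). A minimal host-side sketch of that layout and check, with made-up numbers, not driver code:

/* Standalone illustration of the query BO layout used by the overflow
 * queries (not driver code; the numbers are made up).  Per stream i:
 *   results[4*i + 0]  SO_PRIM_STORAGE_NEEDED snapshot at BeginQuery (idx 0)
 *   results[4*i + 1]  SO_PRIM_STORAGE_NEEDED snapshot at EndQuery   (idx 1)
 *   results[4*i + 2]  SO_NUM_PRIMS_WRITTEN   snapshot at BeginQuery
 *   results[4*i + 3]  SO_NUM_PRIMS_WRITTEN   snapshot at EndQuery
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
stream_overflowed(const uint64_t *r)
{
   /* Same comparison as check_xfb_overflow_streams(): if fewer primitives
    * were written than needed storage, the stream overflowed. */
   return (r[3] - r[2]) != (r[1] - r[0]);
}

int
main(void)
{
   const uint64_t results[8] = {
      0, 100, 0, 100,   /* stream 0: 100 needed storage, 100 written */
      0, 100, 0,  64,   /* stream 1: 100 needed storage, only 64 written */
   };

   for (int i = 0; i < 2; i++)
      printf("stream %d overflowed: %s\n", i,
             stream_overflowed(&results[4 * i]) ? "yes" : "no");
   return 0;
}
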
@@ -108,9 +170,11 @@ pipeline_target_to_index(int target)
 }
 
 static void
-emit_pipeline_stat(struct brw_context *brw, drm_intel_bo *bo,
+emit_pipeline_stat(struct brw_context *brw, struct brw_bo *bo,
                    int stream, int target, int idx)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    /* One source of confusion is the tessellation shader statistics. The
     * hardware has no statistics specific to the TE unit. Ideally we could have
     * the HS primitives for TESS_CONTROL_SHADER_PATCHES_ARB, and the DS
@@ -139,7 +203,7 @@ emit_pipeline_stat(struct brw_context *brw, drm_intel_bo *bo,
    /* Gen6 GS code counts full primitives, that is, it won't count individual
     * triangles in a triangle strip. Use CL_INVOCATION_COUNT for that.
     */
-   if (brw->gen == 6 && target == GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB)
+   if (devinfo->gen == 6 && target == GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB)
       reg = CL_INVOCATION_COUNT;
    assert(reg != 0);
 
@@ -160,41 +224,29 @@ gen6_queryobj_get_results(struct gl_context *ctx,
                           struct brw_query_object *query)
 {
    struct brw_context *brw = brw_context(ctx);
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
 
    if (query->bo == NULL)
       return;
 
-   brw_bo_map(brw, query->bo, false, "query object");
-   uint64_t *results = query->bo->virtual;
+   uint64_t *results = brw_bo_map(brw, query->bo, MAP_READ);
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED:
       /* The query BO contains the starting and ending timestamps.
        * Subtract the two and convert to nanoseconds.
        */
-      query->Base.Result += 80 * (results[1] - results[0]);
+      query->Base.Result = brw_raw_timestamp_delta(brw, results[0], results[1]);
+      query->Base.Result = gen_device_info_timebase_scale(devinfo, query->Base.Result);
       break;
 
    case GL_TIMESTAMP:
-      /* Our timer is a clock that increments every 80ns (regardless of
-       * other clock scaling in the system).  The timestamp register we can
-       * read for glGetTimestamp() masks out the top 32 bits, so we do that
-       * here too to let the two counters be compared against each other.
-       *
-       * If we just multiplied that 32 bits of data by 80, it would roll
-       * over at a non-power-of-two, so an application couldn't use
-       * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
-       * report 36 bits and truncate at that (rolling over 5 times as often
-       * as the HW counter), and when the 32-bit counter rolls over, it
-       * happens to also be at a rollover in the reported value from near
-       * (1<<36) to 0.
-       *
-       * The low 32 bits rolls over in ~343 seconds.  Our 36-bit result
-       * rolls over every ~69 seconds.
-       *
-       * The query BO contains a single timestamp value in results[0].
+      /* The query BO contains a single timestamp value in results[0]. */
+      query->Base.Result = gen_device_info_timebase_scale(devinfo, results[0]);
+
+      /* Ensure the scaled timestamp overflows according to
+       * GL_QUERY_COUNTER_BITS
        */
-      query->Base.Result = 80 * (results[0] & 0xffffffff);
-      query->Base.Result &= (1ull << 36) - 1;
+      query->Base.Result &= (1ull << ctx->Const.QueryCounterBits.Timestamp) - 1;
       break;
 
    case GL_SAMPLES_PASSED_ARB:
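
The rewritten GL_TIMESTAMP path replaces the hard-coded 80 ns tick and 36-bit mask with gen_device_info_timebase_scale() and ctx->Const.QueryCounterBits.Timestamp, but the arithmetic has the same shape. A standalone sketch using the old hard-coded values as assumptions (80 ns per tick, 36 reported bits), to show why the reported value is truncated to a power-of-two range:

/* Sketch of the timestamp post-processing, using the constants from the
 * removed comment as assumptions: an 80 ns timebase and 36 reported
 * counter bits.  The real code takes both from devinfo and
 * ctx->Const.QueryCounterBits.Timestamp. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   const uint64_t ns_per_tick = 80;      /* assumed timebase */
   const unsigned counter_bits = 36;     /* assumed GL_QUERY_COUNTER_BITS */
   const uint64_t mask = (UINT64_C(1) << counter_bits) - 1;

   /* The readable HW register exposes 32 bits; take a value near rollover. */
   uint64_t raw = UINT64_C(0xffffffff);

   uint64_t ns = raw * ns_per_tick;   /* scale raw ticks to nanoseconds */
   uint64_t reported = ns & mask;     /* wrap at a power of two so apps can
                                         use GL_QUERY_COUNTER_BITS */

   printf("raw ticks   %" PRIu64 "\n", raw);
   printf("scaled ns   %" PRIu64 "\n", ns);
   printf("reported    %" PRIu64 " (wraps at 2^%u)\n", reported, counter_bits);
   return 0;
}
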
@@ -225,6 +277,14 @@ gen6_queryobj_get_results(struct gl_context *ctx,
       query->Base.Result = results[1] - results[0];
       break;
 
+   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
+      query->Base.Result = check_xfb_overflow_streams(results, 1);
+      break;
+
+   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
+      query->Base.Result = check_xfb_overflow_streams(results, MAX_VERTEX_STREAMS);
+      break;
+
    case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
       query->Base.Result = (results[1] - results[0]);
       /* Implement the "WaDividePSInvocationCountBy4:HSW,BDW" workaround:
@@ -237,19 +297,19 @@ gen6_queryobj_get_results(struct gl_context *ctx,
        * and correctly emitted the number of pixel shader invocations, but
        * someone forgot to undo the multiply by 4.
        */
-      if (brw->gen == 8 || brw->is_haswell)
+      if (devinfo->gen == 8 || devinfo->is_haswell)
          query->Base.Result /= 4;
       break;
 
    default:
       unreachable("Unrecognized query target in brw_queryobj_get_results()");
    }
-   drm_intel_bo_unmap(query->bo);
+   brw_bo_unmap(query->bo);
 
    /* Now that we've processed the data stored in the query's buffer object,
     * we can release it.
     */
-   drm_intel_bo_unreference(query->bo);
+   brw_bo_unreference(query->bo);
    query->bo = NULL;
 
    query->Base.Ready = true;
@@ -268,8 +328,9 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
    struct brw_query_object *query = (struct brw_query_object *)q;
 
    /* Since we're starting a new query, we need to throw away old results. */
-   drm_intel_bo_unreference(query->bo);
-   query->bo = drm_intel_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
+   brw_bo_unreference(query->bo);
+   query->bo =
+      brw_bo_alloc(brw->bufmgr, "query results", 4096, BRW_MEMZONE_OTHER);
 
    /* For ARB_query_buffer_object: The result is not available */
    set_query_availability(brw, query, false);
@@ -314,6 +375,14 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
       write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 0);
       break;
 
+   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
+      write_xfb_overflow_streams(ctx, query->bo, query->Base.Stream, 1, 0);
+      break;
+
+   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
+      write_xfb_overflow_streams(ctx, query->bo, 0, MAX_VERTEX_STREAMS, 0);
+      break;
+
    case GL_VERTICES_SUBMITTED_ARB:
    case GL_PRIMITIVES_SUBMITTED_ARB:
    case GL_VERTEX_SHADER_INVOCATIONS_ARB:
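
The two new targets come from ARB_transform_feedback_overflow_query. For context, a minimal application-side sketch of issuing such a query (standard GL entry points; a current context and the epoxy loader are assumptions):

/* Application-side sketch: wrap a transform-feedback capture in an overflow
 * query.  A current GL context and the epoxy loader are assumed;
 * GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB requires
 * ARB_transform_feedback_overflow_query. */
#include <epoxy/gl.h>

GLboolean
draw_and_check_overflow(GLuint xfb, GLsizei vertex_count)
{
   GLuint query;
   GLuint overflowed = GL_FALSE;

   glGenQueries(1, &query);

   /* GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB covers every vertex stream;
    * GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB (with glBeginQueryIndexed)
    * would check a single stream. */
   glBeginQuery(GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB, query);

   glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, xfb);
   glBeginTransformFeedback(GL_POINTS);
   glDrawArrays(GL_POINTS, 0, vertex_count);
   glEndTransformFeedback();

   glEndQuery(GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB);

   /* Blocks until the result is available; GL_QUERY_RESULT_AVAILABLE could
    * be polled first to avoid the stall. */
   glGetQueryObjectuiv(query, GL_QUERY_RESULT, &overflowed);
   glDeleteQueries(1, &query);

   return overflowed ? GL_TRUE : GL_FALSE;
}
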
@@ -368,6 +437,15 @@ gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
       write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 1);
       break;
 
+   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
+      write_xfb_overflow_streams(ctx, query->bo, query->Base.Stream, 1, 1);
+      break;
+
+   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
+      write_xfb_overflow_streams(ctx, query->bo, 0, MAX_VERTEX_STREAMS, 1);
+      break;
+
+      /* overflow is computed at readback in gen6_queryobj_get_results() */
    case GL_VERTICES_SUBMITTED_ARB:
    case GL_PRIMITIVES_SUBMITTED_ARB:
    case GL_VERTEX_SHADER_INVOCATIONS_ARB:
@@ -406,7 +484,7 @@ flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
     * (for example, due to being full).  Record that it's been flushed.
     */
    query->flushed = query->flushed ||
-      !drm_intel_bo_references(brw->batch.bo, query->bo);
+                    !brw_batch_references(&brw->batch, query->bo);
 
    if (!query->flushed)
       intel_batchbuffer_flush(brw);
@@ -458,7 +536,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
     */
    flush_batch_if_needed(brw, query);
 
-   if (!drm_intel_bo_busy(query->bo)) {
+   if (!brw_bo_busy(query->bo)) {
       gen6_queryobj_get_results(ctx, query);
    }
 }
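
The last two hunks make up the non-blocking readback path: results are only read once the batch that still references the query BO has been flushed and the BO has gone idle. A self-contained sketch of that pattern, with toy stand-ins (batch_references(), bo_busy(), flush_batch(), read_results()) for the driver calls shown above:

/* Self-contained sketch of the non-blocking CheckQuery pattern above.
 * batch_references(), bo_busy(), flush_batch() and read_results() are toy
 * stand-ins for brw_batch_references(), brw_bo_busy(),
 * intel_batchbuffer_flush() and gen6_queryobj_get_results(). */
#include <stdbool.h>
#include <stdio.h>

struct query {
   bool flushed;   /* commands producing the result have left the batch */
   bool ready;     /* result has been read back */
   int bo;         /* stands in for the BO holding the raw counters */
};

static bool batch_references(int bo) { (void)bo; return false; }
static bool bo_busy(int bo)          { (void)bo; return false; }
static void flush_batch(void)        { printf("flush\n"); }
static void read_results(struct query *q) { q->ready = true; }

static void
check_query(struct query *q)
{
   /* If the batch still references the BO, the writes that produce the
    * result have not been submitted; flush so they can complete. */
   q->flushed = q->flushed || !batch_references(q->bo);
   if (!q->flushed)
      flush_batch();

   /* Only read back once the GPU is done with the BO; otherwise the
    * application is expected to poll again later. */
   if (!bo_busy(q->bo))
      read_results(q);
}

int
main(void)
{
   struct query q = { .flushed = false, .ready = false, .bo = 0 };
   check_query(&q);
   printf("ready: %s\n", q.ready ? "yes" : "no");
   return 0;
}
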