* required for handling queries, so that we can be sure that we won't
* have to emit a batchbuffer without getting the ending PS_DEPTH_COUNT.
*/
-#include "main/simple_list.h"
#include "main/imports.h"
#include "brw_context.h"
if (query->bo == NULL)
return;
- /* Map and count the pixels from the current query BO */
- dri_bo_map(query->bo, GL_FALSE);
+ drm_intel_bo_map(query->bo, GL_FALSE);
results = query->bo->virtual;
- for (i = query->first_index; i <= query->last_index; i++) {
- query->Base.Result += results[i * 2 + 1] - results[i * 2];
+ if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
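+ /* The query BO holds the start and end timestamps written by PIPE_CONTROL;
+ * the code uses the high 32 bits of each 64-bit slot and scales the delta
+ * by 1000, presumably to the nanoseconds GL_TIME_ELAPSED_EXT reports.
+ */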
+ query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
+ } else {
+ /* Map and count the pixels from the current query BO */
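+ /* Each query index owns a pair of 64-bit counters: PS_DEPTH_COUNT at
+ * the start of the index (slot 2i) and at the end (slot 2i + 1).
+ */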
+ for (i = query->first_index; i <= query->last_index; i++) {
+ query->Base.Result += results[i * 2 + 1] - results[i * 2];
+ }
}
- dri_bo_unmap(query->bo);
+ drm_intel_bo_unmap(query->bo);
- dri_bo_unreference(query->bo);
+ drm_intel_bo_unreference(query->bo);
query->bo = NULL;
}
static struct gl_query_object *
-brw_new_query_object(GLcontext *ctx, GLuint id)
+brw_new_query_object(struct gl_context *ctx, GLuint id)
{
struct brw_query_object *query;
- query = _mesa_calloc(sizeof(struct brw_query_object));
+ query = calloc(1, sizeof(struct brw_query_object));
query->Base.Id = id;
query->Base.Result = 0;
return &query->Base;
}
static void
-brw_delete_query(GLcontext *ctx, struct gl_query_object *q)
+brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_query_object *query = (struct brw_query_object *)q;
- dri_bo_unreference(query->bo);
- _mesa_free(query);
+ drm_intel_bo_unreference(query->bo);
+ free(query);
}
static void
-brw_begin_query(GLcontext *ctx, struct gl_query_object *q)
+brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
- /* Reset our driver's tracking of query state. */
- dri_bo_unreference(query->bo);
- query->bo = NULL;
- query->first_index = -1;
- query->last_index = -1;
-
- insert_at_head(&brw->query.active_head, query);
- intel->stats_wm++;
+ if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
+ drm_intel_bo_unreference(query->bo);
+ query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query",
+ 4096, 4096);
+
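+ /* Write the starting timestamp into the first 64-bit slot of the BO.
+ * On Gen6 the PIPE_CONTROL flags moved out of the opcode DWord into
+ * DWord 1, hence the two layouts below.
+ */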
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
+ OUT_RELOC(query->bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ 0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+ } else {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_WRITE_TIMESTAMP);
+ OUT_RELOC(query->bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ 0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
+ } else {
+ /* Reset our driver's tracking of query state. */
+ drm_intel_bo_unreference(query->bo);
+ query->bo = NULL;
+ query->first_index = -1;
+ query->last_index = -1;
+
+ brw->query.obj = query;
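+ /* stats_wm counts users that need pipeline statistics enabled, so the
+ * hardware keeps PS_DEPTH_COUNT up to date while the query is active.
+ */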
+ intel->stats_wm++;
+ }
}
/**
* End the ARB_occlusion_query query on a query object.
*/
static void
-brw_end_query(GLcontext *ctx, struct gl_query_object *q)
+brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
- /* Flush the batchbuffer in case it has writes to our query BO.
- * Have later queries write to a new query BO so that further rendering
- * doesn't delay the collection of our results.
- */
- if (query->bo) {
- brw_emit_query_end(brw);
- intel_batchbuffer_flush(intel->batch);
+ if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
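+ /* Write the ending timestamp into the second 64-bit slot (byte offset 8)
+ * of the BO allocated in brw_begin_query().
+ */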
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
+ OUT_RELOC(query->bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ 8);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+ } else {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_WRITE_TIMESTAMP);
+ OUT_RELOC(query->bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ 8);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
- dri_bo_unreference(brw->query.bo);
- brw->query.bo = NULL;
- }
+ intel_batchbuffer_flush(intel->batch);
+ } else {
+ /* Flush the batchbuffer in case it has writes to our query BO.
+ * Have later queries write to a new query BO so that further rendering
+ * doesn't delay the collection of our results.
+ */
+ if (query->bo) {
+ brw_emit_query_end(brw);
+ intel_batchbuffer_flush(intel->batch);
+
+ drm_intel_bo_unreference(brw->query.bo);
+ brw->query.bo = NULL;
+ }
- remove_from_list(query);
+ brw->query.obj = NULL;
- intel->stats_wm--;
+ intel->stats_wm--;
+ }
}
-static void brw_wait_query(GLcontext *ctx, struct gl_query_object *q)
+static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_query_object *query = (struct brw_query_object *)q;
brw_queryobj_get_results(query);
query->Base.Ready = GL_TRUE;
}
-static void brw_check_query(GLcontext *ctx, struct gl_query_object *q)
+static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
struct brw_query_object *query = (struct brw_query_object *)q;
struct intel_context *intel = &brw->intel;
/* Skip if we're not doing any queries. */
- if (is_empty_list(&brw->query.active_head))
+ if (!brw->query.obj)
return;
/* Get a new query BO if we're going to need it. */
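+ /* Each index takes two uint64_t slots (begin/end), so a 4096-byte BO
+ * covers 256 query indices before we have to start a fresh one.
+ */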
if (brw->query.bo == NULL ||
brw->query.index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
- dri_bo_unreference(brw->query.bo);
+ drm_intel_bo_unreference(brw->query.bo);
brw->query.bo = NULL;
- brw->query.bo = dri_bo_alloc(intel->bufmgr, "query", 4096, 1);
+ brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
+
+ /* Clear the new BO so slots we never get to write read back as zero. */
+ drm_intel_bo_map(brw->query.bo, GL_TRUE);
+ memset((char *)brw->query.bo->virtual, 0, 4096);
+ drm_intel_bo_unmap(brw->query.bo);
+
brw->query.index = 0;
}
brw_emit_query_begin(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
- struct brw_query_object *query;
+ struct brw_query_object *query = brw->query.obj;
/* Skip if we're not doing any queries, or we've emitted the start. */
- if (brw->query.active || is_empty_list(&brw->query.active_head))
+ if (!query || brw->query.active)
return;
- BEGIN_BATCH(4, IGNORE_CLIPRECTS);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL |
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_WRITE_DEPTH_COUNT);
- /* This object could be mapped cacheable, but we don't have an exposed
- * mechanism to support that. Since it's going uncached, tell GEM that
- * we're writing to it. The usual clflush should be all that's required
- * to pick up the results.
- */
- OUT_RELOC(brw->query.bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- PIPE_CONTROL_GLOBAL_GTT_WRITE |
- ((brw->query.index * 2) * sizeof(uint64_t)));
- OUT_BATCH(0);
- OUT_BATCH(0);
- ADVANCE_BATCH();
-
- foreach(query, &brw->query.active_head) {
- if (query->bo != brw->query.bo) {
- if (query->bo != NULL)
- brw_queryobj_get_results(query);
- dri_bo_reference(brw->query.bo);
- query->bo = brw->query.bo;
- query->first_index = brw->query.index;
- }
- query->last_index = brw->query.index;
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(8);
+
+ /* workaround: CS stall required before depth stall. */
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_CS_STALL);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_WRITE_DEPTH_COUNT);
+ OUT_RELOC(brw->query.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ ((brw->query.index * 2) * sizeof(uint64_t)));
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+ } else {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_WRITE_DEPTH_COUNT);
+ /* This object could be mapped cacheable, but we don't have an exposed
+ * mechanism to support that. Since it's going uncached, tell GEM that
+ * we're writing to it. The usual clflush should be all that's required
+ * to pick up the results.
+ */
+ OUT_RELOC(brw->query.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ ((brw->query.index * 2) * sizeof(uint64_t)));
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
+
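+ /* Point this query object at the shared accumulation BO. If it was
+ * still referencing an older BO, harvest those results first so they
+ * aren't lost when we repoint it.
+ */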
+ if (query->bo != brw->query.bo) {
+ if (query->bo != NULL)
+ brw_queryobj_get_results(query);
+ drm_intel_bo_reference(brw->query.bo);
+ query->bo = brw->query.bo;
+ query->first_index = brw->query.index;
}
+ query->last_index = brw->query.index;
brw->query.active = GL_TRUE;
}
if (!brw->query.active)
return;
- BEGIN_BATCH(4, IGNORE_CLIPRECTS);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL |
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_WRITE_DEPTH_COUNT);
- OUT_RELOC(brw->query.bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- PIPE_CONTROL_GLOBAL_GTT_WRITE |
- ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
- OUT_BATCH(0);
- OUT_BATCH(0);
- ADVANCE_BATCH();
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(8);
+ /* workaround: CS stall required before depth stall. */
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_CS_STALL);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_WRITE_DEPTH_COUNT);
+ OUT_RELOC(brw->query.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+ } else {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_WRITE_DEPTH_COUNT);
+ OUT_RELOC(brw->query.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ PIPE_CONTROL_GLOBAL_GTT_WRITE |
+ ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
brw->query.active = GL_FALSE;
brw->query.index++;