#include "freedreno_resource.h"
#include "freedreno_util.h"
-
-static bool
-is_active(struct fd_acc_query *aq, enum fd_render_stage stage)
-{
- return !!(aq->provider->active & stage);
-}
-
static void
fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q)
{
struct fd_acc_query *aq = fd_acc_query(q);
- DBG("%p: active=%d", q, q->active);
+ DBG("%p", q);
pipe_resource_reference(&aq->prsc, NULL);
list_del(&aq->node);
+ free(aq->query_data);
free(aq);
}
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_WRITE);
map = fd_bo_map(rsc->bo);
- memset(map, 0, aq->provider->size);
+ memset(map, 0, aq->size);
fd_bo_cpu_fini(rsc->bo);
}
-static boolean
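+/* Stop collecting samples for this query in its current batch (no-op if
+ * the query is not currently attached to a batch):
+ */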
+static void
+fd_acc_query_pause(struct fd_acc_query *aq)
+{
+ const struct fd_acc_sample_provider *p = aq->provider;
+
+ if (!aq->batch)
+ return;
+
+ p->pause(aq, aq->batch);
+ aq->batch = NULL;
+}
+
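+/* Start collecting samples for this query in the given batch, and flag the
+ * query's result buffer as written by that batch so a later readback will
+ * flush it:
+ */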
+static void
+fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+{
+ const struct fd_acc_sample_provider *p = aq->provider;
+
+ aq->batch = batch;
+ p->resume(aq, aq->batch);
+
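+ /* batch/resource dependency tracking is protected by the screen lock: */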
+ fd_screen_lock(batch->ctx->screen);
+ fd_batch_resource_write(batch, fd_resource(aq->prsc));
+ fd_screen_unlock(batch->ctx->screen);
+}
+
+static void
fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
struct fd_acc_query *aq = fd_acc_query(q);
- const struct fd_acc_sample_provider *p = aq->provider;
- DBG("%p: active=%d", q, q->active);
+ DBG("%p", q);
/* ->begin_query() discards previous results, so realloc bo: */
realloc_query_bo(ctx, aq);
- /* then resume query if needed to collect first sample: */
- if (batch && is_active(aq, batch->stage))
- p->resume(aq, batch);
+ /* Signal that we need to update the active queries on the next draw */
+ ctx->update_active_queries = true;
/* add to active list: */
- assert(list_empty(&aq->node));
+ assert(list_is_empty(&aq->node));
list_addtail(&aq->node, &ctx->acc_active_queries);
- return true;
+ /* TIMESTAMP/GPU_FINISHED queries don't do normal bracketing at draw time;
+ * we need to just emit the capture at this moment.
+ */
+ if (skip_begin_query(q->type))
+ fd_acc_query_resume(aq, fd_context_batch(ctx));
}
static void
fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
struct fd_acc_query *aq = fd_acc_query(q);
- const struct fd_acc_sample_provider *p = aq->provider;
- DBG("%p: active=%d", q, q->active);
+ DBG("%p", q);
- if (batch && is_active(aq, batch->stage))
- p->pause(aq, batch);
+ fd_acc_query_pause(aq);
/* remove from active list: */
list_delinit(&aq->node);
}
-static boolean
+static bool
fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
- boolean wait, union pipe_query_result *result)
+ bool wait, union pipe_query_result *result)
{
struct fd_acc_query *aq = fd_acc_query(q);
const struct fd_acc_sample_provider *p = aq->provider;
struct fd_resource *rsc = fd_resource(aq->prsc);
- DBG("%p: wait=%d, active=%d", q, wait, q->active);
+ DBG("%p: wait=%d", q, wait);
- assert(LIST_IS_EMPTY(&aq->node));
+ assert(list_is_empty(&aq->node));
/* if !wait, then check the last sample (the one most likely to
 * not be ready yet) and bail if it is not ready, so we don't
 * spin forever:
 */
if (aq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false, false);
+ fd_batch_flush(rsc->write_batch);
return false;
}
}
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true, false);
+ fd_batch_flush(rsc->write_batch);
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
};
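+/* Variant of fd_acc_create_query() that takes the sample provider directly,
+ * rather than looking it up from the per-context provider table:
+ */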
struct fd_query *
-fd_acc_create_query(struct fd_context *ctx, unsigned query_type)
+fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
+ unsigned index, const struct fd_acc_sample_provider *provider)
{
struct fd_acc_query *aq;
struct fd_query *q;
- int idx = pidx(query_type);
-
- if ((idx < 0) || !ctx->acc_sample_providers[idx])
- return NULL;
aq = CALLOC_STRUCT(fd_acc_query);
if (!aq)
	return NULL;

DBG("%p: query_type=%u", aq, query_type);
- aq->provider = ctx->acc_sample_providers[idx];
+ aq->provider = provider;
+ aq->size = provider->size;
list_inithead(&aq->node);
q = &aq->base;
q->funcs = &acc_query_funcs;
q->type = query_type;
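+ /* index selects the sub-counter for query types that expose more than
+ * one (e.g. an individual pipeline-statistics counter); we just record
+ * it for the provider to use:
+ */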
+ q->index = index;
return q;
}
+struct fd_query *
+fd_acc_create_query(struct fd_context *ctx, unsigned query_type,
+ unsigned index)
+{
+ int idx = pidx(query_type);
+
+ if ((idx < 0) || !ctx->acc_sample_providers[idx])
+ return NULL;
+
+ return fd_acc_create_query2(ctx, query_type, index,
+ ctx->acc_sample_providers[idx]);
+}
+
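+/* For reference, a sample provider bundles the per-sample size with the
+ * pause/resume hooks driven above.  A hypothetical sketch (the real
+ * interface is declared in freedreno_query_acc.h):
+ *
+ *    static const struct fd_acc_sample_provider occlusion_counter = {
+ *            .query_type = PIPE_QUERY_OCCLUSION_COUNTER,
+ *            .size = sizeof(struct occlusion_sample),
+ *            .resume = occlusion_resume,
+ *            .pause = occlusion_pause,
+ *            .result = occlusion_result,
+ *    };
+ */
+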
+/* Called at clear/draw/blit time to enable/disable the appropriate queries in
+ * the batch (and transfer active querying between batches in the case of
+ * batch reordering).
+ */
void
fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
- if (stage != batch->stage) {
- struct fd_acc_query *aq;
- LIST_FOR_EACH_ENTRY(aq, &batch->ctx->acc_active_queries, node) {
- const struct fd_acc_sample_provider *p = aq->provider;
-
- bool was_active = is_active(aq, batch->stage);
- bool now_active = is_active(aq, stage);
+ struct fd_context *ctx = batch->ctx;
- if (now_active && !was_active)
- p->resume(aq, batch);
- else if (was_active && !now_active)
- p->pause(aq, batch);
+ if (stage != batch->stage || ctx->update_active_queries) {
+ struct fd_acc_query *aq;
+ LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
+ bool batch_change = aq->batch != batch;
+ bool was_active = aq->batch != NULL;
+ bool now_active = stage != FD_STAGE_NULL &&
+ (ctx->active_queries || aq->provider->always);
+
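+ /* a batch change with the query still active means the query is
+ * moving between batches (e.g. due to reordering): pause it on the
+ * old batch and resume it on the new one:
+ */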
+ if (was_active && (!now_active || batch_change))
+ fd_acc_query_pause(aq);
+ if (now_active && (!was_active || batch_change))
+ fd_acc_query_resume(aq, batch);
}
}
+
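+ /* the active queries now match this batch; clear the flag set by
+ * fd_acc_begin_query():
+ */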
+ ctx->update_active_queries = false;
}
void