/**
* @file iris_query.c
*
- * XXX: this file is EMPTY. it will eventually implement query objects!
+ * ============================= GENXML CODE =============================
+ * [This file is compiled once per generation.]
+ * =======================================================================
+ *
+ * Query object support. This allows measuring various simple statistics
+ * via counters on the GPU. We use GenX code for MI_MATH calculations.
*/
#include <stdio.h>
#include <errno.h>
+#include "perf/gen_perf.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_inlines.h"
+#include "util/u_upload_mgr.h"
#include "iris_context.h"
#include "iris_defines.h"
+#include "iris_fence.h"
#include "iris_resource.h"
#include "iris_screen.h"
-#define CS_GPR(n) (0x2600 + (n) * 8)
-
-#define MI_MATH (0x1a << 23)
-
-#define MI_ALU_LOAD 0x080
-#define MI_ALU_LOADINV 0x480
-#define MI_ALU_LOAD0 0x081
-#define MI_ALU_LOAD1 0x481
-#define MI_ALU_ADD 0x100
-#define MI_ALU_SUB 0x101
-#define MI_ALU_AND 0x102
-#define MI_ALU_OR 0x103
-#define MI_ALU_XOR 0x104
-#define MI_ALU_STORE 0x180
-#define MI_ALU_STOREINV 0x580
-
-#define MI_ALU_R0 0x00
-#define MI_ALU_R1 0x01
-#define MI_ALU_R2 0x02
-#define MI_ALU_R3 0x03
-#define MI_ALU_R4 0x04
-#define MI_ALU_SRCA 0x20
-#define MI_ALU_SRCB 0x21
-#define MI_ALU_ACCU 0x31
-#define MI_ALU_ZF 0x32
-#define MI_ALU_CF 0x33
-
-#define MI_ALU0(op) ((MI_ALU_##op << 20))
-#define MI_ALU1(op, x) ((MI_ALU_##op << 20) | (MI_ALU_##x << 10))
-#define MI_ALU2(op, x, y) \
- ((MI_ALU_##op << 20) | (MI_ALU_##x << 10) | (MI_ALU_##y))
+#include "iris_genx_macros.h"
+
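+/* The per-stream SO_PRIM_STORAGE_NEEDED and SO_NUM_PRIMS_WRITTEN registers
+ * are spaced 8 bytes apart, so stream n's register is stream 0's plus n * 8.
+ */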
+#define SO_PRIM_STORAGE_NEEDED(n) (GENX(SO_PRIM_STORAGE_NEEDED0_num) + (n) * 8)
+#define SO_NUM_PRIMS_WRITTEN(n) (GENX(SO_NUM_PRIMS_WRITTEN0_num) + (n) * 8)
struct iris_query {
enum pipe_query_type type;
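+   /** Vertex stream (for SO queries) or statistic (for pipeline stats) index. */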
+ int index;
bool ready;
+ bool stalled;
+
uint64_t result;
- struct iris_bo *bo;
+ struct iris_state_ref query_state_ref;
struct iris_query_snapshots *map;
+ struct iris_syncpt *syncpt;
+
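+   /** Which batch this query executes on: render or compute. */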
+ int batch_idx;
+
+ struct iris_monitor_object *monitor;
};
struct iris_query_snapshots {
+ /** iris_render_condition's saved MI_PREDICATE_RESULT value. */
+ uint64_t predicate_result;
+
+ /** Have the start/end snapshots landed? */
+ uint64_t snapshots_landed;
+
+ /** Starting and ending counter snapshots */
uint64_t start;
uint64_t end;
+};
+
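+/**
+ * Snapshot layout for SO overflow queries. The first two fields must stay
+ * in sync with struct iris_query_snapshots, as shared code uses those
+ * offsets regardless of the query type.
+ */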
+struct iris_query_so_overflow {
+ uint64_t predicate_result;
uint64_t snapshots_landed;
+
+ struct {
+ uint64_t prim_storage_needed[2];
+ uint64_t num_prims[2];
+ } stream[4];
};
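+/**
+ * Return a gen_mi_value pointing at a 64-bit field in this query's
+ * snapshot buffer, given the field's offset within the snapshots.
+ */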
+static struct gen_mi_value
+query_mem64(struct iris_query *q, uint32_t offset)
+{
+ struct iris_address addr = {
+ .bo = iris_resource_bo(q->query_state_ref.res),
+ .offset = q->query_state_ref.offset + offset,
+ .write = true
+ };
+ return gen_mi_mem64(addr);
+}
+
/**
* Is this type of query written by PIPE_CONTROL?
*/
}
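+/**
+ * Write "true" to the query's "snapshots landed" field, either directly
+ * (for non-pipelined queries) or via a pipelined PIPE_CONTROL write that
+ * is ordered after the query's result snapshots.
+ */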
static void
-write_availability(struct iris_context *ice,
- struct iris_query *q,
- bool available)
+mark_available(struct iris_context *ice, struct iris_query *q)
{
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[q->batch_idx];
unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;
unsigned offset = offsetof(struct iris_query_snapshots, snapshots_landed);
+ struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
+ offset += q->query_state_ref.offset;
if (!iris_is_query_pipelined(q)) {
- ice->vtbl.store_data_imm64(batch, q->bo, offset, available);
+ ice->vtbl.store_data_imm64(batch, bo, offset, true);
} else {
- if (available) {
- /* Order available *after* the query results. */
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- } else {
- /* Make it unavailable *before* any pipelined reads. */
- flags |= PIPE_CONTROL_CS_STALL;
- }
- iris_emit_pipe_control_write(batch, flags, q->bo, offset, available);
+ /* Order available *after* the query results. */
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ iris_emit_pipe_control_write(batch, "query: mark available",
+ flags, bo, offset, true);
}
}
{
const struct gen_device_info *devinfo = &batch->screen->devinfo;
const unsigned optional_cs_stall =
- devinfo->gen == 9 && devinfo->gt == 4 ? PIPE_CONTROL_CS_STALL : 0;
+ GEN_GEN == 9 && devinfo->gt == 4 ? PIPE_CONTROL_CS_STALL : 0;
+ struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
- iris_emit_pipe_control_write(batch, flags | optional_cs_stall,
- q->bo, offset, 0ull);
+ iris_emit_pipe_control_write(batch, "query: pipelined snapshot write",
+ flags | optional_cs_stall,
+ bo, offset, 0ull);
}
static void
write_value(struct iris_context *ice, struct iris_query *q, unsigned offset)
{
- struct iris_batch *batch = &ice->render_batch;
- const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ struct iris_batch *batch = &ice->batches[q->batch_idx];
+ struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
- iris_use_pinned_bo(batch, q->bo, true);
+ if (!iris_is_query_pipelined(q)) {
+ iris_emit_pipe_control_flush(batch,
+ "query: non-pipelined snapshot write",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ q->stalled = true;
+ }
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
- if (devinfo->gen >= 10) {
+ if (GEN_GEN >= 10) {
/* "Driver must program PIPE_CONTROL with only Depth Stall Enable
* bit set prior to programming a PIPE_CONTROL with Write PS Depth
* Count sync operation."
*/
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "workaround: depth stall before writing "
+ "PS_DEPTH_COUNT",
+ PIPE_CONTROL_DEPTH_STALL);
}
- iris_pipelined_write(&ice->render_batch, q,
+ iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
PIPE_CONTROL_WRITE_DEPTH_COUNT |
PIPE_CONTROL_DEPTH_STALL,
offset);
break;
case PIPE_QUERY_TIME_ELAPSED:
- iris_pipelined_write(&ice->render_batch, q,
+ case PIPE_QUERY_TIMESTAMP:
+ case PIPE_QUERY_TIMESTAMP_DISJOINT:
+ iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
PIPE_CONTROL_WRITE_TIMESTAMP,
offset);
break;
+ case PIPE_QUERY_PRIMITIVES_GENERATED:
+ ice->vtbl.store_register_mem64(batch,
+ q->index == 0 ?
+ GENX(CL_INVOCATION_COUNT_num) :
+ SO_PRIM_STORAGE_NEEDED(q->index),
+ bo, offset, false);
+ break;
+ case PIPE_QUERY_PRIMITIVES_EMITTED:
+ ice->vtbl.store_register_mem64(batch,
+ SO_NUM_PRIMS_WRITTEN(q->index),
+ bo, offset, false);
+ break;
+ case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE: {
+ static const uint32_t index_to_reg[] = {
+ GENX(IA_VERTICES_COUNT_num),
+ GENX(IA_PRIMITIVES_COUNT_num),
+ GENX(VS_INVOCATION_COUNT_num),
+ GENX(GS_INVOCATION_COUNT_num),
+ GENX(GS_PRIMITIVES_COUNT_num),
+ GENX(CL_INVOCATION_COUNT_num),
+ GENX(CL_PRIMITIVES_COUNT_num),
+ GENX(PS_INVOCATION_COUNT_num),
+ GENX(HS_INVOCATION_COUNT_num),
+ GENX(DS_INVOCATION_COUNT_num),
+ GENX(CS_INVOCATION_COUNT_num),
+ };
+ const uint32_t reg = index_to_reg[q->index];
+
+ ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ break;
+ }
default:
assert(false);
}
}
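+/**
+ * Write the per-stream "primitives written" and "primitive storage needed"
+ * counters into the query's snapshot buffer; \p end selects between the
+ * starting and ending snapshot slots.
+ */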
static void
-calculate_result_on_cpu(struct iris_query *q)
+write_overflow_values(struct iris_context *ice, struct iris_query *q, bool end)
+{
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+ uint32_t count = q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ? 1 : 4;
+ struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
+ uint32_t offset = q->query_state_ref.offset;
+
+ iris_emit_pipe_control_flush(batch,
+ "query: write SO overflow snapshots",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ for (uint32_t i = 0; i < count; i++) {
+ int s = q->index + i;
+ int g_idx = offset + offsetof(struct iris_query_so_overflow,
+ stream[s].num_prims[end]);
+ int w_idx = offset + offsetof(struct iris_query_so_overflow,
+ stream[s].prim_storage_needed[end]);
+ ice->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
+ bo, g_idx, false);
+ ice->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
+ bo, w_idx, false);
+ }
+}
+
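+/**
+ * The raw timestamp counter is only TIMESTAMP_BITS wide, so if the ending
+ * value is smaller than the starting one, the counter wrapped around and
+ * we add back one full counter period.
+ */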
+static uint64_t
+iris_raw_timestamp_delta(uint64_t time0, uint64_t time1)
+{
+ if (time0 > time1) {
+ return (1ULL << TIMESTAMP_BITS) + time1 - time0;
+ } else {
+ return time1 - time0;
+ }
+}
+
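+/**
+ * A stream overflowed if its "primitive storage needed" counter advanced
+ * by a different amount than its "primitives written" counter, meaning
+ * some generated primitives could not be recorded.
+ */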
+static bool
+stream_overflowed(struct iris_query_so_overflow *so, int s)
+{
+ return (so->stream[s].prim_storage_needed[1] -
+ so->stream[s].prim_storage_needed[0]) !=
+ (so->stream[s].num_prims[1] - so->stream[s].num_prims[0]);
+}
+
+static void
+calculate_result_on_cpu(const struct gen_device_info *devinfo,
+ struct iris_query *q)
{
switch (q->type) {
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
q->result = q->map->end != q->map->start;
break;
- case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_TIMESTAMP:
+ case PIPE_QUERY_TIMESTAMP_DISJOINT:
+ /* The timestamp is the single starting snapshot. */
+ q->result = gen_device_info_timebase_scale(devinfo, q->map->start);
+ q->result &= (1ull << TIMESTAMP_BITS) - 1;
+ break;
case PIPE_QUERY_TIME_ELAPSED:
+ q->result = iris_raw_timestamp_delta(q->map->start, q->map->end);
+ q->result = gen_device_info_timebase_scale(devinfo, q->result);
+ q->result &= (1ull << TIMESTAMP_BITS) - 1;
+ break;
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ q->result = stream_overflowed((void *) q->map, q->index);
+ break;
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ q->result = false;
+ for (int i = 0; i < MAX_VERTEX_STREAMS; i++)
+ q->result |= stream_overflowed((void *) q->map, i);
+ break;
+ case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE:
+ q->result = q->map->end - q->map->start;
+
+ /* WaDividePSInvocationCountBy4:HSW,BDW */
+ if (GEN_GEN == 8 && q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
+ q->result /= 4;
+ break;
+ case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
default:
}
/**
- * Calculate the result and store it to CS_GPR0.
+ * Calculate the streamout overflow for stream \p idx:
+ *
+ * (num_prims[1] - num_prims[0]) - (storage_needed[1] - storage_needed[0])
*/
-static void
-calculate_result_on_gpu(struct iris_context *ice, struct iris_query *q)
+static struct gen_mi_value
+calc_overflow_for_stream(struct gen_mi_builder *b,
+ struct iris_query *q,
+ int idx)
{
- struct iris_batch *batch = &ice->render_batch;
-
- ice->vtbl.load_register_mem64(batch, CS_GPR(1), q->bo,
- offsetof(struct iris_query_snapshots, start));
- ice->vtbl.load_register_mem64(batch, CS_GPR(2), q->bo,
- offsetof(struct iris_query_snapshots, end));
-
- static const uint32_t math[] = {
- MI_MATH | (5 - 2),
- MI_ALU2(LOAD, SRCA, R2),
- MI_ALU2(LOAD, SRCB, R1),
- MI_ALU0(SUB),
- MI_ALU2(STORE, R0, ACCU),
- };
- iris_batch_emit(batch, math, sizeof(math));
+#define C(counter, i) query_mem64(q, \
+ offsetof(struct iris_query_so_overflow, stream[idx].counter[i]))
+
+ return gen_mi_isub(b, gen_mi_isub(b, C(num_prims, 1), C(num_prims, 0)),
+ gen_mi_isub(b, C(prim_storage_needed, 1),
+ C(prim_storage_needed, 0)));
+#undef C
+}
+
+/**
+ * Calculate whether any stream has overflowed.
+ */
+static struct gen_mi_value
+calc_overflow_any_stream(struct gen_mi_builder *b, struct iris_query *q)
+{
+ struct gen_mi_value stream_result[MAX_VERTEX_STREAMS];
+ for (int i = 0; i < MAX_VERTEX_STREAMS; i++)
+ stream_result[i] = calc_overflow_for_stream(b, q, i);
+
+ struct gen_mi_value result = stream_result[0];
+ for (int i = 1; i < MAX_VERTEX_STREAMS; i++)
+ result = gen_mi_ior(b, result, stream_result[i]);
+
+ return result;
+}
+
+static bool
+query_is_boolean(enum pipe_query_type type)
+{
+ switch (type) {
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Calculate the result using MI_MATH.
+ */
+static struct gen_mi_value
+calculate_result_on_gpu(const struct gen_device_info *devinfo,
+ struct gen_mi_builder *b,
+ struct iris_query *q)
+{
+ struct gen_mi_value result;
+ struct gen_mi_value start_val =
+ query_mem64(q, offsetof(struct iris_query_snapshots, start));
+ struct gen_mi_value end_val =
+ query_mem64(q, offsetof(struct iris_query_snapshots, end));
+
+ switch (q->type) {
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ result = calc_overflow_for_stream(b, q, q->index);
+ break;
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ result = calc_overflow_any_stream(b, q);
+ break;
+ case PIPE_QUERY_TIMESTAMP: {
+ /* TODO: This discards any fractional bits of the timebase scale.
+ * We would need to do a bit of fixed point math on the CS ALU, or
+ * launch an actual shader to calculate this with full precision.
+ */
+ uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
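+      /* For example, a 12 MHz timestamp clock would give scale = 83 here
+       * rather than the exact 83.33, roughly a 0.4% error.
+       */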
+ result = gen_mi_iand(b, gen_mi_imm((1ull << 36) - 1),
+ gen_mi_imul_imm(b, start_val, scale));
+ break;
+ }
+ case PIPE_QUERY_TIME_ELAPSED: {
+ /* TODO: This discards fractional bits (see above). */
+ uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
+ result = gen_mi_imul_imm(b, gen_mi_isub(b, end_val, start_val), scale);
+ break;
+ }
+ default:
+ result = gen_mi_isub(b, end_val, start_val);
+ break;
+ }
+
+ /* WaDividePSInvocationCountBy4:HSW,BDW */
+ if (GEN_GEN == 8 &&
+ q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
+ q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
+ result = gen_mi_ushr32_imm(b, result, 2);
+
+ if (query_is_boolean(q->type))
+ result = gen_mi_iand(b, gen_mi_nz(b, result), gen_mi_imm(1));
+
+ return result;
}
static struct pipe_query *
struct iris_query *q = calloc(1, sizeof(struct iris_query));
q->type = query_type;
+ q->index = index;
+ if (q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
+ q->index == PIPE_STAT_QUERY_CS_INVOCATIONS)
+ q->batch_idx = IRIS_BATCH_COMPUTE;
+ else
+ q->batch_idx = IRIS_BATCH_RENDER;
return (struct pipe_query *) q;
}
iris_destroy_query(struct pipe_context *ctx, struct pipe_query *p_query)
{
struct iris_query *query = (void *) p_query;
- iris_bo_unreference(query->bo);
+ struct iris_screen *screen = (void *) ctx->screen;
+ iris_syncpt_reference(screen, &query->syncpt, NULL);
free(query);
}
-static boolean
+static bool
+static bool
iris_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
- struct iris_screen *screen = (void *) ctx->screen;
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
+ void *ptr = NULL;
+ uint32_t size;
+
+ if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
+ q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
+ size = sizeof(struct iris_query_so_overflow);
+ else
+ size = sizeof(struct iris_query_snapshots);
+
+ u_upload_alloc(ice->query_buffer_uploader, 0,
+ size, size, &q->query_state_ref.offset,
+ &q->query_state_ref.res, &ptr);
- iris_bo_unreference(q->bo);
- q->bo = iris_bo_alloc(screen->bufmgr, "query object", 4096,
- IRIS_MEMZONE_OTHER);
- if (!q->bo)
+ if (!iris_resource_bo(q->query_state_ref.res))
return false;
- q->map = iris_bo_map(&ice->dbg, q->bo, MAP_READ | MAP_ASYNC);
+ q->map = ptr;
if (!q->map)
return false;
q->result = 0ull;
q->ready = false;
+ WRITE_ONCE(q->map->snapshots_landed, false);
- write_availability(ice, q, false);
- write_value(ice, q, offsetof(struct iris_query_snapshots, start));
+ if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
+ ice->state.prims_generated_query_active = true;
+ ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
+ }
+
+ if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
+ q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
+ write_overflow_values(ice, q, false);
+ else
+ write_value(ice, q,
+ q->query_state_ref.offset +
+ offsetof(struct iris_query_snapshots, start));
return true;
}
{
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
+ struct iris_batch *batch = &ice->batches[q->batch_idx];
+
+ if (q->type == PIPE_QUERY_TIMESTAMP) {
+ iris_begin_query(ctx, query);
+ iris_batch_reference_signal_syncpt(batch, &q->syncpt);
+ mark_available(ice, q);
+ return true;
+ }
- write_value(ice, q, offsetof(struct iris_query_snapshots, end));
- write_availability(ice, q, true);
+ if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
+ ice->state.prims_generated_query_active = false;
+ ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
+ }
+
+ if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
+ q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
+ write_overflow_values(ice, q, true);
+ else
+ write_value(ice, q,
+ q->query_state_ref.offset +
+ offsetof(struct iris_query_snapshots, end));
+
+ iris_batch_reference_signal_syncpt(batch, &q->syncpt);
+ mark_available(ice, q);
return true;
}
-static boolean
+/**
+ * See if the snapshots have landed for a query, and if so, compute the
+ * result and mark it ready. Does not flush (unlike iris_get_query_result).
+ */
+static void
+iris_check_query_no_flush(struct iris_context *ice, struct iris_query *q)
+{
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+
+ if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
+ calculate_result_on_cpu(devinfo, q);
+ }
+}
+
+static bool
iris_get_query_result(struct pipe_context *ctx,
struct pipe_query *query,
- boolean wait,
+ bool wait,
union pipe_query_result *result)
{
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
+ struct iris_screen *screen = (void *) ctx->screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+
+ if (unlikely(screen->no_hw)) {
+ result->u64 = 0;
+ return true;
+ }
if (!q->ready) {
- if (iris_batch_references(&ice->render_batch, q->bo))
- iris_batch_flush(&ice->render_batch);
+ struct iris_batch *batch = &ice->batches[q->batch_idx];
+ if (q->syncpt == iris_batch_get_signal_syncpt(batch))
+ iris_batch_flush(batch);
- if (!q->map->snapshots_landed) {
+ while (!READ_ONCE(q->map->snapshots_landed)) {
if (wait)
- iris_bo_wait_rendering(q->bo);
+ iris_wait_syncpt(ctx->screen, q->syncpt, INT64_MAX);
else
return false;
}
- assert(q->map->snapshots_landed);
- calculate_result_on_cpu(q);
+ assert(READ_ONCE(q->map->snapshots_landed));
+ calculate_result_on_cpu(devinfo, q);
}
assert(q->ready);
+
result->u64 = q->result;
return true;
static void
iris_get_query_result_resource(struct pipe_context *ctx,
struct pipe_query *query,
- boolean wait,
+ bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *p_res,
{
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[q->batch_idx];
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ struct iris_resource *res = (void *) p_res;
+ struct iris_bo *query_bo = iris_resource_bo(q->query_state_ref.res);
+ struct iris_bo *dst_bo = iris_resource_bo(p_res);
unsigned snapshots_landed_offset =
offsetof(struct iris_query_snapshots, snapshots_landed);
+ res->bind_history |= PIPE_BIND_QUERY_BUFFER;
+
if (index == -1) {
/* They're asking for the availability of the result. If we still
* have commands queued up which produce the result, submit them
* now so that progress happens. Either way, copy the snapshots
* landed field to the destination resource.
*/
- if (iris_batch_references(batch, q->bo))
+ if (q->syncpt == iris_batch_get_signal_syncpt(batch))
iris_batch_flush(batch);
- ice->vtbl.copy_mem_mem(batch, iris_resource_bo(p_res), offset,
- q->bo, snapshots_landed_offset,
+ ice->vtbl.copy_mem_mem(batch, dst_bo, offset,
+ query_bo, snapshots_landed_offset,
result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
return;
}
- if (!q->ready && q->map->snapshots_landed) {
+ if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
/* The final snapshots happen to have landed, so let's just compute
* the result on the CPU now...
*/
- calculate_result_on_cpu(q);
+ calculate_result_on_cpu(devinfo, q);
}
if (q->ready) {
/* We happen to have the result on the CPU, so just copy it. */
if (result_type <= PIPE_QUERY_TYPE_U32) {
- ice->vtbl.store_data_imm32(batch, iris_resource_bo(p_res), offset,
- q->result);
+ ice->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
} else {
- ice->vtbl.store_data_imm64(batch, iris_resource_bo(p_res), offset,
- q->result);
+ ice->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
}
+
+   /* Make sure the result lands before they bind the QBO elsewhere and
+    * use the result.
+    */
+ // XXX: Why? i965 doesn't do this.
+ iris_emit_pipe_control_flush(batch,
+ "query: unknown QBO flushing hack",
+ PIPE_CONTROL_CS_STALL);
return;
}
- /* Calculate the result to CS_GPR0 */
- calculate_result_on_gpu(ice, q);
-   bool predicated = !wait && iris_is_query_pipelined(q);
+   bool predicated = !wait && !q->stalled;
+
+   struct gen_mi_builder b;
+   gen_mi_builder_init(&b, batch);
+ struct gen_mi_value result = calculate_result_on_gpu(devinfo, &b, q);
+ struct gen_mi_value dst =
+ result_type <= PIPE_QUERY_TYPE_U32 ? gen_mi_mem32(rw_bo(dst_bo, offset))
+ : gen_mi_mem64(rw_bo(dst_bo, offset));
if (predicated) {
- ice->vtbl.load_register_imm64(batch, MI_PREDICATE_SRC1, 0ull);
- ice->vtbl.load_register_mem64(batch, MI_PREDICATE_SRC0, q->bo,
- snapshots_landed_offset);
- uint32_t predicate = MI_PREDICATE |
- MI_PREDICATE_LOADOP_LOADINV |
- MI_PREDICATE_COMBINEOP_SET |
- MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
- iris_batch_emit(batch, &predicate, sizeof(uint32_t));
+ gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
+ gen_mi_mem64(ro_bo(query_bo, snapshots_landed_offset)));
+ gen_mi_store_if(&b, dst, result);
+ } else {
+ gen_mi_store(&b, dst, result);
}
+}
- if (result_type <= PIPE_QUERY_TYPE_U32) {
- ice->vtbl.store_register_mem32(batch, CS_GPR(0),
- iris_resource_bo(p_res),
- offset, predicated);
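+/**
+ * Pause or resume the pipeline statistics counters. Only u_blitter uses
+ * this, to avoid counting internal blits in the application's queries.
+ */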
+static void
+iris_set_active_query_state(struct pipe_context *ctx, bool enable)
+{
+ struct iris_context *ice = (void *) ctx;
+
+ if (ice->state.statistics_counters_enabled == enable)
+ return;
+
+ // XXX: most packets aren't paying attention to this yet, because it'd
+ // have to be done dynamically at draw time, which is a pain
+ ice->state.statistics_counters_enabled = enable;
+ ice->state.dirty |= IRIS_DIRTY_CLIP |
+ IRIS_DIRTY_GS |
+ IRIS_DIRTY_RASTER |
+ IRIS_DIRTY_STREAMOUT |
+ IRIS_DIRTY_TCS |
+ IRIS_DIRTY_TES |
+ IRIS_DIRTY_VS |
+ IRIS_DIRTY_WM;
+}
+
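+/**
+ * Set the render condition to a known CPU-side value, for when the query
+ * result is already available.
+ */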
+static void
+set_predicate_enable(struct iris_context *ice, bool value)
+{
+ if (value)
+ ice->state.predicate = IRIS_PREDICATE_STATE_RENDER;
+ else
+ ice->state.predicate = IRIS_PREDICATE_STATE_DONT_RENDER;
+}
+
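+/**
+ * Compute the query result on the GPU with MI_MATH and load it into the
+ * MI_PREDICATE_RESULT register for hardware conditional rendering.
+ */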
+static void
+set_predicate_for_result(struct iris_context *ice,
+ struct iris_query *q,
+ bool inverted)
+{
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+ struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
+
+ /* The CPU doesn't have the query result yet; use hardware predication */
+ ice->state.predicate = IRIS_PREDICATE_STATE_USE_BIT;
+
+ /* Ensure the memory is coherent for MI_LOAD_REGISTER_* commands. */
+ iris_emit_pipe_control_flush(batch,
+ "conditional rendering: set predicate",
+ PIPE_CONTROL_FLUSH_ENABLE);
+ q->stalled = true;
+
+ struct gen_mi_builder b;
+ gen_mi_builder_init(&b, batch);
+
+ struct gen_mi_value result;
+
+ switch (q->type) {
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ result = calc_overflow_for_stream(&b, q, q->index);
+ break;
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ result = calc_overflow_any_stream(&b, q);
+ break;
+ default: {
+ /* PIPE_QUERY_OCCLUSION_* */
+ struct gen_mi_value start =
+ query_mem64(q, offsetof(struct iris_query_snapshots, start));
+ struct gen_mi_value end =
+ query_mem64(q, offsetof(struct iris_query_snapshots, end));
+ result = gen_mi_isub(&b, end, start);
+ break;
+ }
+ }
+
+ result = inverted ? gen_mi_z(&b, result) : gen_mi_nz(&b, result);
+ result = gen_mi_iand(&b, result, gen_mi_imm(1));
+
+ /* We immediately set the predicate on the render batch, as all the
+ * counters come from 3D operations. However, we may need to predicate
+ * a compute dispatch, which executes in a different GEM context and has
+ * a different MI_PREDICATE_RESULT register. So, we save the result to
+ * memory and reload it in iris_launch_grid.
+ */
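+   /* Take an extra reference, as 'result' is consumed by both stores. */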
+ gen_mi_value_ref(&b, result);
+ gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT), result);
+ gen_mi_store(&b, query_mem64(q, offsetof(struct iris_query_snapshots,
+ predicate_result)), result);
+ ice->state.compute_predicate = bo;
+}
+
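+/**
+ * The pipe->render_condition() driver hook, which begins (or, if \p query
+ * is NULL, ends) conditional rendering based on a query result.
+ */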
+static void
+iris_render_condition(struct pipe_context *ctx,
+ struct pipe_query *query,
+ bool condition,
+ enum pipe_render_cond_flag mode)
+{
+ struct iris_context *ice = (void *) ctx;
+ struct iris_query *q = (void *) query;
+
+ /* The old condition isn't relevant; we'll update it if necessary */
+ ice->state.compute_predicate = NULL;
+ ice->condition.query = q;
+ ice->condition.condition = condition;
+
+ if (!q) {
+ ice->state.predicate = IRIS_PREDICATE_STATE_RENDER;
+ return;
+ }
+
+ iris_check_query_no_flush(ice, q);
+
+ if (q->result || q->ready) {
+ set_predicate_enable(ice, (q->result != 0) ^ condition);
} else {
- ice->vtbl.store_register_mem64(batch, CS_GPR(0),
- iris_resource_bo(p_res),
- offset, predicated);
+ if (mode == PIPE_RENDER_COND_NO_WAIT ||
+ mode == PIPE_RENDER_COND_BY_REGION_NO_WAIT) {
+ perf_debug(&ice->dbg, "Conditional rendering demoted from "
+ "\"no wait\" to \"wait\".");
+ }
+ set_predicate_for_result(ice, q, condition);
}
}
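+/**
+ * Resolve an outstanding GPU-based render condition to a CPU-side value,
+ * waiting for the query result if necessary.
+ */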
static void
-iris_set_active_query_state(struct pipe_context *pipe, boolean enable)
+iris_resolve_conditional_render(struct iris_context *ice)
{
- /* Do nothing, intentionally - only u_blitter uses this. */
+ struct pipe_context *ctx = (void *) ice;
+ struct iris_query *q = ice->condition.query;
+ struct pipe_query *query = (void *) q;
+ union pipe_query_result result;
+
+ if (ice->state.predicate != IRIS_PREDICATE_STATE_USE_BIT)
+ return;
+
+ assert(q);
+
+ iris_get_query_result(ctx, query, true, &result);
+ set_predicate_enable(ice, (q->result != 0) ^ ice->condition.condition);
}
void
-iris_init_query_functions(struct pipe_context *ctx)
+genX(init_query)(struct iris_context *ice)
{
+ struct pipe_context *ctx = &ice->ctx;
+
ctx->create_query = iris_create_query;
ctx->destroy_query = iris_destroy_query;
ctx->begin_query = iris_begin_query;
ctx->get_query_result = iris_get_query_result;
ctx->get_query_result_resource = iris_get_query_result_resource;
ctx->set_active_query_state = iris_set_active_query_state;
+ ctx->render_condition = iris_render_condition;
+
+ ice->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
}