struct iris_batch *batch = &ice->render_batch;
const struct gen_device_info *devinfo = &batch->screen->devinfo;
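+ /* Non-pipelined queries snapshot counter registers with
+ * store_register_mem64 below, so stall first to make sure the
+ * counters reflect all previously submitted work.
+ */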
+ if (!iris_is_query_pipelined(q)) {
+ iris_emit_pipe_control_flush(batch,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ }
+
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
offset);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
- iris_emit_pipe_control_flush(batch,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
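+ /* Stream 0 reports primitives arriving at the clipper; nonzero
+ * streams use the SOL "storage needed" counter for that stream.
+ */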
ice->vtbl.store_register_mem64(batch,
q->index == 0 ? CL_INVOCATION_COUNT :
SO_PRIM_STORAGE_NEEDED(q->index),
q->bo, offset, false);
break;
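+ /* PRIMITIVES_EMITTED counts primitives actually written to the
+ * stream's transform feedback buffers, so read SO_NUM_PRIMS_WRITTEN
+ * rather than SO_PRIM_STORAGE_NEEDED.
+ */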
+ case PIPE_QUERY_PRIMITIVES_EMITTED:
+ ice->vtbl.store_register_mem64(batch,
+ SO_NUM_PRIMS_WRITTEN(q->index),
+ q->bo, offset, false);
+ break;
case PIPE_QUERY_PIPELINE_STATISTICS: {
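+ /* Map the statistics query index to the corresponding hardware
+ * counter register.
+ */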
static const uint32_t index_to_reg[] = {
IA_VERTICES_COUNT,
};
const uint32_t reg = index_to_reg[q->index];
- iris_emit_pipe_control_flush(batch,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
-
ice->vtbl.store_register_mem64(batch, reg, q->bo, offset, false);
break;
}