struct iris_state_ref query_state_ref;
struct iris_query_snapshots *map;
- struct iris_syncpt *syncpt;
+ struct iris_syncobj *syncobj;
int batch_idx;
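/* iris_syncobj is a reference-counted wrapper around a DRM syncobj
 * handle; a minimal sketch of the assumed shape (the real definition
 * lives in iris_fence.h):
 *
 *    struct iris_syncobj {
 *       struct pipe_reference ref;
 *       uint32_t handle;
 *    };
 */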
offset += q->query_state_ref.offset;
if (!iris_is_query_pipelined(q)) {
- ice->vtbl.store_data_imm64(batch, bo, offset, true);
+ batch->screen->vtbl.store_data_imm64(batch, bo, offset, true);
} else {
/* Order available *after* the query results. */
flags |= PIPE_CONTROL_FLUSH_ENABLE;
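/* The availability write targets the snapshots_landed field that
 * result readers poll with READ_ONCE(); FLUSH_ENABLE keeps it from
 * landing before the result snapshots do.
 */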
offset);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
- ice->vtbl.store_register_mem64(batch,
+ batch->screen->vtbl.store_register_mem64(batch,
q->index == 0 ?
GENX(CL_INVOCATION_COUNT_num) :
SO_PRIM_STORAGE_NEEDED(q->index),
bo, offset, false);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
- ice->vtbl.store_register_mem64(batch,
+ batch->screen->vtbl.store_register_mem64(batch,
SO_NUM_PRIMS_WRITTEN(q->index),
bo, offset, false);
break;
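/* For stream 0, PRIMITIVES_GENERATED snapshots the clipper invocation
 * counter (CL_INVOCATION_COUNT); other streams use the per-stream
 * SO_PRIM_STORAGE_NEEDED register.  PRIMITIVES_EMITTED always reads
 * the per-stream SO_NUM_PRIMS_WRITTEN register.
 */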
};
const uint32_t reg = index_to_reg[q->index];
- ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
break;
}
default:
int g_idx = offset + offsetof(struct iris_query_so_overflow,
stream[s].num_prims[end]);
int w_idx = offset + offsetof(struct iris_query_so_overflow,
stream[s].prim_storage_needed[end]);
- ice->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
+ batch->screen->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
bo, g_idx, false);
- ice->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
+ batch->screen->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
bo, w_idx, false);
}
}
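/* The offsetof() math above matches the snapshot layout used for
 * overflow queries; a sketch, assuming the iris_query_so_overflow
 * definition from iris_query.c:
 *
 *    struct iris_query_so_overflow {
 *       uint64_t snapshots_landed;
 *       struct {
 *          uint64_t prim_storage_needed[2];
 *          uint64_t num_prims[2];
 *       } stream[4];
 *    };
 *
 * with index [0] holding the begin snapshot and [1] the end snapshot.
 */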
iris_destroy_monitor_object(ctx, query->monitor);
query->monitor = NULL;
} else {
- iris_syncpt_reference(screen, &query->syncpt, NULL);
+ iris_syncobj_reference(screen, &query->syncobj, NULL);
screen->base.fence_reference(ctx->screen, &query->fence, NULL);
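/* Pointing the references at NULL follows the usual pipe_reference
 * idiom: the query's hold on the syncobj and fence is dropped, and
 * each object is destroyed once its last holder lets go.
 */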
}
free(query);
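/* Timestamp queries have no meaningful begin; iris_end_query() reuses
 * iris_begin_query() to write the single snapshot, then marks it
 * available immediately.
 */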
if (q->type == PIPE_QUERY_TIMESTAMP) {
iris_begin_query(ctx, query);
- iris_batch_reference_signal_syncpt(batch, &q->syncpt);
+ iris_batch_reference_signal_syncobj(batch, &q->syncobj);
mark_available(ice, q);
return true;
}
q->query_state_ref.offset +
offsetof(struct iris_query_snapshots, end));
- iris_batch_reference_signal_syncpt(batch, &q->syncpt);
+ iris_batch_reference_signal_syncobj(batch, &q->syncobj);
mark_available(ice, q);
return true;
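/* All other query types write an end snapshot and keep a reference to
 * the batch's signal syncobj, so result readers can wait on (or flush)
 * exactly the batch that produces the data.
 */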
if (!q->ready) {
struct iris_batch *batch = &ice->batches[q->batch_idx];
- if (q->syncpt == iris_batch_get_signal_syncpt(batch))
+ if (q->syncobj == iris_batch_get_signal_syncobj(batch))
iris_batch_flush(batch);
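/* The flush above matters when the query's snapshots live in a batch
 * that has not been submitted yet; without it, the wait below could
 * never be satisfied.
 */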
while (!READ_ONCE(q->map->snapshots_landed)) {
if (wait)
- iris_wait_syncpt(ctx->screen, q->syncpt, INT64_MAX);
+ iris_wait_syncobj(ctx->screen, q->syncobj, INT64_MAX);
else
return false;
}
* now so that progress happens. Either way, copy the snapshots
* landed field to the destination resource.
*/
- if (q->syncpt == iris_batch_get_signal_syncpt(batch))
+ if (q->syncobj == iris_batch_get_signal_syncobj(batch))
iris_batch_flush(batch);
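/* The GPU copies the snapshots_landed word straight into the caller's
 * buffer: 4 bytes for 32-bit result types, 8 for 64-bit ones.
 */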
- ice->vtbl.copy_mem_mem(batch, dst_bo, offset,
+ batch->screen->vtbl.copy_mem_mem(batch, dst_bo, offset,
query_bo, snapshots_landed_offset,
result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
return;
if (q->ready) {
/* We happen to have the result on the CPU, so just copy it. */
if (result_type <= PIPE_QUERY_TYPE_U32) {
- ice->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
+ batch->screen->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
} else {
- ice->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
+ batch->screen->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
}
/* Make sure the result lands before they bind the QBO elsewhere
struct gen_mi_builder b;
gen_mi_builder_init(&b, batch);
+ iris_batch_sync_region_start(batch);
+
struct gen_mi_value result = calculate_result_on_gpu(devinfo, &b, q);
struct gen_mi_value dst =
result_type <= PIPE_QUERY_TYPE_U32 ? gen_mi_mem32(rw_bo(dst_bo, offset))
} else {
gen_mi_store(&b, dst, result);
}
+
+ iris_batch_sync_region_end(batch);
}
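/* Roughly, iris_batch_sync_region_start()/end() bracket a span of
 * batch commands so that buffer accesses recorded within the span are
 * tied to a single synchronization point, letting cross-batch
 * dependencies be tracked at region granularity; see iris_batch.h for
 * the precise contract.
 */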
static void
// have to be done dynamically at draw time, which is a pain
ice->state.statistics_counters_enabled = enable;
ice->state.dirty |= IRIS_DIRTY_CLIP |
- IRIS_DIRTY_GS |
IRIS_DIRTY_RASTER |
IRIS_DIRTY_STREAMOUT |
- IRIS_DIRTY_TCS |
- IRIS_DIRTY_TES |
- IRIS_DIRTY_VS |
IRIS_DIRTY_WM;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_VS;
}
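/* Per-stage dirty bits moved out of the main mask into a second 64-bit
 * field so the driver stops running out of bits; roughly:
 *
 *    struct iris_context {
 *       struct {
 *          uint64_t dirty;        // IRIS_DIRTY_* (non-stage state)
 *          uint64_t stage_dirty;  // IRIS_STAGE_DIRTY_* (per-stage state)
 *          ...
 *       } state;
 *    };
 */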
static void
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
+ iris_batch_sync_region_start(batch);
+
/* The CPU doesn't have the query result yet; use hardware predication */
ice->state.predicate = IRIS_PREDICATE_STATE_USE_BIT;
gen_mi_store(&b, query_mem64(q, offsetof(struct iris_query_snapshots,
predicate_result)), result);
ice->state.compute_predicate = bo;
+
+ iris_batch_sync_region_end(batch);
}
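/* The predicate value lands in the query's own snapshot buffer
 * (predicate_result), where MI predication can consume it, and
 * ice->state.compute_predicate remembers the BO so compute dispatches
 * can load the predicate from memory separately.
 */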
static void
genX(init_query)(struct iris_context *ice)
{
struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
ctx->create_query = iris_create_query;
ctx->create_batch_query = iris_create_batch_query;
ctx->set_active_query_state = iris_set_active_query_state;
ctx->render_condition = iris_render_condition;
- ice->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
+ screen->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
}
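/* The Gen-specific function table now hangs off iris_screen rather
 * than iris_context, hence the cast above and the batch->screen->vtbl
 * call sites throughout this file.  A sketch of the assumed shape
 * (see iris_screen.h):
 *
 *    struct iris_vtable {
 *       void (*store_data_imm64)(struct iris_batch *batch,
 *                                struct iris_bo *bo, uint32_t offset,
 *                                uint64_t imm);
 *       void (*store_register_mem64)(struct iris_batch *batch,
 *                                    uint32_t reg, struct iris_bo *bo,
 *                                    uint32_t offset, bool predicated);
 *       void (*resolve_conditional_render)(struct iris_context *ice);
 *       ...
 *    };
 */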