.get_result_resource = NULL
};
-static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
- unsigned query_type)
+static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
struct r600_query_sw *query;
FREE(rquery);
}
-static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
+static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
struct r600_query_hw *query)
{
unsigned buf_size = MAX2(query->result_size,
- ctx->screen->info.min_alloc_size);
+ rscreen->info.min_alloc_size);
/* Queries are normally read by the CPU after
 * being written by the GPU, hence staging is probably a good
* usage pattern.
*/
struct r600_resource *buf = (struct r600_resource*)
- pipe_buffer_create(ctx->b.screen, 0,
+ pipe_buffer_create(&rscreen->b, 0,
PIPE_USAGE_STAGING, buf_size);
if (!buf)
return NULL;
- if (!query->ops->prepare_buffer(ctx, query, buf)) {
+ if (!query->ops->prepare_buffer(rscreen, query, buf)) {
r600_resource_reference(&buf, NULL);
return NULL;
}
return buf;
}
-static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
+static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
struct r600_query_hw *query,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
- uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED);
+ uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
if (!results)
return false;
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
- unsigned max_rbs = ctx->screen->info.num_render_backends;
- unsigned enabled_rb_mask = ctx->screen->info.enabled_rb_mask;
+ unsigned max_rbs = rscreen->info.num_render_backends;
+ unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
unsigned num_results;
unsigned i, j;
.add_result = r600_query_hw_add_result,
};
-bool r600_query_hw_init(struct r600_common_context *rctx,
+bool r600_query_hw_init(struct r600_common_screen *rscreen,
struct r600_query_hw *query)
{
- query->buffer.buf = r600_new_query_buffer(rctx, query);
+ query->buffer.buf = r600_new_query_buffer(rscreen, query);
if (!query->buffer.buf)
return false;
return true;
}
-static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
+static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
unsigned query_type,
unsigned index)
{
switch (query_type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- query->result_size = 16 * rctx->screen->info.num_render_backends;
+ query->result_size = 16 * rscreen->info.num_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
- query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 24;
query->num_cs_dw_begin = 8;
- query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
break;
case PIPE_QUERY_TIMESTAMP:
query->result_size = 16;
- query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
query->flags = R600_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on EG, 8 on R600. */
- query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
+ query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
query->result_size += 8; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
- query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
break;
default:
assert(0);
return NULL;
}
- if (!r600_query_hw_init(rctx, query)) {
+ if (!r600_query_hw_init(rscreen, query)) {
FREE(query);
return NULL;
}
*qbuf = query->buffer;
query->buffer.results_end = 0;
query->buffer.previous = qbuf;
- query->buffer.buf = r600_new_query_buffer(ctx, query);
+ query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
if (!query->buffer.buf)
return;
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_common_screen *rscreen =
+ (struct r600_common_screen *)ctx->screen;
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
- return r600_query_sw_create(ctx, query_type);
+ return r600_query_sw_create(query_type);
- return r600_query_hw_create(rctx, query_type, index);
+ return r600_query_hw_create(rscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
- query->buffer.buf = r600_new_query_buffer(rctx, query);
+ query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
} else {
- if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
+ if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
r600_resource_reference(&query->buffer.buf, NULL);
}
}
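
For reference, here is a minimal sketch of what the corresponding declarations in r600_query.h would look like after this change. Everything these helpers touch is reachable from the screen (rscreen->info, rscreen->ws, rscreen->b), which is why the context parameter can be dropped. Only the prepare_buffer and r600_query_hw_init signatures follow directly from the hunks above; the struct name, the omitted callbacks, and the exact header layout are assumptions for illustration, not part of the patch:

/* Sketch only: forward declarations so the prototypes stand alone. */
struct r600_common_screen;
struct r600_query_hw;
struct r600_resource;

struct r600_query_hw_ops {
	/* First parameter becomes the screen instead of the context,
	 * matching r600_query_hw_prepare_buffer in the diff above. */
	bool (*prepare_buffer)(struct r600_common_screen *,
			       struct r600_query_hw *,
			       struct r600_resource *);
	/* Other callbacks (emit_start, emit_stop, clear_result,
	 * add_result) are not touched by this patch and are omitted. */
};

/* Prototype updated to take the screen rather than a context, so a
 * query's result buffer can be allocated without any rendering context. */
bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query);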