#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
-#include "os/os_time.h"
+#include "util/os_time.h"
#include "tgsi/tgsi_text.h"
#define R600_MAX_STREAMS 4
case R600_QUERY_NUM_DB_CACHE_FLUSHES:
query->begin_result = rctx->num_db_cache_flushes;
break;
- case R600_QUERY_NUM_L2_INVALIDATES:
- query->begin_result = rctx->num_L2_invalidates;
- break;
- case R600_QUERY_NUM_L2_WRITEBACKS:
- query->begin_result = rctx->num_L2_writebacks;
- break;
case R600_QUERY_NUM_RESIDENT_HANDLES:
query->begin_result = rctx->num_resident_handles;
break;
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
- case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
case R600_QUERY_NUM_MAPPED_BUFFERS:
query->begin_result = 0;
break;
case R600_QUERY_NUM_DB_CACHE_FLUSHES:
query->end_result = rctx->num_db_cache_flushes;
break;
- case R600_QUERY_NUM_L2_INVALIDATES:
- query->end_result = rctx->num_L2_invalidates;
- break;
- case R600_QUERY_NUM_L2_WRITEBACKS:
- query->end_result = rctx->num_L2_writebacks;
- break;
case R600_QUERY_NUM_RESIDENT_HANDLES:
query->end_result = rctx->num_resident_handles;
break;
case R600_QUERY_NUM_SHADERS_CREATED:
query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
break;
- case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
- query->end_result = rctx->last_tex_ps_draw_ratio;
- break;
case R600_QUERY_NUM_SHADER_CACHE_HITS:
query->end_result =
p_atomic_read(&rctx->screen->num_shader_cache_hits);
}
r600_resource_reference(&query->buffer.buf, NULL);
- r600_resource_reference(&query->workaround_buf, NULL);
FREE(rquery);
}
memset(results, 0, buffer->b.b.width0);
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
- query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
+ query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
+ query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
unsigned max_rbs = rscreen->info.num_render_backends;
unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
unsigned num_results;
switch (query_type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
query->result_size = 16 * rscreen->info.num_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
unsigned type, int diff)
{
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
- type == PIPE_QUERY_OCCLUSION_PREDICATE) {
+ type == PIPE_QUERY_OCCLUSION_PREDICATE ||
+ type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
bool old_enable = rctx->num_occlusion_queries != 0;
bool old_perfect_enable =
rctx->num_perfect_occlusion_queries != 0;
rctx->num_occlusion_queries += diff;
assert(rctx->num_occlusion_queries >= 0);
- if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
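+ /* Conservative occlusion predicates may over-count, so only the
+  * remaining occlusion query types require precise ZPASS counts.
+  */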
+ if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
rctx->num_perfect_occlusion_queries += diff;
assert(rctx->num_perfect_occlusion_queries >= 0);
}
}
}
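/* Ask the CP to sample the streamout statistics for one stream and
 * write them to memory at va. */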
-static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
+static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
unsigned stream)
{
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_cmdbuf *cs = ctx->gfx.cs;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, va);
emit_sample_streamout(cs, va + 32 * stream, stream);
break;
case PIPE_QUERY_TIME_ELAPSED:
- if (ctx->chip_class >= SI) {
- /* Write the timestamp from the CP not waiting for
- * outstanding draws (top-of-pipe).
- */
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_COUNT_SEL |
- COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
- COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
- radeon_emit(cs, 0);
- radeon_emit(cs, 0);
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- } else {
- /* Write the timestamp after the last draw is done.
- * (bottom-of-pipe)
- */
- r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
- 0, EOP_DATA_SEL_TIMESTAMP,
- NULL, va, 0, query->b.type);
- }
+ /* Write the timestamp after the last draw is done.
+ * (bottom-of-pipe)
+ */
+ r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
+ 0, EOP_DATA_SEL_TIMESTAMP,
+ NULL, va, 0, query->b.type);
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_cmdbuf *cs = ctx->gfx.cs;
uint64_t fence_va = 0;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
va += 8;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_cmdbuf *cs = ctx->gfx.cs;
- if (ctx->chip_class >= GFX9) {
- radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
- radeon_emit(cs, op);
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- } else {
- radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
- radeon_emit(cs, va);
- radeon_emit(cs, op | ((va >> 32) & 0xFF));
- }
+ radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, op | ((va >> 32) & 0xFF));
r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
- if (query->workaround_buf) {
- op = PRED_OP(PREDICATION_OP_BOOL64);
- } else {
- switch (query->b.type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- op = PRED_OP(PREDICATION_OP_ZPASS);
- break;
- case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
- op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
- invert = !invert;
- break;
- default:
- assert(0);
- return;
- }
+ switch (query->b.type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
+ op = PRED_OP(PREDICATION_OP_ZPASS);
+ break;
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+ op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
+ invert = !invert;
+ break;
+ default:
+ assert(0);
+ return;
}
/* if true then invert, see GL_ARB_conditional_render_inverted */
if (invert)
op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
else
op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */
- /* Use the value written by compute shader as a workaround. Note that
- * the wait flag does not apply in this predication mode.
- *
- * The shader outputs the result value to L2. Workarounds only affect VI
- * and later, where the CP reads data from L2, so we don't need an
- * additional flush.
- */
- if (query->workaround_buf) {
- uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
- emit_set_predicate(ctx, query->workaround_buf, va, op);
- return;
- }
-
op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
/* emit predicate packets for all data blocks */
rquery->ops->destroy(rctx->screen, rquery);
}
-static boolean r600_begin_query(struct pipe_context *ctx,
- struct pipe_query *query)
+static bool r600_begin_query(struct pipe_context *ctx,
+ struct pipe_query *query)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
r600_query_hw_reset_buffers(rctx, query);
- r600_resource_reference(&query->workaround_buf, NULL);
-
r600_query_hw_emit_start(rctx, query);
if (!query->buffer.buf)
return false;
switch (rquery->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
params->start_offset = 0;
params->end_offset = 8;
params->fence_offset = max_rbs * 16;
}
break;
}
- case PIPE_QUERY_OCCLUSION_PREDICATE: {
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->b = result->b ||
}
}
-static boolean r600_get_query_result(struct pipe_context *ctx,
- struct pipe_query *query, boolean wait,
- union pipe_query_result *result)
+static bool r600_get_query_result(struct pipe_context *ctx,
+ struct pipe_query *query, bool wait,
+ union pipe_query_result *result)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
static void r600_get_query_result_resource(struct pipe_context *ctx,
struct pipe_query *query,
- boolean wait,
+ bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
* 1.x = fence_offset
* 1.y = pair_stride
* 1.z = pair_count
+ * 1.w = result_offset
+ * 2.x = buffer0 offset
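+ *    (these carry the low bits of offsets whose buffer bindings were
+ *     aligned down to 256 bytes)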
*
* BUFFER[0] = query result buffer
* BUFFER[1] = previous summary buffer
"DCL BUFFER[0]\n"
"DCL BUFFER[1]\n"
"DCL BUFFER[2]\n"
- "DCL CONST[0][0..1]\n"
+ "DCL CONST[0][0..2]\n"
"DCL TEMP[0..5]\n"
"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
"IMM[1] UINT32 {1, 2, 4, 8}\n"
"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
"UIF TEMP[5]\n"
/* Check result availability. */
- "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
+ "UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
+ "LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
"MOV TEMP[1], TEMP[0].zzzz\n"
"NOT TEMP[0].z, TEMP[0].zzzz\n"
/* Load result if available. */
"UIF TEMP[1]\n"
- "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
+ "UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
+ "LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
"ENDIF\n"
"ELSE\n"
/* Load previously accumulated result if requested. */
/* Load fence and check result availability */
"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
+ "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
"NOT TEMP[0].z, TEMP[0].zzzz\n"
/* Load start and end. */
"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
+ "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
"UIF TEMP[4]\n"
/* Store accumulated data for chaining. */
- "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
+ "STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n"
"ELSE\n"
"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
"UIF TEMP[4]\n"
/* Store result availability. */
"NOT TEMP[0].z, TEMP[0]\n"
"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
- "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
+ "STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n"
"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
"UIF TEMP[4]\n"
- "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
+ "STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n"
"ENDIF\n"
"ELSE\n"
/* Store result if it is available. */
"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
"UIF TEMP[4]\n"
- "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
+ "STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n"
"ELSE\n"
/* Clamping */
"UIF TEMP[0].yyyy\n"
"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
"ENDIF\n"
- "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
+ "STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n"
"ENDIF\n"
"ENDIF\n"
"ENDIF\n"
rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
pipe_resource_reference(&st->saved_const0.buffer, NULL);
- rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
+ rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
for (unsigned i = 0; i < 3; ++i)
pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
uint32_t fence_offset;
uint32_t pair_stride;
uint32_t pair_count;
+ uint32_t buffer_offset;  /* low bits of the result offset in BUFFER[2] (constant 1.w) */
+ uint32_t buffer0_offset; /* low bits of the query data offset in BUFFER[0] (constant 2.x) */
} consts;
if (!rctx->query_result_shader) {
}
if (query->buffer.previous) {
- u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
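+ /* Align the summary buffer to 256 bytes, matching the alignment
+  * applied to the shader buffer bindings below.
+  */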
+ u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 256,
&tmp_buffer_offset, &tmp_buffer);
if (!tmp_buffer)
return;
consts.config = 0;
if (index < 0)
consts.config |= 4;
- if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE)
+ if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
+ query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
consts.config |= 8;
else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
params.start_offset += qbuf->results_end - query->result_size;
}
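+ /* Shader buffer bindings are aligned down to 256 bytes; the low bits
+  * of the real offsets reach the shader through the constants
+  * (buffer0_offset for the query buffer, buffer_offset for the result).
+  */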
- rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
-
ssbo[0].buffer = &qbuf->buf->b.b;
- ssbo[0].buffer_offset = params.start_offset;
- ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
-
+ ssbo[0].buffer_offset = params.start_offset & ~0xff;
+ ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
+ consts.buffer0_offset = (params.start_offset & 0xff);
if (!qbuf->previous) {
ssbo[2].buffer = resource;
- ssbo[2].buffer_offset = offset;
- ssbo[2].buffer_size = 8;
-
- ((struct r600_resource *)resource)->TC_L2_dirty = true;
- }
+ ssbo[2].buffer_offset = offset & ~0xff;
+ ssbo[2].buffer_size = offset + 8;
+ consts.buffer_offset = (offset & 0xff);
+ } else
+ consts.buffer_offset = 0;
+ rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
- rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
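+ /* ~0 conservatively marks all three bindings as shader-writable. */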
+ rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);
if (wait && qbuf == &query->buffer) {
uint64_t va;
va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
va += params.fence_offset;
- r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
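+ /* The fence buffer is passed along so the wait can emit a reloc for it. */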
+ r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
}
rctx->b.launch_grid(&rctx->b, &grid);
static void r600_render_condition(struct pipe_context *ctx,
struct pipe_query *query,
- boolean condition,
+ bool condition,
enum pipe_render_cond_flag mode)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
/* Compute the size of SET_PREDICATION packets. */
atom->num_dw = 0;
if (query) {
- bool needs_workaround = false;
-
- /* There is a firmware regression in VI which causes successive
- * SET_PREDICATION packets to give the wrong answer for
- * non-inverted stream overflow predication.
- */
- if (rctx->chip_class >= VI && !condition &&
- (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
- (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
- (rquery->buffer.previous ||
- rquery->buffer.results_end > rquery->result_size)))) {
- needs_workaround = true;
- }
-
- if (needs_workaround && !rquery->workaround_buf) {
- bool old_force_off = rctx->render_cond_force_off;
- rctx->render_cond_force_off = true;
-
- u_suballocator_alloc(
- rctx->allocator_zeroed_memory, 8, 8,
- &rquery->workaround_offset,
- (struct pipe_resource **)&rquery->workaround_buf);
-
- /* Reset to NULL to avoid a redundant SET_PREDICATION
- * from launching the compute grid.
- */
- rctx->render_cond = NULL;
-
- ctx->get_query_result_resource(
- ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
- &rquery->workaround_buf->b.b, rquery->workaround_offset);
-
- /* Settings this in the render cond atom is too late,
- * so set it here. */
- rctx->flags |= rctx->screen->barrier_flags.L2_to_cp |
- R600_CONTEXT_FLUSH_FOR_RENDER_COND;
-
- rctx->render_cond_force_off = old_force_off;
- }
-
- if (needs_workaround) {
- atom->num_dw = 5;
- } else {
- for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
- atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
-
- if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
- atom->num_dw *= R600_MAX_STREAMS;
- }
+ for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
+ atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
+
+ if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
+ atom->num_dw *= R600_MAX_STREAMS;
}
rctx->render_cond = query;
{
struct r600_common_context *ctx =
(struct r600_common_context*)rscreen->aux_context;
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_cmdbuf *cs = ctx->gfx.cs;
struct r600_resource *buffer;
uint32_t *results;
unsigned i, mask = 0;
- unsigned max_rbs = ctx->screen->info.num_render_backends;
+ unsigned max_rbs;
+
+ if (ctx->family == CHIP_JUNIPER) {
+ /*
+ * Fix for predication lockups - the chip can only ever have
+ * 4 RBs, however it looks like the predication logic assumes
+ * there's 8, trying to read results from query buffers never
+ * written to. By increasing this number we'll write the
+ * status bit for these as per the normal disabled rb logic.
+ */
+ ctx->screen->info.num_render_backends = 8;
+ }
+ max_rbs = ctx->screen->info.num_render_backends;
assert(rscreen->chip_class <= CAYMAN);
- /* if backend_map query is supported by the kernel */
- if (rscreen->info.r600_gb_backend_map_valid) {
+ /*
+ * if backend_map query is supported by the kernel.
+ * Note the kernel drm driver for a long time never filled in the
+ * associated data on eg/cm, only on r600/r700, hence ignore the valid
+ * bit there if the map is zero.
+ * (Albeit some chips with just one active rb can have a valid 0 map.)
+ */
+ if (rscreen->info.r600_gb_backend_map_valid &&
+ (ctx->chip_class < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
unsigned backend_map = rscreen->info.r600_gb_backend_map;
unsigned item_width, item_mask;
r600_resource_reference(&buffer, NULL);
- if (mask)
+ if (mask) {
+ if (rscreen->debug_flags & DBG_INFO &&
+ mask != rscreen->info.enabled_rb_mask) {
+ printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
+ }
rscreen->info.enabled_rb_mask = mask;
+ }
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
#define XG(group_, name_, query_type_, type_, result_type_) \
XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
-static struct pipe_driver_query_info r600_driver_query_list[] = {
+static const struct pipe_driver_query_info r600_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
- X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
- X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
- X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
* which use it as a fallback path to detect the GPU type.
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
- if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
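+ /* These GPUs are only driven by the radeon kernel driver (DRM major
+  * version 2), so checking the minor version alone is enough.
+  */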
+ if (rscreen->info.drm_minor >= 42)
return ARRAY_SIZE(r600_driver_query_list);
- else if (rscreen->info.drm_major == 3) {
- if (rscreen->chip_class >= VI)
- return ARRAY_SIZE(r600_driver_query_list);
- else
- return ARRAY_SIZE(r600_driver_query_list) - 7;
- }
else
return ARRAY_SIZE(r600_driver_query_list) - 25;
}