#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
-#include "tgsi/tgsi_text.h"
+#include "util/u_suballoc.h"
#include "amd/common/sid.h"
#define SI_MAX_STREAMS 4
}
}
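+/* Flush the DMA IB and wait for it to finish, then sample the CPU clock.
+ * This is the CPU-side timing path behind SI_QUERY_TIME_ELAPSED_SDMA_SI. */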
+static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
+{
+ struct pipe_fence_handle *fence = NULL;
+
+ si_flush_dma_cs(sctx, 0, &fence);
+ if (fence) {
+ sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
+ sctx->ws->fence_reference(&fence, NULL);
+ }
+
+ return os_time_get_nano();
+}
+
static bool si_query_sw_begin(struct si_context *sctx,
struct si_query *rquery)
{
case PIPE_QUERY_TIMESTAMP_DISJOINT:
case PIPE_QUERY_GPU_FINISHED:
break;
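+ /* On SI, SDMA elapsed time is measured on the CPU around a full DMA flush. */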
+ case SI_QUERY_TIME_ELAPSED_SDMA_SI:
+ query->begin_result = si_finish_dma_get_cpu_time(sctx);
+ break;
case SI_QUERY_DRAW_CALLS:
- query->begin_result = sctx->b.num_draw_calls;
+ query->begin_result = sctx->num_draw_calls;
break;
case SI_QUERY_DECOMPRESS_CALLS:
- query->begin_result = sctx->b.num_decompress_calls;
+ query->begin_result = sctx->num_decompress_calls;
break;
case SI_QUERY_MRT_DRAW_CALLS:
- query->begin_result = sctx->b.num_mrt_draw_calls;
+ query->begin_result = sctx->num_mrt_draw_calls;
break;
case SI_QUERY_PRIM_RESTART_CALLS:
- query->begin_result = sctx->b.num_prim_restart_calls;
+ query->begin_result = sctx->num_prim_restart_calls;
break;
case SI_QUERY_SPILL_DRAW_CALLS:
- query->begin_result = sctx->b.num_spill_draw_calls;
+ query->begin_result = sctx->num_spill_draw_calls;
break;
case SI_QUERY_COMPUTE_CALLS:
- query->begin_result = sctx->b.num_compute_calls;
+ query->begin_result = sctx->num_compute_calls;
break;
case SI_QUERY_SPILL_COMPUTE_CALLS:
- query->begin_result = sctx->b.num_spill_compute_calls;
+ query->begin_result = sctx->num_spill_compute_calls;
break;
case SI_QUERY_DMA_CALLS:
- query->begin_result = sctx->b.num_dma_calls;
+ query->begin_result = sctx->num_dma_calls;
break;
case SI_QUERY_CP_DMA_CALLS:
- query->begin_result = sctx->b.num_cp_dma_calls;
+ query->begin_result = sctx->num_cp_dma_calls;
break;
case SI_QUERY_NUM_VS_FLUSHES:
- query->begin_result = sctx->b.num_vs_flushes;
+ query->begin_result = sctx->num_vs_flushes;
break;
case SI_QUERY_NUM_PS_FLUSHES:
- query->begin_result = sctx->b.num_ps_flushes;
+ query->begin_result = sctx->num_ps_flushes;
break;
case SI_QUERY_NUM_CS_FLUSHES:
- query->begin_result = sctx->b.num_cs_flushes;
+ query->begin_result = sctx->num_cs_flushes;
break;
case SI_QUERY_NUM_CB_CACHE_FLUSHES:
- query->begin_result = sctx->b.num_cb_cache_flushes;
+ query->begin_result = sctx->num_cb_cache_flushes;
break;
case SI_QUERY_NUM_DB_CACHE_FLUSHES:
- query->begin_result = sctx->b.num_db_cache_flushes;
+ query->begin_result = sctx->num_db_cache_flushes;
break;
case SI_QUERY_NUM_L2_INVALIDATES:
- query->begin_result = sctx->b.num_L2_invalidates;
+ query->begin_result = sctx->num_L2_invalidates;
break;
case SI_QUERY_NUM_L2_WRITEBACKS:
- query->begin_result = sctx->b.num_L2_writebacks;
+ query->begin_result = sctx->num_L2_writebacks;
break;
case SI_QUERY_NUM_RESIDENT_HANDLES:
- query->begin_result = sctx->b.num_resident_handles;
+ query->begin_result = sctx->num_resident_handles;
break;
case SI_QUERY_TC_OFFLOADED_SLOTS:
- query->begin_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
+ query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
break;
case SI_QUERY_TC_DIRECT_SLOTS:
- query->begin_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
+ query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
break;
case SI_QUERY_TC_NUM_SYNCS:
- query->begin_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
+ query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
break;
case SI_QUERY_REQUESTED_VRAM:
case SI_QUERY_REQUESTED_GTT:
case SI_QUERY_NUM_EVICTIONS:
case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
- query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
+ query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
break;
}
case SI_QUERY_GFX_BO_LIST_SIZE:
ws_id = winsys_id_from_type(query->b.type);
- query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
- query->begin_time = sctx->b.ws->query_value(sctx->b.ws,
+ query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
+ query->begin_time = sctx->ws->query_value(sctx->ws,
RADEON_NUM_GFX_IBS);
break;
case SI_QUERY_CS_THREAD_BUSY:
ws_id = winsys_id_from_type(query->b.type);
- query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
+ query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
query->begin_time = os_time_get_nano();
break;
case SI_QUERY_GALLIUM_THREAD_BUSY:
query->begin_result =
- sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
+ sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
query->begin_time = os_time_get_nano();
break;
case SI_QUERY_GPU_LOAD:
case PIPE_QUERY_TIMESTAMP_DISJOINT:
break;
case PIPE_QUERY_GPU_FINISHED:
- sctx->b.b.flush(&sctx->b.b, &query->fence, PIPE_FLUSH_DEFERRED);
+ sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
+ break;
+ case SI_QUERY_TIME_ELAPSED_SDMA_SI:
+ query->end_result = si_finish_dma_get_cpu_time(sctx);
break;
case SI_QUERY_DRAW_CALLS:
- query->end_result = sctx->b.num_draw_calls;
+ query->end_result = sctx->num_draw_calls;
break;
case SI_QUERY_DECOMPRESS_CALLS:
- query->end_result = sctx->b.num_decompress_calls;
+ query->end_result = sctx->num_decompress_calls;
break;
case SI_QUERY_MRT_DRAW_CALLS:
- query->end_result = sctx->b.num_mrt_draw_calls;
+ query->end_result = sctx->num_mrt_draw_calls;
break;
case SI_QUERY_PRIM_RESTART_CALLS:
- query->end_result = sctx->b.num_prim_restart_calls;
+ query->end_result = sctx->num_prim_restart_calls;
break;
case SI_QUERY_SPILL_DRAW_CALLS:
- query->end_result = sctx->b.num_spill_draw_calls;
+ query->end_result = sctx->num_spill_draw_calls;
break;
case SI_QUERY_COMPUTE_CALLS:
- query->end_result = sctx->b.num_compute_calls;
+ query->end_result = sctx->num_compute_calls;
break;
case SI_QUERY_SPILL_COMPUTE_CALLS:
- query->end_result = sctx->b.num_spill_compute_calls;
+ query->end_result = sctx->num_spill_compute_calls;
break;
case SI_QUERY_DMA_CALLS:
- query->end_result = sctx->b.num_dma_calls;
+ query->end_result = sctx->num_dma_calls;
break;
case SI_QUERY_CP_DMA_CALLS:
- query->end_result = sctx->b.num_cp_dma_calls;
+ query->end_result = sctx->num_cp_dma_calls;
break;
case SI_QUERY_NUM_VS_FLUSHES:
- query->end_result = sctx->b.num_vs_flushes;
+ query->end_result = sctx->num_vs_flushes;
break;
case SI_QUERY_NUM_PS_FLUSHES:
- query->end_result = sctx->b.num_ps_flushes;
+ query->end_result = sctx->num_ps_flushes;
break;
case SI_QUERY_NUM_CS_FLUSHES:
- query->end_result = sctx->b.num_cs_flushes;
+ query->end_result = sctx->num_cs_flushes;
break;
case SI_QUERY_NUM_CB_CACHE_FLUSHES:
- query->end_result = sctx->b.num_cb_cache_flushes;
+ query->end_result = sctx->num_cb_cache_flushes;
break;
case SI_QUERY_NUM_DB_CACHE_FLUSHES:
- query->end_result = sctx->b.num_db_cache_flushes;
+ query->end_result = sctx->num_db_cache_flushes;
break;
case SI_QUERY_NUM_L2_INVALIDATES:
- query->end_result = sctx->b.num_L2_invalidates;
+ query->end_result = sctx->num_L2_invalidates;
break;
case SI_QUERY_NUM_L2_WRITEBACKS:
- query->end_result = sctx->b.num_L2_writebacks;
+ query->end_result = sctx->num_L2_writebacks;
break;
case SI_QUERY_NUM_RESIDENT_HANDLES:
- query->end_result = sctx->b.num_resident_handles;
+ query->end_result = sctx->num_resident_handles;
break;
case SI_QUERY_TC_OFFLOADED_SLOTS:
- query->end_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
+ query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
break;
case SI_QUERY_TC_DIRECT_SLOTS:
- query->end_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
+ query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
break;
case SI_QUERY_TC_NUM_SYNCS:
- query->end_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
+ query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
break;
case SI_QUERY_REQUESTED_VRAM:
case SI_QUERY_REQUESTED_GTT:
case SI_QUERY_NUM_EVICTIONS:
case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
- query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
+ query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
break;
}
case SI_QUERY_GFX_BO_LIST_SIZE:
ws_id = winsys_id_from_type(query->b.type);
- query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
- query->end_time = sctx->b.ws->query_value(sctx->b.ws,
+ query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
+ query->end_time = sctx->ws->query_value(sctx->ws,
RADEON_NUM_GFX_IBS);
break;
case SI_QUERY_CS_THREAD_BUSY:
ws_id = winsys_id_from_type(query->b.type);
- query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
+ query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
query->end_time = os_time_get_nano();
break;
case SI_QUERY_GALLIUM_THREAD_BUSY:
query->end_result =
- sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
+ sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
query->end_time = os_time_get_nano();
break;
case SI_QUERY_GPU_LOAD:
query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
break;
case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
- query->end_result = sctx->b.last_tex_ps_draw_ratio;
+ query->end_result = sctx->last_tex_ps_draw_ratio;
break;
case SI_QUERY_NUM_SHADER_CACHE_HITS:
query->end_result =
result->timestamp_disjoint.disjoint = false;
return true;
case PIPE_QUERY_GPU_FINISHED: {
- struct pipe_screen *screen = sctx->b.b.screen;
- struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b.b;
+ struct pipe_screen *screen = sctx->b.screen;
+ struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b;
result->b = screen->fence_finish(screen, ctx, query->fence,
wait ? PIPE_TIMEOUT_INFINITE : 0);
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
- struct r600_resource *buf = (struct r600_resource*)
+ struct r600_resource *buf = r600_resource(
pipe_buffer_create(&sscreen->b, 0,
- PIPE_USAGE_STAGING, buf_size);
+ PIPE_USAGE_STAGING, buf_size));
if (!buf)
return NULL;
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
query->result_size = 16 * sscreen->info.num_render_backends;
query->result_size += 16; /* for the fence + alignment */
- query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
+ query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
+ break;
+ case SI_QUERY_TIME_ELAPSED_SDMA:
+ /* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
+ query->result_size = 64;
+ query->num_cs_dw_end = 0;
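+ /* Two 32-byte-aligned timestamp slots (start at 0, end at 32);
+ * nothing is emitted on the gfx CS for this query. */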
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 24;
- query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
+ query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
break;
case PIPE_QUERY_TIMESTAMP:
query->result_size = 16;
- query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
+ query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
query->flags = SI_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
/* 11 values on GCN. */
query->result_size = 11 * 16;
query->result_size += 8; /* for the fence + alignment */
- query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
+ query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
break;
default:
assert(0);
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
type == PIPE_QUERY_OCCLUSION_PREDICATE ||
type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
- bool old_enable = sctx->b.num_occlusion_queries != 0;
+ bool old_enable = sctx->num_occlusion_queries != 0;
bool old_perfect_enable =
- sctx->b.num_perfect_occlusion_queries != 0;
+ sctx->num_perfect_occlusion_queries != 0;
bool enable, perfect_enable;
- sctx->b.num_occlusion_queries += diff;
- assert(sctx->b.num_occlusion_queries >= 0);
+ sctx->num_occlusion_queries += diff;
+ assert(sctx->num_occlusion_queries >= 0);
if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
- sctx->b.num_perfect_occlusion_queries += diff;
- assert(sctx->b.num_perfect_occlusion_queries >= 0);
+ sctx->num_perfect_occlusion_queries += diff;
+ assert(sctx->num_perfect_occlusion_queries >= 0);
}
- enable = sctx->b.num_occlusion_queries != 0;
- perfect_enable = sctx->b.num_perfect_occlusion_queries != 0;
+ enable = sctx->num_occlusion_queries != 0;
+ perfect_enable = sctx->num_perfect_occlusion_queries != 0;
if (enable != old_enable || perfect_enable != old_perfect_enable) {
si_set_occlusion_query_state(sctx, old_perfect_enable);
}
}
-static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
+static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
unsigned stream)
{
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
switch (query->b.type) {
+ case SI_QUERY_TIME_ELAPSED_SDMA:
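+ /* The DMA timestamp helper is passed a byte offset within the buffer,
+ * hence the subtraction of the buffer's GPU address. */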
+ si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
+ return;
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_COUNT_SEL |
COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
- COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
+ COPY_DATA_DST_SEL(COPY_DATA_DST_MEM));
radeon_emit(cs, 0);
radeon_emit(cs, 0);
radeon_emit(cs, va);
default:
assert(0);
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
si_update_occlusion_query_state(sctx, query->b.type, 1);
si_update_prims_generated_query_state(sctx, query->b.type, 1);
- si_need_gfx_cs_space(sctx);
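+ /* SDMA timestamps are emitted on the DMA ring and need no gfx CS space. */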
+ if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
+ si_need_gfx_cs_space(sctx);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
query->ops->emit_start(sctx, query, query->buffer.buf, va);
- sctx->b.num_cs_dw_queries_suspend += query->num_cs_dw_end;
+ sctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void si_query_hw_do_emit_stop(struct si_context *sctx,
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t fence_va = 0;
switch (query->b.type) {
+ case SI_QUERY_TIME_ELAPSED_SDMA:
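+ /* The end timestamp goes 32 bytes after the start to satisfy the
+ * 32-byte offset requirement noted at query creation. */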
+ si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
+ return;
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
va += 8;
/* fall through */
case PIPE_QUERY_TIMESTAMP:
- si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
- 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
- 0, query->b.type);
+ si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
+ 0, EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_TIMESTAMP, NULL, va,
+ 0, query->b.type);
fence_va = va + 8;
break;
case PIPE_QUERY_PIPELINE_STATISTICS: {
default:
assert(0);
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
- if (fence_va)
- si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
- EOP_DATA_SEL_VALUE_32BIT,
- query->buffer.buf, fence_va, 0x80000000,
- query->b.type);
+ if (fence_va) {
+ si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ query->buffer.buf, fence_va, 0x80000000,
+ query->b.type);
+ }
}
static void si_query_hw_emit_stop(struct si_context *sctx,
query->buffer.results_end += query->result_size;
if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
- sctx->b.num_cs_dw_queries_suspend -= query->num_cs_dw_end;
+ sctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
si_update_occlusion_query_state(sctx, query->b.type, -1);
si_update_prims_generated_query_state(sctx, query->b.type, -1);
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
- if (ctx->b.chip_class >= GFX9) {
+ if (ctx->chip_class >= GFX9) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
radeon_emit(cs, op);
radeon_emit(cs, va);
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
- radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
-static void si_emit_query_predication(struct si_context *ctx,
- struct r600_atom *atom)
+static void si_emit_query_predication(struct si_context *ctx)
{
- struct si_query_hw *query = (struct si_query_hw *)ctx->b.render_cond;
+ struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
struct si_query_buffer *qbuf;
uint32_t op;
bool flag_wait, invert;
if (!query)
return;
- invert = ctx->b.render_cond_invert;
- flag_wait = ctx->b.render_cond_mode == PIPE_RENDER_COND_WAIT ||
- ctx->b.render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
+ invert = ctx->render_cond_invert;
+ flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
+ ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
if (query->workaround_buf) {
op = PRED_OP(PREDICATION_OP_BOOL64);
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
- query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
+ (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
+ query_type != SI_QUERY_TIME_ELAPSED_SDMA))
return si_query_sw_create(query_type);
return si_query_hw_create(sscreen, query_type, index);
/* Obtain a new buffer if the current one can't be mapped without a stall. */
if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
- !sctx->b.ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
+ !sctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = si_new_query_buffer(sctx->screen, query);
} else {
if (!query->buffer.buf)
return false;
- LIST_ADDTAIL(&query->list, &sctx->b.active_queries);
+ LIST_ADDTAIL(&query->list, &sctx->active_queries);
return true;
}
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
{
- /* Offsets apply to EG+ */
static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
params->start_offset = offsets[index];
params->end_offset = 88 + offsets[index];
case PIPE_QUERY_TIME_ELAPSED:
result->u64 += si_query_read_result(buffer, 0, 2, false);
break;
+ case SI_QUERY_TIME_ELAPSED_SDMA:
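+ /* Start/end timestamps live at dword indices 0 and 8 (byte offsets 0 and 32). */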
+ result->u64 += si_query_read_result(buffer, 0, 32/4, false);
+ break;
case PIPE_QUERY_TIMESTAMP:
result->u64 = *(uint64_t*)buffer;
break;
void *map;
if (rquery->b.flushed)
- map = sctx->b.ws->buffer_map(qbuf->buf->buf, NULL, usage);
+ map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
else
map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);
/* Convert the time to expected units. */
if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
+ rquery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
rquery->type == PIPE_QUERY_TIMESTAMP) {
result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
}
return true;
}
-/* Create the compute shader that is used to collect the results.
- *
- * One compute grid with a single thread is launched for every query result
- * buffer. The thread (optionally) reads a previous summary buffer, then
- * accumulates data from the query result buffer, and writes the result either
- * to a summary buffer to be consumed by the next grid invocation or to the
- * user-supplied buffer.
- *
- * Data layout:
- *
- * CONST
- * 0.x = end_offset
- * 0.y = result_stride
- * 0.z = result_count
- * 0.w = bit field:
- * 1: read previously accumulated values
- * 2: write accumulated values for chaining
- * 4: write result available
- * 8: convert result to boolean (0/1)
- * 16: only read one dword and use that as result
- * 32: apply timestamp conversion
- * 64: store full 64 bits result
- * 128: store signed 32 bits result
- * 256: SO_OVERFLOW mode: take the difference of two successive half-pairs
- * 1.x = fence_offset
- * 1.y = pair_stride
- * 1.z = pair_count
- *
- * BUFFER[0] = query result buffer
- * BUFFER[1] = previous summary buffer
- * BUFFER[2] = next summary buffer or user-supplied buffer
- */
-static void si_create_query_result_shader(struct si_context *sctx)
-{
- /* TEMP[0].xy = accumulated result so far
- * TEMP[0].z = result not available
- *
- * TEMP[1].x = current result index
- * TEMP[1].y = current pair index
- */
- static const char text_tmpl[] =
- "COMP\n"
- "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
- "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
- "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
- "DCL BUFFER[0]\n"
- "DCL BUFFER[1]\n"
- "DCL BUFFER[2]\n"
- "DCL CONST[0][0..1]\n"
- "DCL TEMP[0..5]\n"
- "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
- "IMM[1] UINT32 {1, 2, 4, 8}\n"
- "IMM[2] UINT32 {16, 32, 64, 128}\n"
- "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
- "IMM[4] UINT32 {256, 0, 0, 0}\n"
-
- "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
- "UIF TEMP[5]\n"
- /* Check result availability. */
- "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
- "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
- "MOV TEMP[1], TEMP[0].zzzz\n"
- "NOT TEMP[0].z, TEMP[0].zzzz\n"
-
- /* Load result if available. */
- "UIF TEMP[1]\n"
- "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
- "ENDIF\n"
- "ELSE\n"
- /* Load previously accumulated result if requested. */
- "MOV TEMP[0], IMM[0].xxxx\n"
- "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
- "UIF TEMP[4]\n"
- "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
- "ENDIF\n"
-
- "MOV TEMP[1].x, IMM[0].xxxx\n"
- "BGNLOOP\n"
- /* Break if accumulated result so far is not available. */
- "UIF TEMP[0].zzzz\n"
- "BRK\n"
- "ENDIF\n"
-
- /* Break if result_index >= result_count. */
- "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
- "UIF TEMP[5]\n"
- "BRK\n"
- "ENDIF\n"
-
- /* Load fence and check result availability */
- "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
- "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
- "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
- "NOT TEMP[0].z, TEMP[0].zzzz\n"
- "UIF TEMP[0].zzzz\n"
- "BRK\n"
- "ENDIF\n"
-
- "MOV TEMP[1].y, IMM[0].xxxx\n"
- "BGNLOOP\n"
- /* Load start and end. */
- "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
- "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
- "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
-
- "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
- "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
-
- "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"
-
- "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
- "UIF TEMP[5].zzzz\n"
- /* Load second start/end half-pair and
- * take the difference
- */
- "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
- "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
- "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
-
- "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
- "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
- "ENDIF\n"
-
- "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"
-
- /* Increment pair index */
- "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
- "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
- "UIF TEMP[5]\n"
- "BRK\n"
- "ENDIF\n"
- "ENDLOOP\n"
-
- /* Increment result index */
- "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
- "ENDLOOP\n"
- "ENDIF\n"
-
- "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
- "UIF TEMP[4]\n"
- /* Store accumulated data for chaining. */
- "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
- "ELSE\n"
- "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
- "UIF TEMP[4]\n"
- /* Store result availability. */
- "NOT TEMP[0].z, TEMP[0]\n"
- "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
- "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
-
- "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
- "UIF TEMP[4]\n"
- "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
- "ENDIF\n"
- "ELSE\n"
- /* Store result if it is available. */
- "NOT TEMP[4], TEMP[0].zzzz\n"
- "UIF TEMP[4]\n"
- /* Apply timestamp conversion */
- "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
- "UIF TEMP[4]\n"
- "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
- "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
- "ENDIF\n"
-
- /* Convert to boolean */
- "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
- "UIF TEMP[4]\n"
- "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
- "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
- "MOV TEMP[0].y, IMM[0].xxxx\n"
- "ENDIF\n"
-
- "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
- "UIF TEMP[4]\n"
- "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
- "ELSE\n"
- /* Clamping */
- "UIF TEMP[0].yyyy\n"
- "MOV TEMP[0].x, IMM[0].wwww\n"
- "ENDIF\n"
-
- "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
- "UIF TEMP[4]\n"
- "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
- "ENDIF\n"
-
- "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
- "ENDIF\n"
- "ENDIF\n"
- "ENDIF\n"
- "ENDIF\n"
-
- "END\n";
-
- char text[sizeof(text_tmpl) + 32];
- struct tgsi_token tokens[1024];
- struct pipe_compute_state state = {};
-
- /* Hard code the frequency into the shader so that the backend can
- * use the full range of optimizations for divide-by-constant.
- */
- snprintf(text, sizeof(text), text_tmpl,
- sctx->screen->info.clock_crystal_freq);
-
- if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
- assert(false);
- return;
- }
-
- state.ir_type = PIPE_SHADER_IR_TGSI;
- state.prog = tokens;
-
- sctx->b.query_result_shader = sctx->b.b.create_compute_state(&sctx->b.b, &state);
-}
-
static void si_restore_qbo_state(struct si_context *sctx,
struct si_qbo_state *st)
{
- sctx->b.b.bind_compute_state(&sctx->b.b, st->saved_compute);
+ sctx->b.bind_compute_state(&sctx->b, st->saved_compute);
- sctx->b.b.set_constant_buffer(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
+ sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
pipe_resource_reference(&st->saved_const0.buffer, NULL);
- sctx->b.b.set_shader_buffers(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
+ sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
for (unsigned i = 0; i < 3; ++i)
pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
uint32_t pair_count;
} consts;
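+ /* Lazily create the compute shader that accumulates query results. */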
- if (!sctx->b.query_result_shader) {
- si_create_query_result_shader(sctx);
- if (!sctx->b.query_result_shader)
+ if (!sctx->query_result_shader) {
+ sctx->query_result_shader = si_create_query_result_cs(sctx);
+ if (!sctx->query_result_shader)
return;
}
if (query->buffer.previous) {
- u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 16, 16,
+ u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
&tmp_buffer_offset, &tmp_buffer);
if (!tmp_buffer)
return;
ssbo[2] = ssbo[1];
- sctx->b.b.bind_compute_state(&sctx->b.b, sctx->b.query_result_shader);
+ sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);
grid.block[0] = 1;
grid.block[1] = 1;
break;
}
- sctx->b.flags |= sctx->screen->barrier_flags.cp_to_L2;
+ sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;
for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
if (query->b.type != PIPE_QUERY_TIMESTAMP) {
params.start_offset += qbuf->results_end - query->result_size;
}
- sctx->b.b.set_constant_buffer(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+ sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
ssbo[0].buffer = &qbuf->buf->b.b;
ssbo[0].buffer_offset = params.start_offset;
ssbo[2].buffer_offset = offset;
ssbo[2].buffer_size = 8;
- ((struct r600_resource *)resource)->TC_L2_dirty = true;
+ r600_resource(resource)->TC_L2_dirty = true;
}
- sctx->b.b.set_shader_buffers(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
+ sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
if (wait && qbuf == &query->buffer) {
uint64_t va;
va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
va += params.fence_offset;
- si_gfx_wait_fence(sctx, va, 0x80000000, 0x80000000);
+ si_cp_wait_mem(sctx, va, 0x80000000, 0x80000000, 0);
}
- sctx->b.b.launch_grid(&sctx->b.b, &grid);
- sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+ sctx->b.launch_grid(&sctx->b, &grid);
+ sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
si_restore_qbo_state(sctx, &saved_state);
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_query_hw *rquery = (struct si_query_hw *)query;
- struct r600_atom *atom = &sctx->b.render_cond_atom;
+ struct si_atom *atom = &sctx->atoms.s.render_cond;
if (query) {
bool needs_workaround = false;
* SET_PREDICATION packets to give the wrong answer for
* non-inverted stream overflow predication.
*/
- if (((sctx->b.chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
- (sctx->b.chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
+ if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
+ (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
!condition &&
(rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
(rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
}
if (needs_workaround && !rquery->workaround_buf) {
- bool old_force_off = sctx->b.render_cond_force_off;
- sctx->b.render_cond_force_off = true;
+ bool old_force_off = sctx->render_cond_force_off;
+ sctx->render_cond_force_off = true;
u_suballocator_alloc(
- sctx->b.allocator_zeroed_memory, 8, 8,
+ sctx->allocator_zeroed_memory, 8, 8,
&rquery->workaround_offset,
(struct pipe_resource **)&rquery->workaround_buf);
/* Reset to NULL to avoid a redundant SET_PREDICATION
* from launching the compute grid.
*/
- sctx->b.render_cond = NULL;
+ sctx->render_cond = NULL;
ctx->get_query_result_resource(
ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
/* Setting this in the render cond atom is too late,
* so set it here. */
- sctx->b.flags |= sctx->screen->barrier_flags.L2_to_cp |
+ sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
SI_CONTEXT_FLUSH_FOR_RENDER_COND;
- sctx->b.render_cond_force_off = old_force_off;
+ sctx->render_cond_force_off = old_force_off;
}
}
- sctx->b.render_cond = query;
- sctx->b.render_cond_invert = condition;
- sctx->b.render_cond_mode = mode;
+ sctx->render_cond = query;
+ sctx->render_cond_invert = condition;
+ sctx->render_cond_mode = mode;
si_set_atom_dirty(sctx, atom, query != NULL);
}
{
struct si_query_hw *query;
- LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
+ LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
si_query_hw_emit_stop(sctx, query);
}
- assert(sctx->b.num_cs_dw_queries_suspend == 0);
+ assert(sctx->num_cs_dw_queries_suspend == 0);
}
void si_resume_queries(struct si_context *sctx)
{
struct si_query_hw *query;
- assert(sctx->b.num_cs_dw_queries_suspend == 0);
+ assert(sctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
si_need_gfx_cs_space(sctx);
- LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
+ LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
si_query_hw_emit_start(sctx, query);
}
}
X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
+
+ /* SRBM_STATUS2 */
X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
+
+ /* CP_STAT */
X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
- if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 42)
- return ARRAY_SIZE(si_driver_query_list);
- else if (sscreen->info.drm_major == 3) {
+ /* amdgpu */
+ if (sscreen->info.drm_major == 3) {
if (sscreen->info.chip_class >= VI)
return ARRAY_SIZE(si_driver_query_list);
else
return ARRAY_SIZE(si_driver_query_list) - 7;
}
- else
- return ARRAY_SIZE(si_driver_query_list) - 25;
+
+ /* radeon */
+ if (sscreen->info.has_read_registers_query) {
+ if (sscreen->info.chip_class == CIK)
+ return ARRAY_SIZE(si_driver_query_list) - 6;
+ else
+ return ARRAY_SIZE(si_driver_query_list) - 7;
+ }
+
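+ /* Without the read-registers query, trim the counters that need
+ * register reads. */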
+ return ARRAY_SIZE(si_driver_query_list) - 21;
}
static int si_get_driver_query_info(struct pipe_screen *screen,
void si_init_query_functions(struct si_context *sctx)
{
- sctx->b.b.create_query = si_create_query;
- sctx->b.b.create_batch_query = si_create_batch_query;
- sctx->b.b.destroy_query = si_destroy_query;
- sctx->b.b.begin_query = si_begin_query;
- sctx->b.b.end_query = si_end_query;
- sctx->b.b.get_query_result = si_get_query_result;
- sctx->b.b.get_query_result_resource = si_get_query_result_resource;
- sctx->b.render_cond_atom.emit = si_emit_query_predication;
-
- if (((struct si_screen*)sctx->b.b.screen)->info.num_render_backends > 0)
- sctx->b.b.render_condition = si_render_condition;
-
- LIST_INITHEAD(&sctx->b.active_queries);
+ sctx->b.create_query = si_create_query;
+ sctx->b.create_batch_query = si_create_batch_query;
+ sctx->b.destroy_query = si_destroy_query;
+ sctx->b.begin_query = si_begin_query;
+ sctx->b.end_query = si_end_query;
+ sctx->b.get_query_result = si_get_query_result;
+ sctx->b.get_query_result_resource = si_get_query_result_resource;
+ sctx->atoms.s.render_cond.emit = si_emit_query_predication;
+
+ if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
+ sctx->b.render_condition = si_render_condition;
+
+ LIST_INITHEAD(&sctx->active_queries);
}
void si_init_screen_query_functions(struct si_screen *sscreen)