 * being written by the GPU, hence staging is probably a good
* usage pattern.
*/
- struct r600_resource *buf = (struct r600_resource*)
+ struct r600_resource *buf = r600_resource(
pipe_buffer_create(&sscreen->b, 0,
- PIPE_USAGE_STAGING, buf_size);
+ PIPE_USAGE_STAGING, buf_size));
if (!buf)
return NULL;
}
}
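
The `+` lines above replace open-coded casts with the `r600_resource()` inline helper, which the later hunks (e.g. the `TC_L2_dirty` one) rely on as well. For reference, the helper is essentially a typed cast; a minimal sketch of its shape, assuming the definition in the driver's resource header:

	static inline struct r600_resource *r600_resource(struct pipe_resource *r)
	{
		return (struct r600_resource*)r;
	}
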
-static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
+static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
unsigned stream)
{
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
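
For context around this hunk: the EVENT_WRITE packet is followed by the event type and the 64-bit result address. A sketch of the complete function (not part of this patch), assuming the `event_type_for_stream()` helper in si_query.c that maps a stream index to the matching SAMPLE_STREAMOUTSTATS event:

	static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
					  unsigned stream)
	{
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
		radeon_emit(cs, va);		/* low 32 bits of the result address */
		radeon_emit(cs, va >> 32);	/* high 32 bits */
	}
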
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t fence_va = 0;
switch (query->b.type) {
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
if (ctx->chip_class >= GFX9) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
RADEON_PRIO_QUERY);
}
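
The GFX9 branch shown above uses the 3-dword SET_PREDICATION encoding, where the operation gets its own dword ahead of the full 64-bit address; older chips pack the operation together with the high address bits. A sketch of the two encodings, assuming the pre-GFX9 layout used elsewhere in si_query.c:

	if (ctx->chip_class >= GFX9) {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cs, op);		/* predication operation */
		radeon_emit(cs, va);		/* low 32 bits of the query result */
		radeon_emit(cs, va >> 32);	/* high 32 bits */
	} else {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, va);
		radeon_emit(cs, op | ((va >> 32) & 0xFF));	/* op + high address bits */
	}
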
-static void si_emit_query_predication(struct si_context *ctx,
- struct r600_atom *atom)
+static void si_emit_query_predication(struct si_context *ctx)
{
struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
struct si_query_buffer *qbuf;
ssbo[2].buffer_offset = offset;
ssbo[2].buffer_size = 8;
- ((struct r600_resource *)resource)->TC_L2_dirty = true;
+ r600_resource(resource)->TC_L2_dirty = true;
}
sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_query_hw *rquery = (struct si_query_hw *)query;
- struct r600_atom *atom = &sctx->render_cond_atom;
+ struct si_atom *atom = &sctx->atoms.s.render_cond;
if (query) {
bool needs_workaround = false;
X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
+
+ /* SRBM_STATUS2 */
X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
+
+ /* CP_STAT */
X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
- if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 42)
- return ARRAY_SIZE(si_driver_query_list);
- else if (sscreen->info.drm_major == 3) {
+ /* amdgpu */
+ if (sscreen->info.drm_major == 3) {
if (sscreen->info.chip_class >= VI)
return ARRAY_SIZE(si_driver_query_list);
else
return ARRAY_SIZE(si_driver_query_list) - 7;
}
- else
- return ARRAY_SIZE(si_driver_query_list) - 25;
+
+ /* radeon */
+ if (sscreen->info.has_read_registers_query) {
+ if (sscreen->info.chip_class == CIK)
+ return ARRAY_SIZE(si_driver_query_list) - 6;
+ else
+ return ARRAY_SIZE(si_driver_query_list) - 7;
+ }
+
+ return ARRAY_SIZE(si_driver_query_list) - 21;
}
static int si_get_driver_query_info(struct pipe_screen *screen,
sctx->b.end_query = si_end_query;
sctx->b.get_query_result = si_get_query_result;
sctx->b.get_query_result_resource = si_get_query_result_resource;
- sctx->render_cond_atom.emit = si_emit_query_predication;
+ sctx->atoms.s.render_cond.emit = si_emit_query_predication;
if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
sctx->b.render_condition = si_render_condition;
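
The render-condition hunks follow the atom rework: an atom is reduced to a bare emit callback keyed off the context, which is why the `struct r600_atom *` parameter disappears from `si_emit_query_predication` and the atom moves into `sctx->atoms.s`. A minimal sketch of the reworked type, assuming the layout in si_pipe.h:

	struct si_atom {
		void (*emit)(struct si_context *ctx);
	};

Dirty atoms are tracked in a bitmask on the context and emitted before draws, so the emit callback only needs the context itself.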