for (i = 0; i < R600_NUM_ATOMS; i++) {
if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
num_dw += ctx->atoms[i]->num_dw;
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
num_dw += R600_TRACE_CS_DWORDS;
}
}
/* The upper-bound of how much space a draw command would take. */
num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
num_dw += R600_TRACE_CS_DWORDS;
}
}
}
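(Sketch for context: the R600_TRACE_CS_DWORDS reserved above must cover one full trace marker per emission. Counting the packets written by r600_trace_emit further down suggests the value; the definition below is an assumption reconstructed from that accounting, not quoted from the header.)

    /* Assumed definition, derived by counting r600_trace_emit's writes:
     * PKT3_MEM_WRITE: header + va_lo + va_hi + cs->cdw + cs_count = 5 dwords
     * PKT3_NOP:       header + relocation index                   = 2 dwords */
    #define R600_TRACE_CS_DWORDS 7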
/* Flush the CS. */
- ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
+ ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->b.cs_count++);
}
void r600_begin_new_cs(struct r600_context *ctx)
goto fail;
}
- if (rscreen->trace_bo) {
- rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->trace_bo->cs_buf);
+ if (rscreen->b.trace_bo) {
+ rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->b.trace_bo->cs_buf);
} else {
rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
}
compute_memory_pool_delete(rscreen->global_pool);
}
- if (rscreen->trace_bo) {
- rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf);
- pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
+ if (rscreen->b.trace_bo) {
+ rscreen->b.ws->buffer_unmap(rscreen->b.trace_bo->cs_buf);
+ pipe_resource_reference((struct pipe_resource**)&rscreen->b.trace_bo, NULL);
}
rscreen->b.ws->destroy(rscreen->b.ws);
rscreen->global_pool = compute_memory_pool_new(rscreen);
- rscreen->cs_count = 0;
+ rscreen->b.cs_count = 0;
if (rscreen->b.info.drm_minor >= 28 && (rscreen->b.debug_flags & DBG_TRACE_CS)) {
- rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b,
+ rscreen->b.trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b,
PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING,
4096);
- if (rscreen->trace_bo) {
- rscreen->trace_ptr = rscreen->b.ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
+ if (rscreen->b.trace_bo) {
+ rscreen->b.trace_ptr = rscreen->b.ws->buffer_map(rscreen->b.trace_bo->cs_buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED);
}
}
* XXX: Not sure if this is the best place for global_pool. Also,
* it's not thread safe, so it won't work with multiple contexts. */
struct compute_memory_pool *global_pool;
- struct r600_resource *trace_bo;
- uint32_t *trace_ptr;
- unsigned cs_count;
};
struct r600_pipe_sampler_view {
{
atom->emit(&rctx->b, atom);
atom->dirty = false;
- if (rctx->screen->trace_bo) {
+ if (rctx->screen->b.trace_bo) {
r600_trace_emit(rctx);
}
}
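(The emit/dirty pair used in this loop is the r600_atom pattern; below is a minimal sketch of the struct, its layout assumed from the usage here and from the "encapsulates a state" comment further down, not quoted from the header.)

    struct r600_atom {
    	void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
    	unsigned num_dw; /* worst-case CS dwords emit() may write */
    	bool dirty;      /* set on state change, cleared once emitted */
    };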
(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
}
- if (rctx->screen->trace_bo) {
+ if (rctx->screen->b.trace_bo) {
r600_trace_emit(rctx);
}
uint64_t va;
uint32_t reloc;
- va = r600_resource_va(&rscreen->b.b, (void*)rscreen->trace_bo);
- reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->trace_bo, RADEON_USAGE_READWRITE);
+ va = r600_resource_va(&rscreen->b.b, (void*)rscreen->b.trace_bo);
+ reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->b.trace_bo, RADEON_USAGE_READWRITE);
radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
radeon_emit(cs, va & 0xFFFFFFFFUL);
radeon_emit(cs, (va >> 32UL) & 0xFFUL);
radeon_emit(cs, cs->cdw);
- radeon_emit(cs, rscreen->cs_count);
+ radeon_emit(cs, rscreen->b.cs_count);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc);
}
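(What the marker buys: the MEM_WRITE above stores the current CS offset and the CS serial number into trace_bo, which stays mapped with PIPE_TRANSFER_UNSYNCHRONIZED, so the CPU can still read it while the GPU is hung. A hedged sketch of the readback, mirroring the "cs %d dw %d" report in the SI flush path below; the helper name is hypothetical.)

    static void r600_report_trace(struct r600_common_screen *screen)
    {
    	/* trace_ptr[0] = cs->cdw at the last marker the GPU executed,
    	 * trace_ptr[1] = cs_count of that command stream. */
    	fprintf(stderr, "GPU reached cs %u, dw %u\n",
    		screen->trace_ptr[1], screen->trace_ptr[0]);
    }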
 * It must be locked prior to use and flushed before unlocking. */
struct pipe_context *aux_context;
pipe_mutex aux_context_lock;
+
+ struct r600_resource *trace_bo;
+ uint32_t *trace_ptr;
+ unsigned cs_count;
};
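(The aux_context comment above documents a lock/use/flush/unlock protocol; a hedged sketch of a caller, assuming the gallium pipe_context::flush signature of this era.)

    /* Hypothetical caller: the context may only be used while holding the
     * mutex and must be flushed before the mutex is released. */
    pipe_mutex_lock(rscreen->aux_context_lock);
    /* ... record upload/blit work on rscreen->aux_context ... */
    rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
    pipe_mutex_unlock(rscreen->aux_context_lock);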
/* This encapsulates a state or an operation which can be emitted into the GPU
num_dw += ctx->atoms.cache_flush->num_dw;
#if SI_TRACE_CS
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
num_dw += SI_TRACE_CS_DWORDS;
}
#endif
flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
#if SI_TRACE_CS
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
struct si_screen *sscreen = ctx->screen;
unsigned i;
for (i = 0; i < cs->cdw; i++) {
- fprintf(stderr, "[%4d] [%5d] 0x%08x\n", sscreen->cs_count, i, cs->buf[i]);
+ fprintf(stderr, "[%4d] [%5d] 0x%08x\n", sscreen->b.cs_count, i, cs->buf[i]);
}
- sscreen->cs_count++;
+ sscreen->b.cs_count++;
}
#endif
ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, 0);
#if SI_TRACE_CS
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
struct si_screen *sscreen = ctx->screen;
unsigned i;
for (i = 0; i < 10; i++) {
usleep(5);
- if (!ctx->ws->buffer_is_busy(sscreen->trace_bo->buf, RADEON_USAGE_READWRITE)) {
+ if (!ctx->ws->buffer_is_busy(sscreen->b.trace_bo->buf, RADEON_USAGE_READWRITE)) {
break;
}
}
if (i == 10) {
fprintf(stderr, "timeout on cs lockup likely happen at cs %d dw %d\n",
- sscreen->trace_ptr[1], sscreen->trace_ptr[0]);
+ sscreen->b.trace_ptr[1], sscreen->b.trace_ptr[0]);
} else {
- fprintf(stderr, "cs %d executed in %dms\n", sscreen->trace_ptr[1], i * 5);
+ fprintf(stderr, "cs %d executed in %dms\n", sscreen->b.trace_ptr[1], i * 5);
}
}
#endif
struct radeon_winsys_cs *cs = sctx->cs;
uint64_t va;
- va = r600_resource_va(&sscreen->screen, (void*)sscreen->trace_bo);
- r600_context_bo_reloc(sctx, sscreen->trace_bo, RADEON_USAGE_READWRITE);
+ va = r600_resource_va(&sscreen->screen, (void*)sscreen->b.trace_bo);
+ r600_context_bo_reloc(sctx, sscreen->b.trace_bo, RADEON_USAGE_READWRITE);
cs->buf[cs->cdw++] = PKT3(PKT3_WRITE_DATA, 4, 0);
cs->buf[cs->cdw++] = PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
                     PKT3_WRITE_DATA_WR_CONFIRM |
                     PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME);
cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;
cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFFFFFFFUL;
cs->buf[cs->cdw++] = cs->cdw;
- cs->buf[cs->cdw++] = sscreen->cs_count;
+ cs->buf[cs->cdw++] = sscreen->b.cs_count;
}
#endif
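(As on r600, every marker has a fixed cost, which the SI_TRACE_CS_DWORDS reservations above and below account for. The definition here is an assumption counted from the writes in si_trace_emit, not quoted from the header.)

    /* PKT3(PKT3_WRITE_DATA, 4, 0) = one header plus five payload dwords
     * (dst_sel/flags, va_lo, va_hi, cs->cdw, cs_count) = 6 dwords total. */
    #define SI_TRACE_CS_DWORDS 6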
r600_common_screen_cleanup(&sscreen->b);
#if SI_TRACE_CS
- if (sscreen->trace_bo) {
- sscreen->ws->buffer_unmap(sscreen->trace_bo->cs_buf);
- pipe_resource_reference((struct pipe_resource**)&sscreen->trace_bo, NULL);
+ if (sscreen->b.trace_bo) {
+ sscreen->ws->buffer_unmap(sscreen->b.trace_bo->cs_buf);
+ pipe_resource_reference((struct pipe_resource**)&sscreen->b.trace_bo, NULL);
}
#endif
sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;
#if SI_TRACE_CS
- sscreen->cs_count = 0;
+ sscreen->b.cs_count = 0;
if (sscreen->info.drm_minor >= 28) {
- sscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&sscreen->screen,
+ sscreen->b.trace_bo = (struct r600_resource*)pipe_buffer_create(&sscreen->screen,
PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING,
4096);
- if (sscreen->trace_bo) {
- sscreen->trace_ptr = sscreen->ws->buffer_map(sscreen->trace_bo->cs_buf, NULL,
+ if (sscreen->b.trace_bo) {
+ sscreen->b.trace_ptr = sscreen->ws->buffer_map(sscreen->b.trace_bo->cs_buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED);
}
}
struct si_screen {
struct r600_common_screen b;
-#if SI_TRACE_CS
- struct r600_resource *trace_bo;
- uint32_t *trace_ptr;
- unsigned cs_count;
-#endif
};
struct si_pipe_sampler_view {
count += state->ndw;
#if SI_TRACE_CS
/* for tracing each state */
- if (sctx->screen->trace_bo) {
+ if (sctx->screen->b.trace_bo) {
count += SI_TRACE_CS_DWORDS;
}
#endif
cs->cdw += state->ndw;
#if SI_TRACE_CS
- if (sctx->screen->trace_bo) {
+ if (sctx->screen->b.trace_bo) {
si_trace_emit(sctx);
}
#endif
sctx->pm4_dirty_cdwords = 0;
#if SI_TRACE_CS
- if (sctx->screen->trace_bo) {
+ if (sctx->screen->b.trace_bo) {
si_trace_emit(sctx);
}
#endif