ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
- si_flush_gfx_cs(ctx, 0, NULL);
+ si_flush_gfx_cs(sctx, 0, NULL);
busy = true;
}
}
!radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx_cs,
sctx->b.vram + rbo->vram_usage,
sctx->b.gtt + rbo->gart_usage))
- si_flush_gfx_cs(&sctx->b, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, rbo, usage, priority);
}
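The hunks above rely on the driver's struct embedding: the si_context pointer (sctx) passed to the retyped calls is obtained by casting the common or pipe context, with the cast itself living in elided parts of the affected functions. A minimal sketch of the assumed layout, limited to the fields these hunks touch:

struct r600_common_context {
	struct pipe_context b;            /* Gallium base context */
	struct radeon_winsys *ws;
	struct radeon_winsys_cs *gfx_cs;  /* current gfx command stream */
	uint64_t vram, gtt;               /* memory usage fed to flush heuristics */
	/* ... */
};

struct si_context {
	struct r600_common_context b;     /* base, hence the sctx->b.* accesses */
	/* ... radeonsi-specific state ... */
};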
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t fence_va = 0;
switch (query->b.type) {
va += 8;
/* fall through */
case PIPE_QUERY_TIMESTAMP:
- si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS,
- 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
- 0, query->b.type);
+ si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
+ 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
+ 0, query->b.type);
fence_va = va + 8;
break;
case PIPE_QUERY_PIPELINE_STATISTICS: {
RADEON_PRIO_QUERY);
if (fence_va)
- si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
- EOP_DATA_SEL_VALUE_32BIT,
- query->buffer.buf, fence_va, 0x80000000,
- query->b.type);
+ si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT,
+ query->buffer.buf, fence_va, 0x80000000,
+ query->b.type);
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
struct pipe_resource *resource,
unsigned offset)
{
+ struct si_context *sctx = (struct si_context*)rctx;
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
struct r600_query_buffer *qbuf_prev;
va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
va += params.fence_offset;
- si_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
+ si_gfx_wait_fence(sctx, va, 0x80000000, 0x80000000);
}
rctx->b.launch_grid(&rctx->b, &grid);
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
{
+ struct si_context *sctx = (struct si_context*)ctx;
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
* The result is that the kernel memory manager is never a bottleneck.
*/
if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
- si_flush_gfx_cs(rctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
rctx->num_alloc_tex_transfer_bytes = 0;
}
* \param old_value Previous fence value (for a bug workaround)
* \param new_value Fence value to write for this event.
*/
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
+void si_gfx_write_event_eop(struct si_context *ctx,
unsigned event, unsigned event_flags,
unsigned data_sel,
struct r600_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
if (data_sel != EOP_DATA_SEL_DISCARD)
sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
- if (ctx->chip_class >= GFX9) {
+ if (ctx->b.chip_class >= GFX9) {
/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
* counters) must immediately precede every timestamp event to
* prevent a GPU hang on GFX9.
* Occlusion queries don't need to do it here, because they
* always do ZPASS_DONE before the timestamp.
*/
- if (ctx->chip_class == GFX9 &&
+ if (ctx->b.chip_class == GFX9 &&
query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ struct r600_resource *scratch = ctx->b.eop_bug_scratch;
- assert(16 * ctx->screen->info.num_render_backends <=
+ assert(16 * ctx->b.screen->info.num_render_backends <=
scratch->b.b.width0);
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, scratch->gpu_address);
radeon_emit(cs, scratch->gpu_address >> 32);
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
radeon_emit(cs, 0); /* immediate data hi */
radeon_emit(cs, 0); /* unused */
} else {
- if (ctx->chip_class == CIK ||
- ctx->chip_class == VI) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ if (ctx->b.chip_class == CIK ||
+ ctx->b.chip_class == VI) {
+ struct r600_resource *scratch = ctx->b.eop_bug_scratch;
uint64_t va = scratch->gpu_address;
/* Two EOP events are required to make all engines go idle
radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
}
if (buf) {
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
}
return dwords;
}
-void si_gfx_wait_fence(struct r600_common_context *ctx,
+void si_gfx_wait_fence(struct si_context *ctx,
uint64_t va, uint32_t ref, uint32_t mask)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, fence_va >> 32);
radeon_emit(cs, 0x80000000);
} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
- si_gfx_write_event_eop(&ctx->b, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_VALUE_32BIT,
NULL, fence_va, 0x80000000,
PIPE_QUERY_GPU_FINISHED);
* not going to wait.
*/
threaded_context_unwrap_sync(ctx);
- si_flush_gfx_cs(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
rfence->gfx_unflushed.ctx = NULL;
if (!timeout)
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
}
-void si_flush_gfx_cs(void *context, unsigned flags,
+void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
- struct si_context *ctx = context;
struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct radeon_winsys *ws = ctx->b.ws;
static void si_pc_emit_stop(struct r600_common_context *ctx,
struct r600_resource *buffer, uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
- si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
- EOP_DATA_SEL_VALUE_32BIT,
- buffer, va, 0, SI_NOT_QUERY);
- si_gfx_wait_fence(ctx, va, 0, 0xffffffff);
+ si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT,
+ buffer, va, 0, SI_NOT_QUERY);
+ si_gfx_wait_fence(sctx, va, 0, 0xffffffff);
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
}
sctx->b.gfx_cs = ws->cs_create(sctx->b.ctx, RING_GFX,
- si_flush_gfx_cs, sctx);
+ (void*)si_flush_gfx_cs, sctx);
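The cast at the cs_create() call is needed because the winsys stores a type-erased flush callback, while si_flush_gfx_cs is now strongly typed. A sketch of the assumed callback shape (gfx_flush_cb is a hypothetical name used only for illustration; the winsys invokes it with the flush_ctx pointer, here sctx):

typedef void (*gfx_flush_cb)(void *ctx, unsigned flags,
			     struct pipe_fence_handle **fence);

Casting the function pointer instead of keeping a void* wrapper avoids a trampoline, at the cost of assuming struct si_context * and void * are interchangeable at the call boundary.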
/* Border colors. */
sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
uint64_t offset, uint64_t size, unsigned value);
/* si_fence.c */
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
+void si_gfx_write_event_eop(struct si_context *ctx,
unsigned event, unsigned event_flags,
unsigned data_sel,
struct r600_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct si_screen *screen);
-void si_gfx_wait_fence(struct r600_common_context *ctx,
+void si_gfx_wait_fence(struct si_context *ctx,
uint64_t va, uint32_t ref, uint32_t mask);
void si_init_fence_functions(struct si_context *ctx);
void si_init_screen_fence_functions(struct si_screen *screen);
void si_init_screen_get_functions(struct si_screen *sscreen);
/* si_gfx_cs.c */
-void si_flush_gfx_cs(void *context, unsigned flags,
+void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence);
void si_begin_new_gfx_cs(struct si_context *ctx);
void si_need_gfx_cs_space(struct si_context *ctx);
static inline void
si_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct r600_resource *res = (struct r600_resource *)r;
if (res) {
/* Add memory usage for need_gfx_cs_space */
- rctx->vram += res->vram_usage;
- rctx->gtt += res->gart_usage;
+ sctx->b.vram += res->vram_usage;
+ sctx->b.gtt += res->gart_usage;
}
}
/* Necessary for DCC */
if (sctx->b.chip_class == VI)
- si_gfx_write_event_eop(&sctx->b, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+ si_gfx_write_event_eop(sctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
0, EOP_DATA_SEL_DISCARD, NULL,
0, 0, SI_NOT_QUERY);
}
va = sctx->wait_mem_scratch->gpu_address;
sctx->wait_mem_number++;
- si_gfx_write_event_eop(&sctx->b, cb_db_event, tc_flags,
+ si_gfx_write_event_eop(sctx, cb_db_event, tc_flags,
EOP_DATA_SEL_VALUE_32BIT,
sctx->wait_mem_scratch, va,
sctx->wait_mem_number, SI_NOT_QUERY);
- si_gfx_wait_fence(&sctx->b, va, sctx->wait_mem_number, 0xffffffff);
+ si_gfx_wait_fence(sctx, va, sctx->wait_mem_number, 0xffffffff);
}
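The pair of calls above forms a CP-side wait: the EOP event writes the incremented wait_mem_number into wait_mem_scratch once prior work has drained, and si_gfx_wait_fence emits a WAIT_REG_MEM polling the same address. Roughly, the command processor behaves like this sketch (load_u32 is a hypothetical memory-read helper, not driver code; this shows the wait semantics, not the packet encoding):

	while ((load_u32(va) & 0xffffffff) != (sctx->wait_mem_number & 0xffffffff))
		; /* CP stalls until the bottom-of-pipe write lands */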
/* Make sure ME is idle (it executes most packets) before continuing.