/*
* Copyright 2013-2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
/* If the context wasn't flushed at fence creation, this is non-NULL. */
struct {
- struct r600_common_context *ctx;
+ struct si_context *ctx;
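/* Gfx flush count of the owning context at fence creation; while it
 * still matches, the fence's commands have not been submitted yet. */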
unsigned ib_index;
} gfx_unflushed;
 * \param new_fence Fence value to write for this event.
*/
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
+void si_gfx_write_event_eop(struct si_context *ctx,
unsigned event, unsigned event_flags,
unsigned data_sel,
struct r600_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
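/* EVENT_INDEX(5) marks this as an end-of-pipe (EOP) event, so the fence
 * or timestamp is written only after all prior work has drained. */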
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
if (data_sel != EOP_DATA_SEL_DISCARD)
sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
- if (ctx->chip_class >= GFX9) {
+ if (ctx->b.chip_class >= GFX9) {
/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
* counters) must immediately precede every timestamp event to
* prevent a GPU hang on GFX9.
* Occlusion queries don't need to do it here, because they
* always do ZPASS_DONE before the timestamp.
*/
- if (ctx->chip_class == GFX9 &&
+ if (ctx->b.chip_class == GFX9 &&
query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ struct r600_resource *scratch = ctx->b.eop_bug_scratch;
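/* The ZPASS_DONE below dumps 16 bytes of occlusion-counter data per
 * render backend, so the scratch buffer must cover all RBs. */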
- assert(16 * ctx->screen->info.num_render_backends <=
+ assert(16 * ctx->b.screen->info.num_render_backends <=
scratch->b.b.width0);
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, scratch->gpu_address);
radeon_emit(cs, scratch->gpu_address >> 32);
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
radeon_emit(cs, 0); /* immediate data hi */
radeon_emit(cs, 0); /* unused */
} else {
- if (ctx->chip_class == CIK ||
- ctx->chip_class == VI) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ if (ctx->b.chip_class == CIK ||
+ ctx->b.chip_class == VI) {
+ struct r600_resource *scratch = ctx->b.eop_bug_scratch;
uint64_t va = scratch->gpu_address;
/* Two EOP events are required to make all engines go idle
radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
}
if (buf) {
- radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
}
return dwords;
}
-void si_gfx_wait_fence(struct r600_common_context *ctx,
+void si_gfx_wait_fence(struct si_context *ctx,
uint64_t va, uint32_t ref, uint32_t mask)
{
- struct radeon_winsys_cs *cs = ctx->gfx_cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
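/* WAIT_REG_MEM makes the CP poll the 32-bit value at "va" until
 * (value & mask) == ref; MEM_SPACE(1) selects a memory operand
 * instead of a register. */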
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, 4); /* poll interval */
}
-static void si_add_fence_dependency(struct r600_common_context *rctx,
+static void si_add_fence_dependency(struct si_context *sctx,
struct pipe_fence_handle *fence)
{
- struct radeon_winsys *ws = rctx->ws;
+ struct radeon_winsys *ws = sctx->b.ws;
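/* Make both the SDMA and gfx IBs depend on the fence, so neither
 * engine starts executing commands recorded after this point until
 * the fence is signaled. */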
- if (rctx->dma_cs)
- ws->cs_add_fence_dependency(rctx->dma_cs, fence);
- ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
+ if (sctx->b.dma_cs)
+ ws->cs_add_fence_dependency(sctx->b.dma_cs, fence);
+ ws->cs_add_fence_dependency(sctx->b.gfx_cs, fence);
}
-static void si_add_syncobj_signal(struct r600_common_context *rctx,
+static void si_add_syncobj_signal(struct si_context *sctx,
struct pipe_fence_handle *fence)
{
- struct radeon_winsys *ws = rctx->ws;
-
- ws->cs_add_syncobj_signal(rctx->gfx_cs, fence);
+ sctx->b.ws->cs_add_syncobj_signal(sctx->b.gfx_cs, fence);
}
static void si_fence_reference(struct pipe_screen *screen,
uint64_t fence_va = fine->buf->gpu_address + fine->offset;
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, fine->buf,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, fine->buf,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
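/* Top of pipe: the CP writes the nonzero marker as soon as it reaches
 * this point in the IB, without waiting for earlier work to finish. */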
struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, fence_va >> 32);
radeon_emit(cs, 0x80000000);
} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
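/* Bottom of pipe: use an EOP event so the marker is written only after
 * all prior work has completed. */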
- si_gfx_write_event_eop(&ctx->b, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_VALUE_32BIT,
NULL, fence_va, 0x80000000,
PIPE_QUERY_GPU_FINISHED);
struct si_context *sctx;
sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
- if (rfence->gfx_unflushed.ctx == &sctx->b &&
+ if (rfence->gfx_unflushed.ctx == sctx &&
rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
* spec says:
* not going to wait.
*/
threaded_context_unwrap_sync(ctx);
- si_flush_gfx_cs(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
rfence->gfx_unflushed.ctx = NULL;
if (!timeout)
unsigned flags)
{
struct pipe_screen *screen = ctx->screen;
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys *ws = rctx->ws;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct radeon_winsys *ws = sctx->b.ws;
struct pipe_fence_handle *gfx_fence = NULL;
struct pipe_fence_handle *sdma_fence = NULL;
bool deferred_fence = false;
assert(flags & PIPE_FLUSH_DEFERRED);
assert(fence);
- si_fine_fence_set((struct si_context *)rctx, &fine, flags);
+ si_fine_fence_set(sctx, &fine, flags);
}
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
- if (rctx->dma_cs)
- si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
+ if (sctx->b.dma_cs)
+ si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
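/* If no new gfx commands were recorded since the last flush, don't
 * submit an empty IB; just reference the fence of the previous flush. */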
- if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
+ if (!radeon_emitted(sctx->b.gfx_cs, sctx->b.initial_gfx_cs_size)) {
if (fence)
- ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+ ws->fence_reference(&gfx_fence, sctx->b.last_gfx_fence);
if (!(flags & PIPE_FLUSH_DEFERRED))
- ws->cs_sync_flush(rctx->gfx_cs);
+ ws->cs_sync_flush(sctx->b.gfx_cs);
} else {
/* Instead of flushing, create a deferred fence. Constraints:
* - The state tracker must allow a deferred flush.
if (flags & PIPE_FLUSH_DEFERRED &&
!(flags & PIPE_FLUSH_FENCE_FD) &&
fence) {
- gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
+ gfx_fence = sctx->b.ws->cs_get_next_fence(sctx->b.gfx_cs);
deferred_fence = true;
} else {
- si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
+ si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
}
}
multi_fence->sdma = sdma_fence;
if (deferred_fence) {
- multi_fence->gfx_unflushed.ctx = rctx;
- multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+ multi_fence->gfx_unflushed.ctx = sctx;
+ multi_fence->gfx_unflushed.ib_index = sctx->b.num_gfx_cs_flushes;
}
multi_fence->fine = fine;
assert(!fine.buf);
finish:
if (!(flags & PIPE_FLUSH_DEFERRED)) {
- if (rctx->dma_cs)
- ws->cs_sync_flush(rctx->dma_cs);
- ws->cs_sync_flush(rctx->gfx_cs);
+ if (sctx->b.dma_cs)
+ ws->cs_sync_flush(sctx->b.dma_cs);
+ ws->cs_sync_flush(sctx->b.gfx_cs);
}
}
static void si_fence_server_signal(struct pipe_context *ctx,
struct pipe_fence_handle *fence)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
/* We should have at least one syncobj to signal */
assert(rfence->sdma || rfence->gfx);
if (rfence->sdma)
- si_add_syncobj_signal(rctx, rfence->sdma);
+ si_add_syncobj_signal(sctx, rfence->sdma);
if (rfence->gfx)
- si_add_syncobj_signal(rctx, rfence->gfx);
+ si_add_syncobj_signal(sctx, rfence->gfx);
/**
* The spec does not require a flush here. We insert a flush
static void si_fence_server_sync(struct pipe_context *ctx,
struct pipe_fence_handle *fence)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
util_queue_fence_wait(&rfence->ready);
/* Unflushed fences from the same context are no-ops. */
if (rfence->gfx_unflushed.ctx &&
- rfence->gfx_unflushed.ctx == rctx)
+ rfence->gfx_unflushed.ctx == sctx)
return;
/* All unflushed commands will not start execution before
si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
if (rfence->sdma)
- si_add_fence_dependency(rctx, rfence->sdma);
+ si_add_fence_dependency(sctx, rfence->sdma);
if (rfence->gfx)
- si_add_fence_dependency(rctx, rfence->gfx);
+ si_add_fence_dependency(sctx, rfence->gfx);
}
void si_init_fence_functions(struct si_context *ctx)