struct r600_resource *resource,
unsigned usage)
{
+ struct si_context *sctx = (struct si_context*)ctx;
enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
bool busy = false;
if (radeon_emitted(ctx->dma_cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
- si_flush_dma_cs(ctx, 0, NULL);
+ si_flush_dma_cs(sctx, 0, NULL);
busy = true;
}
}
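Throughout this series, a struct r600_common_context pointer is cast to struct si_context (and converted back with &sctx->b). That only works because si_context embeds r600_common_context as its first member, so both pointers refer to the same address. A minimal sketch of the layout this relies on, with field names abbreviated from r600_pipe_common.h and si_pipe.h (illustrative, not the full structs):

struct r600_common_context {
	struct pipe_context b;        /* base class for gallium */
	struct radeon_winsys *ws;
	struct radeon_winsys_cs *gfx_cs;
	struct radeon_winsys_cs *dma_cs;
	/* ... */
};

struct si_context {
	struct r600_common_context b; /* base class; must stay the first member */
	/* ... radeonsi-specific state ... */
};

The casts above (and the &ctx->b conversions below) are safe precisely because (struct si_context *)ctx and &sctx->b name the same byte.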
unsigned level, struct pipe_box *box,
bool commit)
{
- struct r600_common_context *ctx = (struct r600_common_context *)pctx;
+ struct si_context *ctx = (struct si_context *)pctx;
struct r600_resource *res = r600_resource(resource);
/*
 * Since buffer commitment changes cannot be pipelined, we need to
 * (a) flush any pending commands that refer to the buffer we're about
 *     to change, and
 * (b) wait for threaded submit to finish, including those that were
 *     triggered by some other, earlier operation.
 */
- if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
- res->buf, RADEON_USAGE_READWRITE)) {
+ if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
+ res->buf, RADEON_USAGE_READWRITE)) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- if (radeon_emitted(ctx->dma_cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
- res->buf, RADEON_USAGE_READWRITE)) {
+ if (radeon_emitted(ctx->b.dma_cs, 0) &&
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
+ res->buf, RADEON_USAGE_READWRITE)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- ctx->ws->cs_sync_flush(ctx->dma_cs);
- ctx->ws->cs_sync_flush(ctx->gfx_cs);
+ ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
+ ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
assert(resource->target == PIPE_BUFFER);
- return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+ return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
}
bool si_common_context_init(struct r600_common_context *rctx,
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
- si_flush_dma_cs,
+ (void*)si_flush_dma_cs,
rctx);
}
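The (void*) cast is needed because the winsys flush callback is declared with a void* context argument, while si_flush_dma_cs now takes struct si_context *. A paraphrase of the relevant callback type (see radeon_winsys.h for the real declaration; the exact parameter list is an assumption here):

/* cs_create's flush callback, paraphrased from radeon_winsys.h */
void (*flush)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);

Passing rctx as the flush context still works after the signature change, because rctx aliases the enclosing si_context through first-member embedding (see the layout sketch above).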
src_offset += rsrc->gpu_address;
ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);
+ si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
- si_need_dma_space(&sctx->b, ncopy * 5, rdst, NULL);
+ si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
srcy + copy_height != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->b.dma_cs;
- si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
struct radeon_winsys_cs *cs = sctx->b.dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
- si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
dstx + copy_width != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->b.dma_cs;
- si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
fprintf(f, "SDMA Dump Done.\n");
}
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
struct radeon_saved_cs *saved, enum ring_type ring)
{
- struct si_context *sctx = (struct si_context *)ctx;
struct pipe_screen *screen = sctx->b.b.screen;
FILE *f;
uint64_t addr;
}
ncopy = DIV_ROUND_UP(size, max_size);
- si_need_dma_space(&ctx->b, ncopy * 5, rdst, rsrc);
+ si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);
for (i = 0; i < ncopy; i++) {
count = MIN2(size, max_size);
/* the same maximum size as for copying */
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(&sctx->b, ncopy * 4, rdst, NULL);
+ si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
for (i = 0; i < ncopy; i++) {
csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
mt = G_009910_MICRO_TILE_MODE(tile_mode);
size = copy_height * pitch;
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(&ctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
#include "si_pipe.h"
#include "radeon/r600_cs.h"
-static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
+static void si_dma_emit_wait_idle(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = rctx->dma_cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
/* NOP waits for idle on Evergreen and later. */
- if (rctx->chip_class >= CIK)
+ if (sctx->b.chip_class >= CIK)
radeon_emit(cs, 0x00000000); /* NOP */
else
radeon_emit(cs, 0xf0000000); /* NOP */
}
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
struct r600_resource *dst, struct r600_resource *src)
{
- uint64_t vram = ctx->dma_cs->used_vram;
- uint64_t gtt = ctx->dma_cs->used_gart;
+ uint64_t vram = ctx->b.dma_cs->used_vram;
+ uint64_t gtt = ctx->b.dma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
gtt += dst->gart_usage;
}
if (src) {
vram += src->vram_usage;
gtt += src->gart_usage;
}
/* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
- RADEON_USAGE_WRITE))))
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, src->buf,
+ RADEON_USAGE_WRITE))))
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
 * is too large.
 *
 * IBs using too little memory are limited by the IB submission overhead.
 * IBs using too much memory are limited by the kernel/TTM overhead.
 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
 *
 * This heuristic makes sure that DMA requests are executed
 * very soon after the call is made and lowers memory usage.
 * It improves texture upload performance by keeping the DMA
 * engine busy while uploads are being submitted.
 */
num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
- ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
+ if (!ctx->b.ws->cs_check_space(ctx->b.dma_cs, num_dw) ||
+ ctx->b.dma_cs->used_vram + ctx->b.dma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->b.dma_cs, vram, gtt)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
+ assert((num_dw + ctx->b.dma_cs->current.cdw) <= ctx->b.dma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before, to
 * prevent read-after-write hazards.
 */
if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
- RADEON_USAGE_WRITE)))
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, src->buf,
+ RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
if (dst) {
- radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, dst,
RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
}
if (src) {
- radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, src,
RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
}
/* This function is called before all DMA calls, so increment num_dma_calls here. */
- ctx->num_dma_calls++;
+ ctx->b.num_dma_calls++;
}
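For reference, the caller pattern si_need_dma_space expects is the one visible in the copy and clear hunks above: compute the packet count, reserve all the space up front so that any flush or wait-idle happens once before emission, then emit. A hypothetical caller (example_sdma_copy is not a real Mesa function), using the 7-dword CIK COPY packet size from the cik_sdma.c hunk:

static void example_sdma_copy(struct si_context *ctx,
			      struct r600_resource *rdst, uint64_t dst_offset,
			      struct r600_resource *rsrc, uint64_t src_offset,
			      uint64_t size)
{
	unsigned ncopy, csize, i;

	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
	/* One reservation for the whole batch; this may flush the gfx IB,
	 * flush the DMA IB, or emit a wait-idle NOP, as implemented above. */
	si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
		/* ... emit one COPY packet covering csize bytes ... */
		size -= csize;
		src_offset += csize;
		dst_offset += csize;
	}
}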
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+ struct pipe_fence_handle **fence)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->dma_cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
struct radeon_saved_cs saved;
- bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
+ bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
if (!radeon_emitted(cs, 0)) {
if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+ ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
return;
}
if (check_vm)
- si_save_cs(rctx->ws, cs, &saved, true);
+ si_save_cs(ctx->b.ws, cs, &saved, true);
- rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
+ ctx->b.ws->cs_flush(cs, flags, &ctx->b.last_sdma_fence);
if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+ ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
if (check_vm) {
/* Use a conservative timeout of 800 ms, after which we won't wait any
 * longer and will assume the GPU is hung.
 */
- rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
+ ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_sdma_fence, 800*1000*1000);
- si_check_vm_faults(rctx, &saved, RING_DMA);
+ si_check_vm_faults(ctx, &saved, RING_DMA);
si_clear_saved_cs(&saved);
}
}
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value)
{
- struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
+ struct si_context *ctx = (struct si_context*)sscreen->aux_context;
mtx_lock(&sscreen->aux_context_lock);
- rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
+ ctx->b.dma_clear_buffer(&ctx->b.b, dst, offset, size, value);
sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
mtx_unlock(&sscreen->aux_context_lock);
}
/* If the context wasn't flushed at fence creation, this is non-NULL. */
struct {
- struct r600_common_context *ctx;
+ struct si_context *ctx;
unsigned ib_index;
} gfx_unflushed;
radeon_emit(cs, 4); /* poll interval */
}
-static void si_add_fence_dependency(struct r600_common_context *rctx,
+static void si_add_fence_dependency(struct si_context *sctx,
struct pipe_fence_handle *fence)
{
- struct radeon_winsys *ws = rctx->ws;
+ struct radeon_winsys *ws = sctx->b.ws;
- if (rctx->dma_cs)
- ws->cs_add_fence_dependency(rctx->dma_cs, fence);
- ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
+ if (sctx->b.dma_cs)
+ ws->cs_add_fence_dependency(sctx->b.dma_cs, fence);
+ ws->cs_add_fence_dependency(sctx->b.gfx_cs, fence);
}
static void si_add_syncobj_signal(struct r600_common_context *rctx,
struct si_context *sctx;
sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
- if (rfence->gfx_unflushed.ctx == &sctx->b &&
+ if (rfence->gfx_unflushed.ctx == sctx &&
rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
* spec says:
unsigned flags)
{
struct pipe_screen *screen = ctx->screen;
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys *ws = rctx->ws;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct radeon_winsys *ws = sctx->b.ws;
struct pipe_fence_handle *gfx_fence = NULL;
struct pipe_fence_handle *sdma_fence = NULL;
bool deferred_fence = false;
assert(flags & PIPE_FLUSH_DEFERRED);
assert(fence);
- si_fine_fence_set((struct si_context *)rctx, &fine, flags);
+ si_fine_fence_set(sctx, &fine, flags);
}
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
- if (rctx->dma_cs)
- si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
+ if (sctx->b.dma_cs)
+ si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
- if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
+ if (!radeon_emitted(sctx->b.gfx_cs, sctx->b.initial_gfx_cs_size)) {
if (fence)
- ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+ ws->fence_reference(&gfx_fence, sctx->b.last_gfx_fence);
if (!(flags & PIPE_FLUSH_DEFERRED))
- ws->cs_sync_flush(rctx->gfx_cs);
+ ws->cs_sync_flush(sctx->b.gfx_cs);
} else {
/* Instead of flushing, create a deferred fence. Constraints:
 * - The state tracker must allow a deferred flush.
 * - The state tracker must request a fence.
 * - fence_fd is not allowed.
 * Thread safety in fence_finish must be ensured by the state tracker.
 */
if (flags & PIPE_FLUSH_DEFERRED &&
!(flags & PIPE_FLUSH_FENCE_FD) &&
fence) {
- gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
+ gfx_fence = sctx->b.ws->cs_get_next_fence(sctx->b.gfx_cs);
deferred_fence = true;
} else {
- si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
+ si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
}
}
multi_fence->sdma = sdma_fence;
if (deferred_fence) {
- multi_fence->gfx_unflushed.ctx = rctx;
- multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+ multi_fence->gfx_unflushed.ctx = sctx;
+ multi_fence->gfx_unflushed.ib_index = sctx->b.num_gfx_cs_flushes;
}
multi_fence->fine = fine;
assert(!fine.buf);
finish:
if (!(flags & PIPE_FLUSH_DEFERRED)) {
- if (rctx->dma_cs)
- ws->cs_sync_flush(rctx->dma_cs);
- ws->cs_sync_flush(rctx->gfx_cs);
+ if (sctx->b.dma_cs)
+ ws->cs_sync_flush(sctx->b.dma_cs);
+ ws->cs_sync_flush(sctx->b.gfx_cs);
}
}
static void si_fence_server_sync(struct pipe_context *ctx,
struct pipe_fence_handle *fence)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
util_queue_fence_wait(&rfence->ready);
/* Unflushed fences from the same context are no-ops. */
if (rfence->gfx_unflushed.ctx &&
- rfence->gfx_unflushed.ctx == rctx)
+ rfence->gfx_unflushed.ctx == sctx)
return;
/* All unflushed commands will not start execution before
 * this fence dependency is signalled.
 *
 * Therefore we must flush.
 */
si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
if (rfence->sdma)
- si_add_fence_dependency(rctx, rfence->sdma);
+ si_add_fence_dependency(sctx, rfence->sdma);
if (rfence->gfx)
- si_add_fence_dependency(rctx, rfence->gfx);
+ si_add_fence_dependency(sctx, rfence->gfx);
}
void si_init_fence_functions(struct si_context *ctx)
*/
ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
- si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
+ si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
}
if (ctx->current_saved_cs)
void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
void si_init_debug_functions(struct si_context *sctx);
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
struct radeon_saved_cs *saved, enum ring_type ring);
bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
void si_init_dma_functions(struct si_context *sctx);
/* si_dma_cs.c */
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
struct r600_resource *dst, struct r600_resource *src);
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+ struct pipe_fence_handle **fence);
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value);