From 3069cb8b7818fb4fe3a94924a18a68fd2ea1c6bc Mon Sep 17 00:00:00 2001
From: =?utf8?q?Marek=20Ol=C5=A1=C3=A1k?=
Date: Sun, 1 Apr 2018 15:37:11 -0400
Subject: [PATCH] radeonsi: use r600_common_context less pt2

Acked-by: Timothy Arceri
---
 .../drivers/radeon/r600_buffer_common.c       |  4 +--
 src/gallium/drivers/radeon/r600_cs.h          |  2 +-
 src/gallium/drivers/radeon/r600_query.c       | 20 ++++++------
 src/gallium/drivers/radeon/r600_texture.c     |  3 +-
 src/gallium/drivers/radeonsi/si_fence.c       | 32 +++++++++----------
 src/gallium/drivers/radeonsi/si_gfx_cs.c      |  3 +-
 src/gallium/drivers/radeonsi/si_perfcounter.c | 11 ++++---
 src/gallium/drivers/radeonsi/si_pipe.c        |  2 +-
 src/gallium/drivers/radeonsi/si_pipe.h        | 12 +++----
 src/gallium/drivers/radeonsi/si_state_draw.c  |  6 ++--
 10 files changed, 49 insertions(+), 46 deletions(-)

diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index 8a4ad2dc6db..aff2360c2c0 100644
--- a/src/gallium/drivers/radeon/r600_buffer_common.c
+++ b/src/gallium/drivers/radeon/r600_buffer_common.c
@@ -65,10 +65,10 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
 					     resource->buf, rusage)) {
 		if (usage & PIPE_TRANSFER_DONTBLOCK) {
-			si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+			si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
 			return NULL;
 		} else {
-			si_flush_gfx_cs(ctx, 0, NULL);
+			si_flush_gfx_cs(sctx, 0, NULL);
 			busy = true;
 		}
 	}
diff --git a/src/gallium/drivers/radeon/r600_cs.h b/src/gallium/drivers/radeon/r600_cs.h
index 0283ad7bff5..c90f06bdc6d 100644
--- a/src/gallium/drivers/radeon/r600_cs.h
+++ b/src/gallium/drivers/radeon/r600_cs.h
@@ -105,7 +105,7 @@ radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
 	    !radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx_cs,
 					  sctx->b.vram + rbo->vram_usage,
 					  sctx->b.gtt + rbo->gart_usage))
-		si_flush_gfx_cs(&sctx->b, PIPE_FLUSH_ASYNC, NULL);
+		si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
 
 	radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, rbo, usage, priority);
 }
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index ad859c40d56..97412239a0c 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -827,7 +827,8 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
 				       struct r600_resource *buffer,
 				       uint64_t va)
 {
-	struct radeon_winsys_cs *cs = ctx->gfx_cs;
+	struct si_context *sctx = (struct si_context*)ctx;
+	struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
 	uint64_t fence_va = 0;
 
 	switch (query->b.type) {
@@ -858,9 +859,9 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
 		va += 8;
 		/* fall through */
 	case PIPE_QUERY_TIMESTAMP:
-		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS,
-				       0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
-				       0, query->b.type);
+		si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
+				       0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
+				       0, query->b.type);
 		fence_va = va + 8;
 		break;
 	case PIPE_QUERY_PIPELINE_STATISTICS: {
@@ -882,10 +883,10 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
 				  RADEON_PRIO_QUERY);
 
 	if (fence_va)
-		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
-				       EOP_DATA_SEL_VALUE_32BIT,
-				       query->buffer.buf, fence_va, 0x80000000,
-				       query->b.type);
+		si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+				       EOP_DATA_SEL_VALUE_32BIT,
+				       query->buffer.buf, fence_va, 0x80000000,
+				       query->b.type);
 }
 
 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
@@ -1626,6 +1627,7 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
 					       struct pipe_resource *resource,
 					       unsigned offset)
 {
+	struct si_context *sctx = (struct si_context*)rctx;
 	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
 	struct r600_query_buffer *qbuf;
 	struct r600_query_buffer *qbuf_prev;
@@ -1756,7 +1758,7 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
 			va = qbuf->buf->gpu_address + qbuf->results_end -
 			     query->result_size;
 			va += params.fence_offset;
-			si_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
+			si_gfx_wait_fence(sctx, va, 0x80000000, 0x80000000);
 		}
 
 		rctx->b.launch_grid(&rctx->b, &grid);
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 8ff427f8476..9be31955939 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -1828,6 +1828,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
 static void r600_texture_transfer_unmap(struct pipe_context *ctx,
 					struct pipe_transfer* transfer)
 {
+	struct si_context *sctx = (struct si_context*)ctx;
 	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
 	struct pipe_resource *texture = transfer->resource;
@@ -1863,7 +1864,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
 	 * The result is that the kernel memory manager is never a bottleneck.
 	 */
 	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-		si_flush_gfx_cs(rctx, PIPE_FLUSH_ASYNC, NULL);
+		si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
 		rctx->num_alloc_tex_transfer_bytes = 0;
 	}
 
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 3522244a016..885ee025062 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -64,13 +64,13 @@ struct si_multi_fence {
  * \param old_value	Previous fence value (for a bug workaround)
  * \param new_value	Fence value to write for this event.
  */
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
+void si_gfx_write_event_eop(struct si_context *ctx,
 			    unsigned event, unsigned event_flags,
 			    unsigned data_sel,
 			    struct r600_resource *buf, uint64_t va,
 			    uint32_t new_fence, unsigned query_type)
 {
-	struct radeon_winsys_cs *cs = ctx->gfx_cs;
+	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
 	unsigned op = EVENT_TYPE(event) |
 		      EVENT_INDEX(5) |
 		      event_flags;
@@ -81,7 +81,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
 	if (data_sel != EOP_DATA_SEL_DISCARD)
 		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
 
-	if (ctx->chip_class >= GFX9) {
+	if (ctx->b.chip_class >= GFX9) {
 		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
 		 * counters) must immediately precede every timestamp event to
 		 * prevent a GPU hang on GFX9.
@@ -89,20 +89,20 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
 		 * Occlusion queries don't need to do it here, because they
 		 * always do ZPASS_DONE before the timestamp.
 		 */
-		if (ctx->chip_class == GFX9 &&
+		if (ctx->b.chip_class == GFX9 &&
 		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
 		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
 		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
-			struct r600_resource *scratch = ctx->eop_bug_scratch;
+			struct r600_resource *scratch = ctx->b.eop_bug_scratch;
 
-			assert(16 * ctx->screen->info.num_render_backends <=
+			assert(16 * ctx->b.screen->info.num_render_backends <=
 			       scratch->b.b.width0);
 			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
 			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
 			radeon_emit(cs, scratch->gpu_address);
 			radeon_emit(cs, scratch->gpu_address >> 32);
 
-			radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+			radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
 						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
 		}
 
@@ -115,9 +115,9 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
 		radeon_emit(cs, 0); /* immediate data hi */
 		radeon_emit(cs, 0); /* unused */
 	} else {
-		if (ctx->chip_class == CIK ||
-		    ctx->chip_class == VI) {
-			struct r600_resource *scratch = ctx->eop_bug_scratch;
+		if (ctx->b.chip_class == CIK ||
+		    ctx->b.chip_class == VI) {
+			struct r600_resource *scratch = ctx->b.eop_bug_scratch;
 			uint64_t va = scratch->gpu_address;
 
 			/* Two EOP events are required to make all engines go idle
@@ -131,7 +131,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
 			radeon_emit(cs, 0); /* immediate data */
 			radeon_emit(cs, 0); /* unused */
 
-			radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+			radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
 						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
 		}
 
@@ -144,7 +144,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
 	}
 
 	if (buf) {
-		radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
+		radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
 					  RADEON_PRIO_QUERY);
 	}
 }
@@ -160,10 +160,10 @@ unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
 	return dwords;
 }
 
-void si_gfx_wait_fence(struct r600_common_context *ctx,
+void si_gfx_wait_fence(struct si_context *ctx,
 		       uint64_t va, uint32_t ref, uint32_t mask)
 {
-	struct radeon_winsys_cs *cs = ctx->gfx_cs;
+	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
 
 	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
 	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
@@ -277,7 +277,7 @@ static void si_fine_fence_set(struct si_context *ctx,
 		radeon_emit(cs, fence_va >> 32);
 		radeon_emit(cs, 0x80000000);
 	} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
-		si_gfx_write_event_eop(&ctx->b, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
 				       EOP_DATA_SEL_VALUE_32BIT,
 				       NULL, fence_va, 0x80000000,
 				       PIPE_QUERY_GPU_FINISHED);
@@ -376,7 +376,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
 		 * not going to wait.
 		 */
 		threaded_context_unwrap_sync(ctx);
-		si_flush_gfx_cs(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
+		si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
 		rfence->gfx_unflushed.ctx = NULL;
 
 		if (!timeout)
diff --git a/src/gallium/drivers/radeonsi/si_gfx_cs.c b/src/gallium/drivers/radeonsi/si_gfx_cs.c
index 203d7704f36..dc52371e14f 100644
--- a/src/gallium/drivers/radeonsi/si_gfx_cs.c
+++ b/src/gallium/drivers/radeonsi/si_gfx_cs.c
@@ -63,10 +63,9 @@ void si_need_gfx_cs_space(struct si_context *ctx)
 		si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
-void si_flush_gfx_cs(void *context, unsigned flags,
+void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
 		     struct pipe_fence_handle **fence)
 {
-	struct si_context *ctx = context;
 	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
 	struct radeon_winsys *ws = ctx->b.ws;
 
diff --git a/src/gallium/drivers/radeonsi/si_perfcounter.c b/src/gallium/drivers/radeonsi/si_perfcounter.c
index 42c05e5c222..46e2c7454f0 100644
--- a/src/gallium/drivers/radeonsi/si_perfcounter.c
+++ b/src/gallium/drivers/radeonsi/si_perfcounter.c
@@ -579,12 +579,13 @@ static void si_pc_emit_start(struct r600_common_context *ctx,
 static void si_pc_emit_stop(struct r600_common_context *ctx,
 			    struct r600_resource *buffer, uint64_t va)
 {
-	struct radeon_winsys_cs *cs = ctx->gfx_cs;
+	struct si_context *sctx = (struct si_context*)ctx;
+	struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
 
-	si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
-			       EOP_DATA_SEL_VALUE_32BIT,
-			       buffer, va, 0, SI_NOT_QUERY);
-	si_gfx_wait_fence(ctx, va, 0, 0xffffffff);
+	si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+			       EOP_DATA_SEL_VALUE_32BIT,
+			       buffer, va, 0, SI_NOT_QUERY);
+	si_gfx_wait_fence(sctx, va, 0, 0xffffffff);
 
 	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 	radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index c0b4f81e6da..f74ff0dffa0 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -286,7 +286,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
 	}
 
 	sctx->b.gfx_cs = ws->cs_create(sctx->b.ctx, RING_GFX,
-				       si_flush_gfx_cs, sctx);
+				       (void*)si_flush_gfx_cs, sctx);
 
 	/* Border colors. */
 	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 91300113e2c..52d6eed1097 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -730,13 +730,13 @@ void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst
 			    uint64_t offset, uint64_t size, unsigned value);
 
 /* si_fence.c */
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
+void si_gfx_write_event_eop(struct si_context *ctx,
 			    unsigned event, unsigned event_flags,
 			    unsigned data_sel,
 			    struct r600_resource *buf, uint64_t va,
 			    uint32_t new_fence, unsigned query_type);
 unsigned si_gfx_write_fence_dwords(struct si_screen *screen);
-void si_gfx_wait_fence(struct r600_common_context *ctx,
+void si_gfx_wait_fence(struct si_context *ctx,
 		       uint64_t va, uint32_t ref, uint32_t mask);
 void si_init_fence_functions(struct si_context *ctx);
 void si_init_screen_fence_functions(struct si_screen *screen);
@@ -748,7 +748,7 @@ const char *si_get_family_name(const struct si_screen *sscreen);
 void si_init_screen_get_functions(struct si_screen *sscreen);
 
 /* si_gfx_cs.c */
-void si_flush_gfx_cs(void *context, unsigned flags,
+void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
 		     struct pipe_fence_handle **fence);
 void si_begin_new_gfx_cs(struct si_context *ctx);
 void si_need_gfx_cs_space(struct si_context *ctx);
@@ -781,13 +781,13 @@ void si_init_viewport_functions(struct si_context *ctx);
 static inline void
 si_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
 {
-	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 	struct r600_resource *res = (struct r600_resource *)r;
 
 	if (res) {
 		/* Add memory usage for need_gfx_cs_space */
-		rctx->vram += res->vram_usage;
-		rctx->gtt += res->gart_usage;
+		sctx->b.vram += res->vram_usage;
+		sctx->b.gtt += res->gart_usage;
 	}
 }
 
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 7e86c0f07dc..fc17eec7665 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -913,7 +913,7 @@ void si_emit_cache_flush(struct si_context *sctx)
 
 		/* Necessary for DCC */
 		if (sctx->b.chip_class == VI)
-			si_gfx_write_event_eop(&sctx->b, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+			si_gfx_write_event_eop(sctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
 					       0, EOP_DATA_SEL_DISCARD, NULL,
 					       0, 0, SI_NOT_QUERY);
 	}
@@ -1028,11 +1028,11 @@ void si_emit_cache_flush(struct si_context *sctx)
 		va = sctx->wait_mem_scratch->gpu_address;
 		sctx->wait_mem_number++;
 
-		si_gfx_write_event_eop(&sctx->b, cb_db_event, tc_flags,
+		si_gfx_write_event_eop(sctx, cb_db_event, tc_flags,
 				       EOP_DATA_SEL_VALUE_32BIT,
 				       sctx->wait_mem_scratch, va,
 				       sctx->wait_mem_number, SI_NOT_QUERY);
-		si_gfx_wait_fence(&sctx->b, va, sctx->wait_mem_number, 0xffffffff);
+		si_gfx_wait_fence(sctx, va, sctx->wait_mem_number, 0xffffffff);
 	}
 
 	/* Make sure ME is idle (it executes most packets) before continuing.
-- 
2.30.2