From 8662e66bf237a820a704df112718be599136098b Mon Sep 17 00:00:00 2001 From: Andreas Hartmetz Date: Sat, 11 Jan 2014 16:00:50 +0100 Subject: [PATCH] radeonsi: Rename the commonly occurring rctx/r600 variables. MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit The "r" stands for R600. Reviewed-by: Marek Olšák --- src/gallium/drivers/radeonsi/si_blit.c | 160 +++--- src/gallium/drivers/radeonsi/si_buffer.c | 10 +- src/gallium/drivers/radeonsi/si_compute.c | 46 +- src/gallium/drivers/radeonsi/si_descriptors.c | 228 ++++----- src/gallium/drivers/radeonsi/si_hw_context.c | 8 +- src/gallium/drivers/radeonsi/si_pipe.c | 136 ++--- src/gallium/drivers/radeonsi/si_pipe.h | 20 +- src/gallium/drivers/radeonsi/si_pm4.c | 50 +- src/gallium/drivers/radeonsi/si_pm4.h | 14 +- src/gallium/drivers/radeonsi/si_query.c | 62 +-- src/gallium/drivers/radeonsi/si_resource.c | 10 +- src/gallium/drivers/radeonsi/si_resource.h | 2 +- src/gallium/drivers/radeonsi/si_shader.c | 18 +- src/gallium/drivers/radeonsi/si_shader.h | 2 +- src/gallium/drivers/radeonsi/si_state.c | 464 +++++++++--------- src/gallium/drivers/radeonsi/si_state.h | 42 +- src/gallium/drivers/radeonsi/si_state_draw.c | 236 ++++----- src/gallium/drivers/radeonsi/si_translate.c | 6 +- 18 files changed, 757 insertions(+), 757 deletions(-) diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c index 4a5e5723d6c..ba2ebe7d7f0 100644 --- a/src/gallium/drivers/radeonsi/si_blit.c +++ b/src/gallium/drivers/radeonsi/si_blit.c @@ -49,58 +49,58 @@ enum si_blitter_op /* bitmask */ static void si_blitter_begin(struct pipe_context *ctx, enum si_blitter_op op) { - struct si_context *rctx = (struct si_context *)ctx; - - si_context_queries_suspend(rctx); - - util_blitter_save_blend(rctx->blitter, rctx->queued.named.blend); - util_blitter_save_depth_stencil_alpha(rctx->blitter, rctx->queued.named.dsa); - util_blitter_save_stencil_ref(rctx->blitter, &rctx->stencil_ref); - util_blitter_save_rasterizer(rctx->blitter, rctx->queued.named.rasterizer); - util_blitter_save_fragment_shader(rctx->blitter, rctx->ps_shader); - util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader); - util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_elements); - if (rctx->queued.named.viewport) { - util_blitter_save_viewport(rctx->blitter, &rctx->queued.named.viewport->viewport); + struct si_context *sctx = (struct si_context *)ctx; + + si_context_queries_suspend(sctx); + + util_blitter_save_blend(sctx->blitter, sctx->queued.named.blend); + util_blitter_save_depth_stencil_alpha(sctx->blitter, sctx->queued.named.dsa); + util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref); + util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer); + util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader); + util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader); + util_blitter_save_vertex_elements(sctx->blitter, sctx->vertex_elements); + if (sctx->queued.named.viewport) { + util_blitter_save_viewport(sctx->blitter, &sctx->queued.named.viewport->viewport); } - util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer); - util_blitter_save_so_targets(rctx->blitter, rctx->b.streamout.num_targets, - (struct pipe_stream_output_target**)rctx->b.streamout.targets); + util_blitter_save_vertex_buffer_slot(sctx->blitter, sctx->vertex_buffer); + util_blitter_save_so_targets(sctx->blitter, sctx->b.streamout.num_targets, + (struct 
pipe_stream_output_target**)sctx->b.streamout.targets); if (op & SI_SAVE_FRAMEBUFFER) - util_blitter_save_framebuffer(rctx->blitter, &rctx->framebuffer); + util_blitter_save_framebuffer(sctx->blitter, &sctx->framebuffer); if (op & SI_SAVE_TEXTURES) { util_blitter_save_fragment_sampler_states( - rctx->blitter, rctx->samplers[PIPE_SHADER_FRAGMENT].n_samplers, - (void**)rctx->samplers[PIPE_SHADER_FRAGMENT].samplers); + sctx->blitter, sctx->samplers[PIPE_SHADER_FRAGMENT].n_samplers, + (void**)sctx->samplers[PIPE_SHADER_FRAGMENT].samplers); - util_blitter_save_fragment_sampler_views(rctx->blitter, - util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.desc.enabled_mask & + util_blitter_save_fragment_sampler_views(sctx->blitter, + util_last_bit(sctx->samplers[PIPE_SHADER_FRAGMENT].views.desc.enabled_mask & ((1 << NUM_TEX_UNITS) - 1)), - rctx->samplers[PIPE_SHADER_FRAGMENT].views.views); + sctx->samplers[PIPE_SHADER_FRAGMENT].views.views); } - if ((op & SI_DISABLE_RENDER_COND) && rctx->current_render_cond) { - rctx->saved_render_cond = rctx->current_render_cond; - rctx->saved_render_cond_cond = rctx->current_render_cond_cond; - rctx->saved_render_cond_mode = rctx->current_render_cond_mode; - rctx->b.b.render_condition(&rctx->b.b, NULL, FALSE, 0); + if ((op & SI_DISABLE_RENDER_COND) && sctx->current_render_cond) { + sctx->saved_render_cond = sctx->current_render_cond; + sctx->saved_render_cond_cond = sctx->current_render_cond_cond; + sctx->saved_render_cond_mode = sctx->current_render_cond_mode; + sctx->b.b.render_condition(&sctx->b.b, NULL, FALSE, 0); } } static void si_blitter_end(struct pipe_context *ctx) { - struct si_context *rctx = (struct si_context *)ctx; - if (rctx->saved_render_cond) { - rctx->b.b.render_condition(&rctx->b.b, - rctx->saved_render_cond, - rctx->saved_render_cond_cond, - rctx->saved_render_cond_mode); - rctx->saved_render_cond = NULL; + struct si_context *sctx = (struct si_context *)ctx; + if (sctx->saved_render_cond) { + sctx->b.b.render_condition(&sctx->b.b, + sctx->saved_render_cond, + sctx->saved_render_cond_cond, + sctx->saved_render_cond_mode); + sctx->saved_render_cond = NULL; } - si_context_queries_resume(rctx); + si_context_queries_resume(sctx); } static unsigned u_max_sample(struct pipe_resource *r) @@ -115,7 +115,7 @@ static void si_blit_decompress_depth(struct pipe_context *ctx, unsigned first_layer, unsigned last_layer, unsigned first_sample, unsigned last_sample) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; unsigned layer, level, sample, checked_last_layer, max_layer, max_sample; float depth = 1.0f; const struct util_format_description *desc; @@ -134,13 +134,13 @@ static void si_blit_decompress_depth(struct pipe_context *ctx, assert(!"No depth or stencil to uncompress"); return; case 3: - custom_dsa = rctx->custom_dsa_flush_depth_stencil; + custom_dsa = sctx->custom_dsa_flush_depth_stencil; break; case 2: - custom_dsa = rctx->custom_dsa_flush_stencil; + custom_dsa = sctx->custom_dsa_flush_stencil; break; case 1: - custom_dsa = rctx->custom_dsa_flush_depth; + custom_dsa = sctx->custom_dsa_flush_depth; break; } @@ -169,7 +169,7 @@ static void si_blit_decompress_depth(struct pipe_context *ctx, (struct pipe_resource*)flushed_depth_texture, &surf_tmpl); si_blitter_begin(ctx, SI_DECOMPRESS); - util_blitter_custom_depth_stencil(rctx->blitter, zsurf, cbsurf, 1 << sample, + util_blitter_custom_depth_stencil(sctx->blitter, zsurf, cbsurf, 1 << sample, custom_dsa[sample], depth); si_blitter_end(ctx); @@ 
-188,7 +188,7 @@ static void si_blit_decompress_depth(struct pipe_context *ctx, } } -static void si_blit_decompress_depth_in_place(struct si_context *rctx, +static void si_blit_decompress_depth_in_place(struct si_context *sctx, struct r600_texture *texture, unsigned first_level, unsigned last_level, unsigned first_layer, unsigned last_layer) @@ -213,13 +213,13 @@ static void si_blit_decompress_depth_in_place(struct si_context *rctx, surf_tmpl.u.tex.first_layer = layer; surf_tmpl.u.tex.last_layer = layer; - zsurf = rctx->b.b.create_surface(&rctx->b.b, &texture->resource.b.b, &surf_tmpl); + zsurf = sctx->b.b.create_surface(&sctx->b.b, &texture->resource.b.b, &surf_tmpl); - si_blitter_begin(&rctx->b.b, SI_DECOMPRESS); - util_blitter_custom_depth_stencil(rctx->blitter, zsurf, NULL, ~0, - rctx->custom_dsa_flush_inplace, + si_blitter_begin(&sctx->b.b, SI_DECOMPRESS); + util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0, + sctx->custom_dsa_flush_inplace, 1.0f); - si_blitter_end(&rctx->b.b); + si_blitter_end(&sctx->b.b); pipe_surface_reference(&zsurf, NULL); } @@ -232,7 +232,7 @@ static void si_blit_decompress_depth_in_place(struct si_context *rctx, } } -void si_flush_depth_textures(struct si_context *rctx, +void si_flush_depth_textures(struct si_context *sctx, struct si_textures_info *textures) { unsigned i; @@ -248,7 +248,7 @@ void si_flush_depth_textures(struct si_context *rctx, if (!tex->is_depth || tex->is_flushing_texture) continue; - si_blit_decompress_depth_in_place(rctx, tex, + si_blit_decompress_depth_in_place(sctx, tex, view->u.tex.first_level, view->u.tex.last_level, 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level)); } @@ -259,7 +259,7 @@ static void si_blit_decompress_color(struct pipe_context *ctx, unsigned first_level, unsigned last_level, unsigned first_layer, unsigned last_layer) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; unsigned layer, level, checked_last_layer, max_layer; if (!rtex->dirty_level_mask) @@ -284,8 +284,8 @@ static void si_blit_decompress_color(struct pipe_context *ctx, cbsurf = ctx->create_surface(ctx, &rtex->resource.b.b, &surf_tmpl); si_blitter_begin(ctx, SI_DECOMPRESS); - util_blitter_custom_color(rctx->blitter, cbsurf, - rctx->custom_blend_decompress); + util_blitter_custom_color(sctx->blitter, cbsurf, + sctx->custom_blend_decompress); si_blitter_end(ctx); pipe_surface_reference(&cbsurf, NULL); @@ -299,7 +299,7 @@ static void si_blit_decompress_color(struct pipe_context *ctx, } } -void si_decompress_color_textures(struct si_context *rctx, +void si_decompress_color_textures(struct si_context *sctx, struct si_textures_info *textures) { unsigned i; @@ -317,7 +317,7 @@ void si_decompress_color_textures(struct si_context *rctx, tex = (struct r600_texture *)view->texture; assert(tex->cmask.size || tex->fmask.size); - si_blit_decompress_color(&rctx->b.b, tex, + si_blit_decompress_color(&sctx->b.b, tex, view->u.tex.first_level, view->u.tex.last_level, 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level)); } @@ -327,11 +327,11 @@ static void si_clear(struct pipe_context *ctx, unsigned buffers, const union pipe_color_union *color, double depth, unsigned stencil) { - struct si_context *rctx = (struct si_context *)ctx; - struct pipe_framebuffer_state *fb = &rctx->framebuffer; + struct si_context *sctx = (struct si_context *)ctx; + struct pipe_framebuffer_state *fb = &sctx->framebuffer; si_blitter_begin(ctx, SI_CLEAR); - util_blitter_clear(rctx->blitter, fb->width, 
fb->height, + util_blitter_clear(sctx->blitter, fb->width, fb->height, util_framebuffer_get_num_layers(fb), buffers, color, depth, stencil); si_blitter_end(ctx); @@ -343,10 +343,10 @@ static void si_clear_render_target(struct pipe_context *ctx, unsigned dstx, unsigned dsty, unsigned width, unsigned height) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; si_blitter_begin(ctx, SI_CLEAR_SURFACE); - util_blitter_clear_render_target(rctx->blitter, dst, color, + util_blitter_clear_render_target(sctx->blitter, dst, color, dstx, dsty, width, height); si_blitter_end(ctx); } @@ -359,10 +359,10 @@ static void si_clear_depth_stencil(struct pipe_context *ctx, unsigned dstx, unsigned dsty, unsigned width, unsigned height) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; si_blitter_begin(ctx, SI_CLEAR_SURFACE); - util_blitter_clear_depth_stencil(rctx->blitter, dst, clear_flags, depth, stencil, + util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil, dstx, dsty, width, height); si_blitter_end(ctx); } @@ -376,11 +376,11 @@ static void si_decompress_subresource(struct pipe_context *ctx, unsigned level, unsigned first_layer, unsigned last_layer) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct r600_texture *rtex = (struct r600_texture*)tex; if (rtex->is_depth && !rtex->is_flushing_texture) { - si_blit_decompress_depth_in_place(rctx, rtex, + si_blit_decompress_depth_in_place(sctx, rtex, level, level, first_layer, last_layer); } else if (rtex->fmask.size || rtex->cmask.size) { @@ -490,7 +490,7 @@ static void si_resource_copy_region(struct pipe_context *ctx, unsigned src_level, const struct pipe_box *src_box) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct texture_orig_info orig_info[2]; struct pipe_box sbox; const struct pipe_box *psbox = src_box; @@ -498,7 +498,7 @@ static void si_resource_copy_region(struct pipe_context *ctx, /* Fallback for buffers. 
*/ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { - si_copy_buffer(rctx, dst, src, dstx, src_box->x, src_box->width); + si_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width); return; } @@ -528,7 +528,7 @@ static void si_resource_copy_region(struct pipe_context *ctx, /* translate the dst box as well */ dstx = util_format_get_nblocksx(orig_info[1].format, dstx); dsty = util_format_get_nblocksy(orig_info[1].format, dsty); - } else if (!util_blitter_is_copy_supported(rctx->blitter, dst, src)) { + } else if (!util_blitter_is_copy_supported(sctx->blitter, dst, src)) { unsigned blocksize = util_format_get_blocksize(src->format); switch (blocksize) { @@ -572,7 +572,7 @@ static void si_resource_copy_region(struct pipe_context *ctx, } si_blitter_begin(ctx, SI_COPY); - util_blitter_copy_texture(rctx->blitter, dst, dst_level, dstx, dsty, dstz, + util_blitter_copy_texture(sctx->blitter, dst, dst_level, dstx, dsty, dstz, src, src_level, psbox); si_blitter_end(ctx); @@ -622,7 +622,7 @@ static enum pipe_format int_to_norm_format(enum pipe_format format) static bool do_hardware_msaa_resolve(struct pipe_context *ctx, const struct pipe_blit_info *info) { - struct si_context *rctx = (struct si_context*)ctx; + struct si_context *sctx = (struct si_context*)ctx; struct r600_texture *dst = (struct r600_texture*)info->dst.resource; unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level); unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level); @@ -653,11 +653,11 @@ static bool do_hardware_msaa_resolve(struct pipe_context *ctx, dst->surface.level[info->dst.level].mode >= RADEON_SURF_MODE_1D && !(dst->surface.flags & RADEON_SURF_SCANOUT)) { si_blitter_begin(ctx, SI_COLOR_RESOLVE); - util_blitter_custom_resolve_color(rctx->blitter, + util_blitter_custom_resolve_color(sctx->blitter, info->dst.resource, info->dst.level, info->dst.box.z, info->src.resource, info->src.box.z, - sample_mask, rctx->custom_blend_resolve, + sample_mask, sctx->custom_blend_resolve, format); si_blitter_end(ctx); return true; @@ -668,13 +668,13 @@ static bool do_hardware_msaa_resolve(struct pipe_context *ctx, static void si_blit(struct pipe_context *ctx, const struct pipe_blit_info *info) { - struct si_context *rctx = (struct si_context*)ctx; + struct si_context *sctx = (struct si_context*)ctx; if (do_hardware_msaa_resolve(ctx, info)) { return; } - assert(util_blitter_is_blit_supported(rctx->blitter, info)); + assert(util_blitter_is_blit_supported(sctx->blitter, info)); /* The driver doesn't decompress resources automatically while * u_blitter is rendering. 
*/ @@ -683,7 +683,7 @@ static void si_blit(struct pipe_context *ctx, info->src.box.z + info->src.box.depth - 1); si_blitter_begin(ctx, SI_BLIT); - util_blitter_blit(rctx->blitter, info); + util_blitter_blit(sctx->blitter, info); si_blitter_end(ctx); } @@ -692,13 +692,13 @@ static void si_flush_resource(struct pipe_context *ctx, { } -void si_init_blit_functions(struct si_context *rctx) +void si_init_blit_functions(struct si_context *sctx) { - rctx->b.b.clear = si_clear; - rctx->b.b.clear_render_target = si_clear_render_target; - rctx->b.b.clear_depth_stencil = si_clear_depth_stencil; - rctx->b.b.resource_copy_region = si_resource_copy_region; - rctx->b.b.blit = si_blit; - rctx->b.b.flush_resource = si_flush_resource; - rctx->b.blit_decompress_depth = si_blit_decompress_depth; + sctx->b.b.clear = si_clear; + sctx->b.b.clear_render_target = si_clear_render_target; + sctx->b.b.clear_depth_stencil = si_clear_depth_stencil; + sctx->b.b.resource_copy_region = si_resource_copy_region; + sctx->b.b.blit = si_blit; + sctx->b.b.flush_resource = si_flush_resource; + sctx->b.blit_decompress_depth = si_blit_decompress_depth; } diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c index 111081df53c..6b05c9f52b8 100644 --- a/src/gallium/drivers/radeonsi/si_buffer.c +++ b/src/gallium/drivers/radeonsi/si_buffer.c @@ -35,14 +35,14 @@ #include "si.h" #include "si_pipe.h" -void si_upload_index_buffer(struct si_context *rctx, +void si_upload_index_buffer(struct si_context *sctx, struct pipe_index_buffer *ib, unsigned count) { - u_upload_data(rctx->b.uploader, 0, count * ib->index_size, + u_upload_data(sctx->b.uploader, 0, count * ib->index_size, ib->user_buffer, &ib->offset, &ib->buffer); } -void si_upload_const_buffer(struct si_context *rctx, struct r600_resource **rbuffer, +void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer, const uint8_t *ptr, unsigned size, uint32_t *const_offset) { @@ -59,12 +59,12 @@ void si_upload_const_buffer(struct si_context *rctx, struct r600_resource **rbuf tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]); } - u_upload_data(rctx->b.uploader, 0, size, tmpPtr, const_offset, + u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset, (struct pipe_resource**)rbuffer); free(tmpPtr); } else { - u_upload_data(rctx->b.uploader, 0, size, ptr, const_offset, + u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset, (struct pipe_resource**)rbuffer); } } diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c index 31359b1a2fc..3aea7999426 100644 --- a/src/gallium/drivers/radeonsi/si_compute.c +++ b/src/gallium/drivers/radeonsi/si_compute.c @@ -27,7 +27,7 @@ static void *si_create_compute_state( struct pipe_context *ctx, const struct pipe_compute_state *cso) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_compute *program = CALLOC_STRUCT(si_pipe_compute); const struct pipe_llvm_program_header *header; @@ -39,7 +39,7 @@ static void *si_create_compute_state( header = cso->prog; code = cso->prog + sizeof(struct pipe_llvm_program_header); - program->ctx = rctx; + program->ctx = sctx; program->local_size = cso->req_local_mem; program->private_size = cso->req_private_mem; program->input_size = cso->req_input_mem; @@ -51,7 +51,7 @@ static void *si_create_compute_state( for (i = 0; i < program->num_kernels; i++) { LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i, code, 
header->num_bytes); - si_compile_llvm(rctx, &program->kernels[i], mod); + si_compile_llvm(sctx, &program->kernels[i], mod); LLVMDisposeModule(mod); } @@ -60,8 +60,8 @@ static void *si_create_compute_state( static void si_bind_compute_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context*)ctx; - rctx->cs_shader_state.program = (struct si_pipe_compute*)state; + struct si_context *sctx = (struct si_context*)ctx; + sctx->cs_shader_state.program = (struct si_pipe_compute*)state; } static void si_set_global_binding( @@ -70,8 +70,8 @@ static void si_set_global_binding( uint32_t **handles) { unsigned i; - struct si_context *rctx = (struct si_context*)ctx; - struct si_pipe_compute *program = rctx->cs_shader_state.program; + struct si_context *sctx = (struct si_context*)ctx; + struct si_pipe_compute *program = sctx->cs_shader_state.program; if (!resources) { for (i = first; i < first + n; i++) { @@ -93,8 +93,8 @@ static void si_launch_grid( const uint *block_layout, const uint *grid_layout, uint32_t pc, const void *input) { - struct si_context *rctx = (struct si_context*)ctx; - struct si_pipe_compute *program = rctx->cs_shader_state.program; + struct si_context *sctx = (struct si_context*)ctx; + struct si_pipe_compute *program = sctx->cs_shader_state.program; struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state); struct r600_resource *kernel_args_buffer = NULL; unsigned kernel_args_size; @@ -134,7 +134,7 @@ static void si_launch_grid( memcpy(kernel_args + (num_work_size_bytes / 4), input, program->input_size); - si_upload_const_buffer(rctx, &kernel_args_buffer, (uint8_t*)kernel_args, + si_upload_const_buffer(sctx, &kernel_args_buffer, (uint8_t*)kernel_args, kernel_args_size, &kernel_args_offset); kernel_args_va = r600_resource_va(ctx->screen, (struct pipe_resource*)kernel_args_buffer); @@ -171,7 +171,7 @@ static void si_launch_grid( * kernel if we want to use something other than the default value, * which is now 0x22f. */ - if (rctx->b.chip_class <= SI) { + if (sctx->b.chip_class <= SI) { /* XXX: This should be: * (number of compute units) * 4 * (waves per simd) - 1 */ @@ -204,7 +204,7 @@ static void si_launch_grid( * the shader and 4 bytes allocated by the state tracker, then * we will set LDS_SIZE to 512 bytes rather than 256. 
*/ - if (rctx->b.chip_class <= SI) { + if (sctx->b.chip_class <= SI) { lds_blocks += align(program->local_size, 256) >> 8; } else { lds_blocks += align(program->local_size, 512) >> 9; @@ -250,12 +250,12 @@ static void si_launch_grid( si_pm4_inval_shader_cache(pm4); si_cmd_surface_sync(pm4, pm4->cp_coher_cntl); - si_pm4_emit(rctx, pm4); + si_pm4_emit(sctx, pm4); #if 0 - fprintf(stderr, "cdw: %i\n", rctx->cs->cdw); - for (i = 0; i < rctx->cs->cdw; i++) { - fprintf(stderr, "%4i : 0x%08X\n", i, rctx->cs->buf[i]); + fprintf(stderr, "cdw: %i\n", sctx->cs->cdw); + for (i = 0; i < sctx->cs->cdw; i++) { + fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]); } #endif @@ -287,13 +287,13 @@ static void si_set_compute_resources(struct pipe_context * ctx_, unsigned start, unsigned count, struct pipe_surface ** surfaces) { } -void si_init_compute_functions(struct si_context *rctx) +void si_init_compute_functions(struct si_context *sctx) { - rctx->b.b.create_compute_state = si_create_compute_state; - rctx->b.b.delete_compute_state = si_delete_compute_state; - rctx->b.b.bind_compute_state = si_bind_compute_state; + sctx->b.b.create_compute_state = si_create_compute_state; + sctx->b.b.delete_compute_state = si_delete_compute_state; + sctx->b.b.bind_compute_state = si_bind_compute_state; /* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */ - rctx->b.b.set_compute_resources = si_set_compute_resources; - rctx->b.b.set_global_binding = si_set_global_binding; - rctx->b.b.launch_grid = si_launch_grid; + sctx->b.b.set_compute_resources = si_set_compute_resources; + sctx->b.b.set_global_binding = si_set_global_binding; + sctx->b.b.launch_grid = si_launch_grid; } diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c index dad4a9feec5..e64799d904a 100644 --- a/src/gallium/drivers/radeonsi/si_descriptors.c +++ b/src/gallium/drivers/radeonsi/si_descriptors.c @@ -46,18 +46,18 @@ static uint32_t null_desc[8]; /* zeros */ /* Emit a CP DMA packet to do a copy from one buffer to another. * The size must fit in bits [20:0]. */ -static void si_emit_cp_dma_copy_buffer(struct si_context *rctx, +static void si_emit_cp_dma_copy_buffer(struct si_context *sctx, uint64_t dst_va, uint64_t src_va, unsigned size, unsigned flags) { - struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; + struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs; uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0; uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0; assert(size); assert((size & ((1<<21)-1)) == size); - if (rctx->b.chip_class >= CIK) { + if (sctx->b.chip_class >= CIK) { radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0)); radeon_emit(cs, sync_flag); /* CP_SYNC [31] */ radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */ @@ -76,18 +76,18 @@ static void si_emit_cp_dma_copy_buffer(struct si_context *rctx, } /* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */ -static void si_emit_cp_dma_clear_buffer(struct si_context *rctx, +static void si_emit_cp_dma_clear_buffer(struct si_context *sctx, uint64_t dst_va, unsigned size, uint32_t clear_value, unsigned flags) { - struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; + struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs; uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0; uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? 
PKT3_CP_DMA_CMD_RAW_WAIT : 0; assert(size); assert((size & ((1<<21)-1)) == size); - if (rctx->b.chip_class >= CIK) { + if (sctx->b.chip_class >= CIK) { radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0)); radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */ radeon_emit(cs, clear_value); /* DATA [31:0] */ @@ -105,7 +105,7 @@ static void si_emit_cp_dma_clear_buffer(struct si_context *rctx, } } -static void si_init_descriptors(struct si_context *rctx, +static void si_init_descriptors(struct si_context *sctx, struct si_descriptors *desc, unsigned shader_userdata_reg, unsigned element_dw_size, @@ -124,16 +124,16 @@ static void si_init_descriptors(struct si_context *rctx, desc->context_size = num_elements * element_dw_size * 4; desc->buffer = (struct r600_resource*) - pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM, + pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, SI_NUM_CONTEXTS * desc->context_size); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE); - va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE); + va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b); /* We don't check for CS space here, because this should be called * only once at context initialization. */ - si_emit_cp_dma_clear_buffer(rctx, va, desc->buffer->b.b.width0, 0, + si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0, R600_CP_DMA_SYNC); } @@ -142,7 +142,7 @@ static void si_release_descriptors(struct si_descriptors *desc) pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL); } -static void si_update_descriptors(struct si_context *rctx, +static void si_update_descriptors(struct si_context *sctx, struct si_descriptors *desc) { if (desc->dirty_mask) { @@ -152,17 +152,17 @@ static void si_update_descriptors(struct si_context *rctx, 4; /* pointer update */ desc->atom.dirty = true; /* The descriptors are read with the K cache. */ - rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE; + sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE; } else { desc->atom.dirty = false; } } -static void si_emit_shader_pointer(struct si_context *rctx, +static void si_emit_shader_pointer(struct si_context *sctx, struct si_descriptors *desc) { - struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; - uint64_t va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b) + + struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs; + uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) + desc->current_context_id * desc->context_size; radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0)); @@ -171,11 +171,11 @@ static void si_emit_shader_pointer(struct si_context *rctx, radeon_emit(cs, va >> 32); } -static void si_emit_descriptors(struct si_context *rctx, +static void si_emit_descriptors(struct si_context *sctx, struct si_descriptors *desc, uint32_t **descriptors) { - struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; + struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs; uint64_t va_base; int packet_start; int packet_size = 0; @@ -185,11 +185,11 @@ static void si_emit_descriptors(struct si_context *rctx, assert(dirty_mask); - va_base = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b); + va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b); /* Copy the descriptors to a new context slot. */ /* XXX Consider using TC or L2 for this copy on CIK. 
*/ - si_emit_cp_dma_copy_buffer(rctx, + si_emit_cp_dma_copy_buffer(sctx, va_base + new_context_id * desc->context_size, va_base + desc->current_context_id * desc->context_size, desc->context_size, R600_CP_DMA_SYNC); @@ -235,7 +235,7 @@ static void si_emit_descriptors(struct si_context *rctx, desc->current_context_id = new_context_id; /* Now update the shader userdata pointer. */ - si_emit_shader_pointer(rctx, desc); + si_emit_shader_pointer(sctx, desc); } static unsigned si_get_shader_user_data_base(unsigned shader) @@ -255,18 +255,18 @@ static unsigned si_get_shader_user_data_base(unsigned shader) /* SAMPLER VIEWS */ -static void si_emit_sampler_views(struct si_context *rctx, struct r600_atom *atom) +static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom) { struct si_sampler_views *views = (struct si_sampler_views*)atom; - si_emit_descriptors(rctx, &views->desc, views->desc_data); + si_emit_descriptors(sctx, &views->desc, views->desc_data); } -static void si_init_sampler_views(struct si_context *rctx, +static void si_init_sampler_views(struct si_context *sctx, struct si_sampler_views *views, unsigned shader) { - si_init_descriptors(rctx, &views->desc, + si_init_descriptors(sctx, &views->desc, si_get_shader_user_data_base(shader) + SI_SGPR_RESOURCE * 4, 8, NUM_SAMPLER_VIEWS, si_emit_sampler_views); @@ -282,7 +282,7 @@ static void si_release_sampler_views(struct si_sampler_views *views) si_release_descriptors(&views->desc); } -static void si_sampler_views_begin_new_cs(struct si_context *rctx, +static void si_sampler_views_begin_new_cs(struct si_context *sctx, struct si_sampler_views *views) { unsigned mask = views->desc.enabled_mask; @@ -293,19 +293,19 @@ static void si_sampler_views_begin_new_cs(struct si_context *rctx, struct si_pipe_sampler_view *rview = (struct si_pipe_sampler_view*)views->views[i]; - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ); } - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE); - si_emit_shader_pointer(rctx, &views->desc); + si_emit_shader_pointer(sctx, &views->desc); } -void si_set_sampler_view(struct si_context *rctx, unsigned shader, +void si_set_sampler_view(struct si_context *sctx, unsigned shader, unsigned slot, struct pipe_sampler_view *view, unsigned *view_desc) { - struct si_sampler_views *views = &rctx->samplers[shader].views; + struct si_sampler_views *views = &sctx->samplers[shader].views; if (views->views[slot] == view) return; @@ -314,7 +314,7 @@ void si_set_sampler_view(struct si_context *rctx, unsigned shader, struct si_pipe_sampler_view *rview = (struct si_pipe_sampler_view*)view; - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ); pipe_sampler_view_reference(&views->views[slot], view); views->desc_data[slot] = view_desc; @@ -326,19 +326,19 @@ void si_set_sampler_view(struct si_context *rctx, unsigned shader, } views->desc.dirty_mask |= 1 << slot; - si_update_descriptors(rctx, &views->desc); + si_update_descriptors(sctx, &views->desc); } /* BUFFER RESOURCES */ -static void si_emit_buffer_resources(struct si_context *rctx, struct r600_atom *atom) +static void si_emit_buffer_resources(struct si_context *sctx, struct 
r600_atom *atom) { struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom; - si_emit_descriptors(rctx, &buffers->desc, buffers->desc_data); + si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data); } -static void si_init_buffer_resources(struct si_context *rctx, +static void si_init_buffer_resources(struct si_context *sctx, struct si_buffer_resources *buffers, unsigned num_buffers, unsigned shader, unsigned shader_userdata_index, @@ -358,7 +358,7 @@ static void si_init_buffer_resources(struct si_context *rctx, buffers->desc_data[i] = &buffers->desc_storage[i*4]; } - si_init_descriptors(rctx, &buffers->desc, + si_init_descriptors(sctx, &buffers->desc, si_get_shader_user_data_base(shader) + shader_userdata_index*4, 4, num_buffers, si_emit_buffer_resources); @@ -378,7 +378,7 @@ static void si_release_buffer_resources(struct si_buffer_resources *buffers) si_release_descriptors(&buffers->desc); } -static void si_buffer_resources_begin_new_cs(struct si_context *rctx, +static void si_buffer_resources_begin_new_cs(struct si_context *sctx, struct si_buffer_resources *buffers) { unsigned mask = buffers->desc.enabled_mask; @@ -387,15 +387,15 @@ static void si_buffer_resources_begin_new_cs(struct si_context *rctx, while (mask) { int i = u_bit_scan(&mask); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)buffers->buffers[i], buffers->shader_usage); } - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, buffers->desc.buffer, RADEON_USAGE_READWRITE); - si_emit_shader_pointer(rctx, &buffers->desc); + si_emit_shader_pointer(sctx, &buffers->desc); } /* CONSTANT BUFFERS */ @@ -403,8 +403,8 @@ static void si_buffer_resources_begin_new_cs(struct si_context *rctx, static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot, struct pipe_constant_buffer *input) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_buffer_resources *buffers = &rctx->const_buffers[shader]; + struct si_context *sctx = (struct si_context *)ctx; + struct si_buffer_resources *buffers = &sctx->const_buffers[shader]; if (shader >= SI_NUM_SHADERS) return; @@ -414,9 +414,9 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint s /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy * with a NULL buffer). We need to use a dummy buffer instead. 
*/ - if (rctx->b.chip_class == CIK && + if (sctx->b.chip_class == CIK && (!input || (!input->buffer && !input->user_buffer))) - input = &rctx->null_const_buf; + input = &sctx->null_const_buf; if (input && (input->buffer || input->user_buffer)) { struct pipe_resource *buffer = NULL; @@ -426,7 +426,7 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint s if (input->user_buffer) { unsigned buffer_offset; - si_upload_const_buffer(rctx, + si_upload_const_buffer(sctx, (struct r600_resource**)&buffer, input->user_buffer, input->buffer_size, &buffer_offset); va = r600_resource_va(ctx->screen, buffer) + buffer_offset; @@ -449,7 +449,7 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint s S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); buffers->buffers[slot] = buffer; - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)buffer, buffers->shader_usage); buffers->desc.enabled_mask |= 1 << slot; } else { @@ -459,7 +459,7 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint s } buffers->desc.dirty_mask |= 1 << slot; - si_update_descriptors(rctx, &buffers->desc); + si_update_descriptors(sctx, &buffers->desc); } /* STREAMOUT BUFFERS */ @@ -469,9 +469,9 @@ static void si_set_streamout_targets(struct pipe_context *ctx, struct pipe_stream_output_target **targets, unsigned append_bitmask) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_buffer_resources *buffers = &rctx->streamout_buffers; - unsigned old_num_targets = rctx->b.streamout.num_targets; + struct si_context *sctx = (struct si_context *)ctx; + struct si_buffer_resources *buffers = &sctx->streamout_buffers; + unsigned old_num_targets = sctx->b.streamout.num_targets; unsigned i; /* Streamout buffers must be bound in 2 places: @@ -500,7 +500,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx, /* Set the resource. */ pipe_resource_reference(&buffers->buffers[i], buffer); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)buffer, buffers->shader_usage); buffers->desc.enabled_mask |= 1 << i; @@ -520,7 +520,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx, buffers->desc.dirty_mask |= 1 << i; } - si_update_descriptors(rctx, &buffers->desc); + si_update_descriptors(sctx, &buffers->desc); } static void si_desc_reset_buffer_offset(struct pipe_context *ctx, @@ -553,7 +553,7 @@ static void si_desc_reset_buffer_offset(struct pipe_context *ctx, */ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf) { - struct si_context *rctx = (struct si_context*)ctx; + struct si_context *sctx = (struct si_context*)ctx; struct r600_resource *rbuffer = r600_resource(buf); unsigned i, shader, alignment = rbuffer->buf->alignment; uint64_t old_va = r600_resource_va(ctx->screen, buf); @@ -562,7 +562,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource pb_reference(&rbuffer->buf, NULL); /* Create a new one in the same pipe_resource. */ - r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0, alignment, + r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0, alignment, TRUE, rbuffer->b.b.usage); /* We changed the buffer, now we need to bind it where the old one @@ -575,30 +575,30 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource /* Nothing to do. 
Vertex buffer bindings are updated before every draw call. */ /* Streamout buffers. */ - for (i = 0; i < rctx->streamout_buffers.num_buffers; i++) { - if (rctx->streamout_buffers.buffers[i] == buf) { + for (i = 0; i < sctx->streamout_buffers.num_buffers; i++) { + if (sctx->streamout_buffers.buffers[i] == buf) { /* Update the descriptor. */ - si_desc_reset_buffer_offset(ctx, rctx->streamout_buffers.desc_data[i], + si_desc_reset_buffer_offset(ctx, sctx->streamout_buffers.desc_data[i], old_va, buf); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)buf, - rctx->streamout_buffers.shader_usage); - rctx->streamout_buffers.desc.dirty_mask |= 1 << i; - si_update_descriptors(rctx, &rctx->streamout_buffers.desc); + sctx->streamout_buffers.shader_usage); + sctx->streamout_buffers.desc.dirty_mask |= 1 << i; + si_update_descriptors(sctx, &sctx->streamout_buffers.desc); /* Update the streamout state. */ - if (rctx->b.streamout.begin_emitted) { - r600_emit_streamout_end(&rctx->b); + if (sctx->b.streamout.begin_emitted) { + r600_emit_streamout_end(&sctx->b); } - rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask; - r600_streamout_buffers_dirty(&rctx->b); + sctx->b.streamout.append_bitmask = sctx->b.streamout.enabled_mask; + r600_streamout_buffers_dirty(&sctx->b); } } /* Constant buffers. */ for (shader = 0; shader < SI_NUM_SHADERS; shader++) { - struct si_buffer_resources *buffers = &rctx->const_buffers[shader]; + struct si_buffer_resources *buffers = &sctx->const_buffers[shader]; bool found = false; uint32_t mask = buffers->desc.enabled_mask; @@ -608,7 +608,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource si_desc_reset_buffer_offset(ctx, buffers->desc_data[i], old_va, buf); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rbuffer, buffers->shader_usage); buffers->desc.dirty_mask |= 1 << i; @@ -616,13 +616,13 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource } } if (found) { - si_update_descriptors(rctx, &buffers->desc); + si_update_descriptors(sctx, &buffers->desc); } } /* Texture buffers. */ for (shader = 0; shader < SI_NUM_SHADERS; shader++) { - struct si_sampler_views *views = &rctx->samplers[shader].views; + struct si_sampler_views *views = &sctx->samplers[shader].views; bool found = false; uint32_t mask = views->desc.enabled_mask; @@ -633,7 +633,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource si_desc_reset_buffer_offset(ctx, views->desc_data[i], old_va, buf); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ); views->desc.dirty_mask |= 1 << i; @@ -641,7 +641,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource } } if (found) { - si_update_descriptors(rctx, &views->desc); + si_update_descriptors(sctx, &views->desc); } } } @@ -654,7 +654,7 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, unsigned offset, unsigned size, unsigned value) { - struct si_context *rctx = (struct si_context*)ctx; + struct si_context *sctx = (struct si_context*)ctx; if (!size) return; @@ -667,8 +667,8 @@ static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, /* Fallback for unaligned clears. 
*/ if (offset % 4 != 0 || size % 4 != 0) { - uint32_t *map = rctx->b.ws->buffer_map(r600_resource(dst)->cs_buf, - rctx->b.rings.gfx.cs, + uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf, + sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE); size /= 4; for (unsigned i = 0; i < size; i++) @@ -676,33 +676,33 @@ static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, return; } - uint64_t va = r600_resource_va(&rctx->screen->b.b, dst) + offset; + uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset; /* Flush the caches where the resource is bound. */ /* XXX only flush the caches where the buffer is bound. */ - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_INV_CONST_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | R600_CONTEXT_FLUSH_AND_INV_CB_META | R600_CONTEXT_FLUSH_AND_INV_DB_META; - rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; + sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; while (size) { unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT); unsigned dma_flags = 0; - si_need_cs_space(rctx, 7 + (rctx->b.flags ? rctx->cache_flush.num_dw : 0), + si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE); /* This must be done after need_cs_space. */ - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE); /* Flush the caches for the first copy only. * Also wait for the previous CP DMA operations. */ - if (rctx->b.flags) { - si_emit_cache_flush(&rctx->b, NULL); + if (sctx->b.flags) { + si_emit_cache_flush(&sctx->b, NULL); dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */ } @@ -711,7 +711,7 @@ static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, dma_flags |= R600_CP_DMA_SYNC; /* Emit the clear packet. */ - si_emit_cp_dma_clear_buffer(rctx, va, byte_count, value, dma_flags); + si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags); size -= byte_count; va += byte_count; @@ -720,7 +720,7 @@ static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, /* Flush the caches again in case the 3D engine has been prefetching * the resource. */ /* XXX only flush the caches where the buffer is bound. */ - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_INV_CONST_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | @@ -728,7 +728,7 @@ static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst, R600_CONTEXT_FLUSH_AND_INV_DB_META; } -void si_copy_buffer(struct si_context *rctx, +void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src, uint64_t dst_offset, uint64_t src_offset, unsigned size) { @@ -741,11 +741,11 @@ void si_copy_buffer(struct si_context *rctx, util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset, dst_offset + size); - dst_offset += r600_resource_va(&rctx->screen->b.b, dst); - src_offset += r600_resource_va(&rctx->screen->b.b, src); + dst_offset += r600_resource_va(&sctx->screen->b.b, dst); + src_offset += r600_resource_va(&sctx->screen->b.b, src); /* Flush the caches where the resource is bound. 
*/ - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_INV_CONST_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | @@ -757,11 +757,11 @@ void si_copy_buffer(struct si_context *rctx, unsigned sync_flags = 0; unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT); - si_need_cs_space(rctx, 7 + (rctx->b.flags ? rctx->cache_flush.num_dw : 0), FALSE); + si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE); /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */ - if (rctx->b.flags) { - si_emit_cache_flush(&rctx->b, NULL); + if (sctx->b.flags) { + si_emit_cache_flush(&sctx->b, NULL); sync_flags |= SI_CP_DMA_RAW_WAIT; } @@ -771,17 +771,17 @@ void si_copy_buffer(struct si_context *rctx, } /* This must be done after r600_need_cs_space. */ - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ); - r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ); + r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE); - si_emit_cp_dma_copy_buffer(rctx, dst_offset, src_offset, byte_count, sync_flags); + si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags); size -= byte_count; src_offset += byte_count; dst_offset += byte_count; } - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_INV_CONST_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | @@ -791,50 +791,50 @@ void si_copy_buffer(struct si_context *rctx, /* INIT/DEINIT */ -void si_init_all_descriptors(struct si_context *rctx) +void si_init_all_descriptors(struct si_context *sctx) { int i; for (i = 0; i < SI_NUM_SHADERS; i++) { - si_init_buffer_resources(rctx, &rctx->const_buffers[i], + si_init_buffer_resources(sctx, &sctx->const_buffers[i], NUM_CONST_BUFFERS, i, SI_SGPR_CONST, RADEON_USAGE_READ); - si_init_sampler_views(rctx, &rctx->samplers[i].views, i); + si_init_sampler_views(sctx, &sctx->samplers[i].views, i); - rctx->atoms.const_buffers[i] = &rctx->const_buffers[i].desc.atom; - rctx->atoms.sampler_views[i] = &rctx->samplers[i].views.desc.atom; + sctx->atoms.const_buffers[i] = &sctx->const_buffers[i].desc.atom; + sctx->atoms.sampler_views[i] = &sctx->samplers[i].views.desc.atom; } - si_init_buffer_resources(rctx, &rctx->streamout_buffers, 4, PIPE_SHADER_VERTEX, + si_init_buffer_resources(sctx, &sctx->streamout_buffers, 4, PIPE_SHADER_VERTEX, SI_SGPR_SO_BUFFER, RADEON_USAGE_WRITE); - rctx->atoms.streamout_buffers = &rctx->streamout_buffers.desc.atom; + sctx->atoms.streamout_buffers = &sctx->streamout_buffers.desc.atom; /* Set pipe_context functions. 
*/ - rctx->b.b.set_constant_buffer = si_set_constant_buffer; - rctx->b.b.set_stream_output_targets = si_set_streamout_targets; - rctx->b.clear_buffer = si_clear_buffer; - rctx->b.invalidate_buffer = si_invalidate_buffer; + sctx->b.b.set_constant_buffer = si_set_constant_buffer; + sctx->b.b.set_stream_output_targets = si_set_streamout_targets; + sctx->b.clear_buffer = si_clear_buffer; + sctx->b.invalidate_buffer = si_invalidate_buffer; } -void si_release_all_descriptors(struct si_context *rctx) +void si_release_all_descriptors(struct si_context *sctx) { int i; for (i = 0; i < SI_NUM_SHADERS; i++) { - si_release_buffer_resources(&rctx->const_buffers[i]); - si_release_sampler_views(&rctx->samplers[i].views); + si_release_buffer_resources(&sctx->const_buffers[i]); + si_release_sampler_views(&sctx->samplers[i].views); } - si_release_buffer_resources(&rctx->streamout_buffers); + si_release_buffer_resources(&sctx->streamout_buffers); } -void si_all_descriptors_begin_new_cs(struct si_context *rctx) +void si_all_descriptors_begin_new_cs(struct si_context *sctx) { int i; for (i = 0; i < SI_NUM_SHADERS; i++) { - si_buffer_resources_begin_new_cs(rctx, &rctx->const_buffers[i]); - si_sampler_views_begin_new_cs(rctx, &rctx->samplers[i].views); + si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]); + si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views); } - si_buffer_resources_begin_new_cs(rctx, &rctx->streamout_buffers); + si_buffer_resources_begin_new_cs(sctx, &sctx->streamout_buffers); } diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c index 4542b0f5efc..57aa4a744d4 100644 --- a/src/gallium/drivers/radeonsi/si_hw_context.c +++ b/src/gallium/drivers/radeonsi/si_hw_context.c @@ -696,14 +696,14 @@ void si_context_queries_resume(struct si_context *ctx) } #if SI_TRACE_CS -void si_trace_emit(struct si_context *rctx) +void si_trace_emit(struct si_context *sctx) { - struct si_screen *rscreen = rctx->screen; - struct radeon_winsys_cs *cs = rctx->cs; + struct si_screen *rscreen = sctx->screen; + struct radeon_winsys_cs *cs = sctx->cs; uint64_t va; va = r600_resource_va(&rscreen->screen, (void*)rscreen->trace_bo); - r600_context_bo_reloc(rctx, rscreen->trace_bo, RADEON_USAGE_READWRITE); + r600_context_bo_reloc(sctx, rscreen->trace_bo, RADEON_USAGE_READWRITE); cs->buf[cs->cdw++] = PKT3(PKT3_WRITE_DATA, 4, 0); cs->buf[cs->cdw++] = PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) | PKT3_WRITE_DATA_WR_CONFIRM | diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c index 5c772cba914..69069b88f9f 100644 --- a/src/gallium/drivers/radeonsi/si_pipe.c +++ b/src/gallium/drivers/radeonsi/si_pipe.c @@ -57,24 +57,24 @@ void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence, unsigned flags) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct pipe_query *render_cond = NULL; boolean render_cond_cond = FALSE; unsigned render_cond_mode = 0; if (fence) { - *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs); + *fence = sctx->b.ws->cs_create_fence(sctx->b.rings.gfx.cs); } /* Disable render condition. 
*/
-	if (rctx->current_render_cond) {
-		render_cond = rctx->current_render_cond;
-		render_cond_cond = rctx->current_render_cond_cond;
-		render_cond_mode = rctx->current_render_cond_mode;
+	if (sctx->current_render_cond) {
+		render_cond = sctx->current_render_cond;
+		render_cond_cond = sctx->current_render_cond_cond;
+		render_cond_mode = sctx->current_render_cond_mode;
 		ctx->render_condition(ctx, NULL, FALSE, 0);
 	}
 
-	si_context_flush(rctx, flags);
+	si_context_flush(sctx, flags);
 
 	/* Re-enable render condition. */
 	if (render_cond) {
@@ -97,127 +97,127 @@ static void si_flush_from_winsys(void *ctx, unsigned flags)
 
 static void si_destroy_context(struct pipe_context *context)
 {
-	struct si_context *rctx = (struct si_context *)context;
+	struct si_context *sctx = (struct si_context *)context;
 
-	si_release_all_descriptors(rctx);
+	si_release_all_descriptors(sctx);
 
-	pipe_resource_reference(&rctx->null_const_buf.buffer, NULL);
-	r600_resource_reference(&rctx->border_color_table, NULL);
+	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
+	r600_resource_reference(&sctx->border_color_table, NULL);
 
-	if (rctx->dummy_pixel_shader) {
-		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
+	if (sctx->dummy_pixel_shader) {
+		sctx->b.b.delete_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
 	}
 
 	for (int i = 0; i < 8; i++) {
-		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush_depth_stencil[i]);
-		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush_depth[i]);
-		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush_stencil[i]);
+		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth_stencil[i]);
+		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth[i]);
+		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_stencil[i]);
 	}
-	rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush_inplace);
-	rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
-	rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
-	util_unreference_framebuffer_state(&rctx->framebuffer);
+	sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_inplace);
+	sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
+	sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
+	util_unreference_framebuffer_state(&sctx->framebuffer);
 
-	util_blitter_destroy(rctx->blitter);
+	util_blitter_destroy(sctx->blitter);
 
-	r600_common_context_cleanup(&rctx->b);
-	FREE(rctx);
+	r600_common_context_cleanup(&sctx->b);
+	FREE(sctx);
 }
 
 static struct pipe_context *si_create_context(struct pipe_screen *screen, void *priv)
 {
-	struct si_context *rctx = CALLOC_STRUCT(si_context);
+	struct si_context *sctx = CALLOC_STRUCT(si_context);
 	struct si_screen* rscreen = (struct si_screen *)screen;
 	int shader, i;
 
-	if (rctx == NULL)
+	if (sctx == NULL)
 		return NULL;
 
-	if (!r600_common_context_init(&rctx->b, &rscreen->b))
+	if (!r600_common_context_init(&sctx->b, &rscreen->b))
 		goto fail;
 
-	rctx->b.b.screen = screen;
-	rctx->b.b.priv = priv;
-	rctx->b.b.destroy = si_destroy_context;
-	rctx->b.b.flush = si_flush_from_st;
+	sctx->b.b.screen = screen;
+	sctx->b.b.priv = priv;
+	sctx->b.b.destroy = si_destroy_context;
+	sctx->b.b.flush = si_flush_from_st;
 
 	/* Easy accessing of screen/winsys. */
-	rctx->screen = rscreen;
+	sctx->screen = rscreen;
 
-	si_init_blit_functions(rctx);
-	si_init_query_functions(rctx);
-	si_init_context_resource_functions(rctx);
-	si_init_compute_functions(rctx);
+	si_init_blit_functions(sctx);
+	si_init_query_functions(sctx);
+	si_init_context_resource_functions(sctx);
+	si_init_compute_functions(sctx);
 
 	if (rscreen->b.info.has_uvd) {
-		rctx->b.b.create_video_codec = si_uvd_create_decoder;
-		rctx->b.b.create_video_buffer = si_video_buffer_create;
+		sctx->b.b.create_video_codec = si_uvd_create_decoder;
+		sctx->b.b.create_video_buffer = si_video_buffer_create;
 	} else {
-		rctx->b.b.create_video_codec = vl_create_decoder;
-		rctx->b.b.create_video_buffer = vl_video_buffer_create;
+		sctx->b.b.create_video_codec = vl_create_decoder;
+		sctx->b.b.create_video_buffer = vl_video_buffer_create;
 	}
 
-	rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
-	rctx->b.rings.gfx.flush = si_flush_from_winsys;
+	sctx->b.rings.gfx.cs = sctx->b.ws->cs_create(sctx->b.ws, RING_GFX, NULL);
+	sctx->b.rings.gfx.flush = si_flush_from_winsys;
 
-	si_init_all_descriptors(rctx);
+	si_init_all_descriptors(sctx);
 
 	/* Initialize cache_flush. */
-	rctx->cache_flush = si_atom_cache_flush;
-	rctx->atoms.cache_flush = &rctx->cache_flush;
+	sctx->cache_flush = si_atom_cache_flush;
+	sctx->atoms.cache_flush = &sctx->cache_flush;
 
-	rctx->atoms.streamout_begin = &rctx->b.streamout.begin_atom;
+	sctx->atoms.streamout_begin = &sctx->b.streamout.begin_atom;
 
-	switch (rctx->b.chip_class) {
+	switch (sctx->b.chip_class) {
 	case SI:
 	case CIK:
-		si_init_state_functions(rctx);
-		LIST_INITHEAD(&rctx->active_nontimer_query_list);
-		rctx->max_db = 8;
-		si_init_config(rctx);
+		si_init_state_functions(sctx);
+		LIST_INITHEAD(&sctx->active_nontimer_query_list);
+		sctx->max_db = 8;
+		si_init_config(sctx);
 		break;
 	default:
-		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
+		R600_ERR("Unsupported chip class %d.\n", sctx->b.chip_class);
 		goto fail;
 	}
 
-	rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, si_flush_from_winsys, rctx);
+	sctx->b.ws->cs_set_flush_callback(sctx->b.rings.gfx.cs, si_flush_from_winsys, sctx);
 
-	rctx->blitter = util_blitter_create(&rctx->b.b);
-	if (rctx->blitter == NULL)
+	sctx->blitter = util_blitter_create(&sctx->b.b);
+	if (sctx->blitter == NULL)
 		goto fail;
 
-	rctx->dummy_pixel_shader =
-		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
+	sctx->dummy_pixel_shader =
+		util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
 						     TGSI_SEMANTIC_GENERIC,
 						     TGSI_INTERPOLATE_CONSTANT);
-	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
+	sctx->b.b.bind_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
 
 	/* these must be last */
-	si_begin_new_cs(rctx);
-	si_get_backend_mask(rctx);
+	si_begin_new_cs(sctx);
+	si_get_backend_mask(sctx);
 
 	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
 	 * with a NULL buffer). We need to use a dummy buffer instead. */
-	if (rctx->b.chip_class == CIK) {
-		rctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
+	if (sctx->b.chip_class == CIK) {
+		sctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
 								 PIPE_USAGE_STATIC, 16);
-		rctx->null_const_buf.buffer_size = rctx->null_const_buf.buffer->width0;
+		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;
 
 		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
 			for (i = 0; i < NUM_CONST_BUFFERS; i++) {
-				rctx->b.b.set_constant_buffer(&rctx->b.b, shader, i,
-							      &rctx->null_const_buf);
+				sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
+							      &sctx->null_const_buf);
 			}
 		}
 
 		/* Clear the NULL constant buffer, because loads should return zeros. */
-		rctx->b.clear_buffer(&rctx->b.b, rctx->null_const_buf.buffer, 0,
-				     rctx->null_const_buf.buffer->width0, 0);
+		sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
+				     sctx->null_const_buf.buffer->width0, 0);
 	}
 
-	return &rctx->b.b;
+	return &sctx->b.b;
 fail:
-	si_destroy_context(&rctx->b.b);
+	si_destroy_context(&sctx->b.b);
 	return NULL;
 }
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index abf19442c90..56b704bfa5d 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -87,7 +87,7 @@ struct si_textures_info {
 	unsigned n_samplers;
 };
 
-#define SI_NUM_ATOMS(rctx) (sizeof((rctx)->atoms)/sizeof((rctx)->atoms.array[0]))
+#define SI_NUM_ATOMS(sctx) (sizeof((sctx)->atoms)/sizeof((sctx)->atoms.array[0]))
 #define SI_NUM_SHADERS (PIPE_SHADER_FRAGMENT+1)
 
 struct si_context {
@@ -176,14 +176,14 @@ struct si_context {
 };
 
 /* si_blit.c */
-void si_init_blit_functions(struct si_context *rctx);
-void si_flush_depth_textures(struct si_context *rctx,
+void si_init_blit_functions(struct si_context *sctx);
+void si_flush_depth_textures(struct si_context *sctx,
 			     struct si_textures_info *textures);
-void si_decompress_color_textures(struct si_context *rctx,
+void si_decompress_color_textures(struct si_context *sctx,
 				  struct si_textures_info *textures);
 
 /* si_buffer.c */
-void si_upload_index_buffer(struct si_context *rctx,
+void si_upload_index_buffer(struct si_context *sctx,
 			    struct pipe_index_buffer *ib, unsigned count);
@@ -193,22 +193,22 @@ void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
 const char *si_get_llvm_processor_name(enum radeon_family family);
 
 /* si_query.c */
-void si_init_query_functions(struct si_context *rctx);
+void si_init_query_functions(struct si_context *sctx);
 
 /* si_resource.c */
-void si_init_context_resource_functions(struct si_context *r600);
+void si_init_context_resource_functions(struct si_context *sctx);
 
 /* si_translate.c */
-void si_translate_index_buffer(struct si_context *r600,
+void si_translate_index_buffer(struct si_context *sctx,
 			       struct pipe_index_buffer *ib,
 			       unsigned count);
 
 #if SI_TRACE_CS
-void si_trace_emit(struct si_context *rctx);
+void si_trace_emit(struct si_context *sctx);
 #endif
 
 /* si_compute.c */
-void si_init_compute_functions(struct si_context *rctx);
+void si_init_compute_functions(struct si_context *sctx);
 
 /* si_uvd.c */
 struct pipe_video_codec *si_uvd_create_decoder(struct pipe_context *context,
diff --git a/src/gallium/drivers/radeonsi/si_pm4.c b/src/gallium/drivers/radeonsi/si_pm4.c
index ba57563105e..5b132850272 100644
--- a/src/gallium/drivers/radeonsi/si_pm4.c
+++ b/src/gallium/drivers/radeonsi/si_pm4.c
@@ -145,15 +145,15 @@ void si_pm4_inval_texture_cache(struct si_pm4_state *state)
 	state->cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
 }
 
-void si_pm4_free_state(struct si_context *rctx,
+void si_pm4_free_state(struct si_context *sctx,
 		       struct si_pm4_state *state,
 		       unsigned idx)
 {
 	if (state == NULL)
 		return;
 
-	if (idx != ~0 && rctx->emitted.array[idx] == state) {
-		rctx->emitted.array[idx] = NULL;
+	if (idx != ~0 && sctx->emitted.array[idx] == state) {
+		sctx->emitted.array[idx] = NULL;
 	}
 
 	for (int i = 0; i < state->nbo; ++i) {
@@ -162,26 +162,26 @@ void si_pm4_free_state(struct si_context *rctx,
 	FREE(state);
 }
 
-struct si_pm4_state * si_pm4_alloc_state(struct si_context *rctx)
+struct si_pm4_state * si_pm4_alloc_state(struct si_context *sctx)
 {
 	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
 
 	if (pm4 == NULL)
 		return NULL;
 
-	pm4->chip_class = rctx->b.chip_class;
+	pm4->chip_class = sctx->b.chip_class;
 
 	return pm4;
 }
 
-uint32_t si_pm4_sync_flags(struct si_context *rctx)
+uint32_t si_pm4_sync_flags(struct si_context *sctx)
 {
 	uint32_t cp_coher_cntl = 0;
 
 	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
-		struct si_pm4_state *state = rctx->queued.array[i];
+		struct si_pm4_state *state = sctx->queued.array[i];
 
-		if (!state || rctx->emitted.array[i] == state)
+		if (!state || sctx->emitted.array[i] == state)
 			continue;
 
 		cp_coher_cntl |= state->cp_coher_cntl;
@@ -189,20 +189,20 @@ uint32_t si_pm4_sync_flags(struct si_context *rctx)
 	return cp_coher_cntl;
 }
 
-unsigned si_pm4_dirty_dw(struct si_context *rctx)
+unsigned si_pm4_dirty_dw(struct si_context *sctx)
 {
 	unsigned count = 0;
 
 	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
-		struct si_pm4_state *state = rctx->queued.array[i];
+		struct si_pm4_state *state = sctx->queued.array[i];
 
-		if (!state || rctx->emitted.array[i] == state)
+		if (!state || sctx->emitted.array[i] == state)
 			continue;
 
 		count += state->ndw;
#if SI_TRACE_CS
 		/* for tracing each states */
-		if (rctx->screen->trace_bo) {
+		if (sctx->screen->trace_bo) {
 			count += SI_TRACE_CS_DWORDS;
 		}
#endif
@@ -211,11 +211,11 @@ unsigned si_pm4_dirty_dw(struct si_context *rctx)
 	return count;
 }
 
-void si_pm4_emit(struct si_context *rctx, struct si_pm4_state *state)
+void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
 {
-	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
 	for (int i = 0; i < state->nbo; ++i) {
-		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, state->bo[i],
+		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, state->bo[i],
 				      state->bo_usage[i]);
 	}
 
@@ -228,27 +228,27 @@ void si_pm4_emit(struct si_context *rctx, struct si_pm4_state *state)
 	cs->cdw += state->ndw;
 
#if SI_TRACE_CS
-	if (rctx->screen->trace_bo) {
-		si_trace_emit(rctx);
+	if (sctx->screen->trace_bo) {
+		si_trace_emit(sctx);
 	}
#endif
 }
 
-void si_pm4_emit_dirty(struct si_context *rctx)
+void si_pm4_emit_dirty(struct si_context *sctx)
 {
 	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
-		struct si_pm4_state *state = rctx->queued.array[i];
+		struct si_pm4_state *state = sctx->queued.array[i];
 
-		if (!state || rctx->emitted.array[i] == state)
+		if (!state || sctx->emitted.array[i] == state)
 			continue;
 
-		assert(state != rctx->queued.named.init);
-		si_pm4_emit(rctx, state);
-		rctx->emitted.array[i] = state;
+		assert(state != sctx->queued.named.init);
+		si_pm4_emit(sctx, state);
+		sctx->emitted.array[i] = state;
 	}
 }
 
-void si_pm4_reset_emitted(struct si_context *rctx)
+void si_pm4_reset_emitted(struct si_context *sctx)
 {
-	memset(&rctx->emitted, 0, sizeof(rctx->emitted));
+	memset(&sctx->emitted, 0, sizeof(sctx->emitted));
 }
diff --git a/src/gallium/drivers/radeonsi/si_pm4.h b/src/gallium/drivers/radeonsi/si_pm4.h
index a22f5d1c705..c9bc091173f 100644
--- a/src/gallium/drivers/radeonsi/si_pm4.h
+++ b/src/gallium/drivers/radeonsi/si_pm4.h
@@ -81,15 +81,15 @@ void si_pm4_sh_data_end(struct si_pm4_state *state, unsigned base, unsigned idx)
 void si_pm4_inval_shader_cache(struct si_pm4_state *state);
 void si_pm4_inval_texture_cache(struct si_pm4_state *state);
 
-void si_pm4_free_state(struct si_context *rctx,
+void si_pm4_free_state(struct si_context *sctx,
 		       struct si_pm4_state *state,
 		       unsigned idx);
-struct si_pm4_state * si_pm4_alloc_state(struct si_context *rctx);
+struct si_pm4_state * si_pm4_alloc_state(struct si_context *sctx);
 
-uint32_t si_pm4_sync_flags(struct si_context *rctx);
-unsigned si_pm4_dirty_dw(struct si_context *rctx);
-void si_pm4_emit(struct si_context *rctx, struct si_pm4_state *state);
-void si_pm4_emit_dirty(struct si_context *rctx);
-void si_pm4_reset_emitted(struct si_context *rctx);
+uint32_t si_pm4_sync_flags(struct si_context *sctx);
+unsigned si_pm4_dirty_dw(struct si_context *sctx);
+void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state);
+void si_pm4_emit_dirty(struct si_context *sctx);
+void si_pm4_reset_emitted(struct si_context *sctx);
 
 #endif
diff --git a/src/gallium/drivers/radeonsi/si_query.c b/src/gallium/drivers/radeonsi/si_query.c
index 245f0430095..d694eca8d92 100644
--- a/src/gallium/drivers/radeonsi/si_query.c
+++ b/src/gallium/drivers/radeonsi/si_query.c
@@ -25,21 +25,21 @@
 static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type)
 {
-	struct si_context *rctx = (struct si_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 
-	return (struct pipe_query*)si_context_query_create(rctx, query_type);
+	return (struct pipe_query*)si_context_query_create(sctx, query_type);
 }
 
 static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
 {
-	struct si_context *rctx = (struct si_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 
-	si_context_query_destroy(rctx, (struct si_query *)query);
+	si_context_query_destroy(sctx, (struct si_query *)query);
 }
 
 static void si_begin_query(struct pipe_context *ctx, struct pipe_query *query)
 {
-	struct si_context *rctx = (struct si_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 	struct si_query *rquery = (struct si_query *)query;
 
 	if (!si_query_needs_begin(rquery->type)) {
@@ -49,23 +49,23 @@ static void si_begin_query(struct pipe_context *ctx, struct pipe_query *query)
 	memset(&rquery->result, 0, sizeof(rquery->result));
 	rquery->results_start = rquery->results_end;
-	si_query_begin(rctx, (struct si_query *)query);
+	si_query_begin(sctx, (struct si_query *)query);
 
 	if (!si_is_timer_query(rquery->type)) {
-		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_query_list);
+		LIST_ADDTAIL(&rquery->list, &sctx->active_nontimer_query_list);
 	}
 }
 
 static void si_end_query(struct pipe_context *ctx, struct pipe_query *query)
 {
-	struct si_context *rctx = (struct si_context *)ctx;
+	struct si_context *sctx = (struct si_context *)ctx;
 	struct si_query *rquery = (struct si_query *)query;
 
 	if (!si_query_needs_begin(rquery->type)) {
 		memset(&rquery->result, 0, sizeof(rquery->result));
 	}
 
-	si_query_end(rctx, rquery);
+	si_query_end(sctx, rquery);
 
 	if (si_query_needs_begin(rquery->type) && !si_is_timer_query(rquery->type)) {
 		LIST_DELINIT(&rquery->list);
@@ -76,10 +76,10 @@ static boolean si_get_query_result(struct pipe_context *ctx,
 				   struct pipe_query *query, boolean
wait, union pipe_query_result *vresult) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_query *rquery = (struct si_query *)query; - return si_context_query_result(rctx, rquery, wait, vresult); + return si_context_query_result(sctx, rquery, wait, vresult); } static void si_render_condition(struct pipe_context *ctx, @@ -87,26 +87,26 @@ static void si_render_condition(struct pipe_context *ctx, boolean condition, uint mode) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_query *rquery = (struct si_query *)query; int wait_flag = 0; /* If we already have nonzero result, render unconditionally */ if (query != NULL && rquery->result.u64 != 0) { - if (rctx->current_render_cond) { + if (sctx->current_render_cond) { si_render_condition(ctx, NULL, FALSE, 0); } return; } - rctx->current_render_cond = query; - rctx->current_render_cond_cond = condition; - rctx->current_render_cond_mode = mode; + sctx->current_render_cond = query; + sctx->current_render_cond_cond = condition; + sctx->current_render_cond_mode = mode; if (query == NULL) { - if (rctx->predicate_drawing) { - rctx->predicate_drawing = false; - si_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, 1); + if (sctx->predicate_drawing) { + sctx->predicate_drawing = false; + si_query_predication(sctx, NULL, PREDICATION_OP_CLEAR, 1); } return; } @@ -116,32 +116,32 @@ static void si_render_condition(struct pipe_context *ctx, wait_flag = 1; } - rctx->predicate_drawing = true; + sctx->predicate_drawing = true; switch (rquery->type) { case PIPE_QUERY_OCCLUSION_COUNTER: case PIPE_QUERY_OCCLUSION_PREDICATE: - si_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag); + si_query_predication(sctx, rquery, PREDICATION_OP_ZPASS, wait_flag); break; case PIPE_QUERY_PRIMITIVES_EMITTED: case PIPE_QUERY_PRIMITIVES_GENERATED: case PIPE_QUERY_SO_STATISTICS: case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - si_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag); + si_query_predication(sctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag); break; default: assert(0); } } -void si_init_query_functions(struct si_context *rctx) +void si_init_query_functions(struct si_context *sctx) { - rctx->b.b.create_query = si_create_query; - rctx->b.b.destroy_query = si_destroy_query; - rctx->b.b.begin_query = si_begin_query; - rctx->b.b.end_query = si_end_query; - rctx->b.b.get_query_result = si_get_query_result; - - if (rctx->screen->b.info.r600_num_backends > 0) - rctx->b.b.render_condition = si_render_condition; + sctx->b.b.create_query = si_create_query; + sctx->b.b.destroy_query = si_destroy_query; + sctx->b.b.begin_query = si_begin_query; + sctx->b.b.end_query = si_end_query; + sctx->b.b.get_query_result = si_get_query_result; + + if (sctx->screen->b.info.r600_num_backends > 0) + sctx->b.b.render_condition = si_render_condition; } diff --git a/src/gallium/drivers/radeonsi/si_resource.c b/src/gallium/drivers/radeonsi/si_resource.c index 11ac3c9a1d0..c0f1e264ae4 100644 --- a/src/gallium/drivers/radeonsi/si_resource.c +++ b/src/gallium/drivers/radeonsi/si_resource.c @@ -52,10 +52,10 @@ void si_init_screen_resource_functions(struct pipe_screen *screen) screen->resource_destroy = u_resource_destroy_vtbl; } -void si_init_context_resource_functions(struct si_context *r600) +void si_init_context_resource_functions(struct si_context *sctx) { - r600->b.b.transfer_map = u_transfer_map_vtbl; - r600->b.b.transfer_flush_region = 
u_default_transfer_flush_region; - r600->b.b.transfer_unmap = u_transfer_unmap_vtbl; - r600->b.b.transfer_inline_write = u_default_transfer_inline_write; + sctx->b.b.transfer_map = u_transfer_map_vtbl; + sctx->b.b.transfer_flush_region = u_default_transfer_flush_region; + sctx->b.b.transfer_unmap = u_transfer_unmap_vtbl; + sctx->b.b.transfer_inline_write = u_default_transfer_inline_write; } diff --git a/src/gallium/drivers/radeonsi/si_resource.h b/src/gallium/drivers/radeonsi/si_resource.h index 9f97c866b0a..67da8faead8 100644 --- a/src/gallium/drivers/radeonsi/si_resource.h +++ b/src/gallium/drivers/radeonsi/si_resource.h @@ -48,7 +48,7 @@ void si_init_screen_resource_functions(struct pipe_screen *screen); struct si_context; -void si_upload_const_buffer(struct si_context *rctx, struct r600_resource **rbuffer, +void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer, const uint8_t *ptr, unsigned size, uint32_t *const_offset); diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c index 3e401371975..5c6f128861f 100644 --- a/src/gallium/drivers/radeonsi/si_shader.c +++ b/src/gallium/drivers/radeonsi/si_shader.c @@ -1889,17 +1889,17 @@ static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx) } } -int si_compile_llvm(struct si_context *rctx, struct si_pipe_shader *shader, +int si_compile_llvm(struct si_context *sctx, struct si_pipe_shader *shader, LLVMModuleRef mod) { unsigned i; uint32_t *ptr; struct radeon_llvm_binary binary; - bool dump = r600_can_dump_shader(&rctx->screen->b, + bool dump = r600_can_dump_shader(&sctx->screen->b, shader->selector ? shader->selector->tokens : NULL); memset(&binary, 0, sizeof(binary)); radeon_llvm_compile(mod, &binary, - si_get_llvm_processor_name(rctx->screen->b.family), dump); + si_get_llvm_processor_name(sctx->screen->b.family), dump); if (dump && ! 
binary.disassembled) { fprintf(stderr, "SI CODE:\n"); for (i = 0; i < binary.code_size; i+=4 ) { @@ -1941,13 +1941,13 @@ int si_compile_llvm(struct si_context *rctx, struct si_pipe_shader *shader, /* copy new shader */ r600_resource_reference(&shader->bo, NULL); - shader->bo = si_resource_create_custom(rctx->b.b.screen, PIPE_USAGE_IMMUTABLE, + shader->bo = si_resource_create_custom(sctx->b.b.screen, PIPE_USAGE_IMMUTABLE, binary.code_size); if (shader->bo == NULL) { return -ENOMEM; } - ptr = (uint32_t*)rctx->b.ws->buffer_map(shader->bo->cs_buf, rctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE); + ptr = (uint32_t*)sctx->b.ws->buffer_map(shader->bo->cs_buf, sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE); if (0 /*SI_BIG_ENDIAN*/) { for (i = 0; i < binary.code_size / 4; ++i) { ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4)); @@ -1955,7 +1955,7 @@ int si_compile_llvm(struct si_context *rctx, struct si_pipe_shader *shader, } else { memcpy(ptr, binary.code, binary.code_size); } - rctx->b.ws->buffer_unmap(shader->bo->cs_buf); + sctx->b.ws->buffer_unmap(shader->bo->cs_buf); free(binary.code); free(binary.config); @@ -1967,14 +1967,14 @@ int si_pipe_shader_create( struct pipe_context *ctx, struct si_pipe_shader *shader) { - struct si_context *rctx = (struct si_context*)ctx; + struct si_context *sctx = (struct si_context*)ctx; struct si_pipe_shader_selector *sel = shader->selector; struct si_shader_context si_shader_ctx; struct tgsi_shader_info shader_info; struct lp_build_tgsi_context * bld_base; LLVMModuleRef mod; int r = 0; - bool dump = r600_can_dump_shader(&rctx->screen->b, shader->selector->tokens); + bool dump = r600_can_dump_shader(&sctx->screen->b, shader->selector->tokens); assert(shader->shader.noutput == 0); assert(shader->shader.ninterp == 0); @@ -2039,7 +2039,7 @@ int si_pipe_shader_create( radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld); mod = bld_base->base.gallivm->module; - r = si_compile_llvm(rctx, shader, mod); + r = si_compile_llvm(sctx, shader, mod); radeon_llvm_dispose(&si_shader_ctx.radeon_bld); tgsi_parse_free(&si_shader_ctx.parse); diff --git a/src/gallium/drivers/radeonsi/si_shader.h b/src/gallium/drivers/radeonsi/si_shader.h index 3106473773d..766059bd056 100644 --- a/src/gallium/drivers/radeonsi/si_shader.h +++ b/src/gallium/drivers/radeonsi/si_shader.h @@ -154,7 +154,7 @@ struct si_pipe_shader { /* radeonsi_shader.c */ int si_pipe_shader_create(struct pipe_context *ctx, struct si_pipe_shader *shader); int si_pipe_shader_create(struct pipe_context *ctx, struct si_pipe_shader *shader); -int si_compile_llvm(struct si_context *rctx, struct si_pipe_shader *shader, +int si_compile_llvm(struct si_context *sctx, struct si_pipe_shader *shader, LLVMModuleRef mod); void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader); diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c index 22111787069..5255571331f 100644 --- a/src/gallium/drivers/radeonsi/si_state.c +++ b/src/gallium/drivers/radeonsi/si_state.c @@ -169,24 +169,24 @@ static unsigned cik_db_pipe_config(struct si_screen *rscreen, unsigned tile_mode /* * inferred framebuffer and blender state */ -static void si_update_fb_blend_state(struct si_context *rctx) +static void si_update_fb_blend_state(struct si_context *sctx) { struct si_pm4_state *pm4; - struct si_state_blend *blend = rctx->queued.named.blend; + struct si_state_blend *blend = sctx->queued.named.blend; uint32_t mask; if (blend == NULL) return; - pm4 = si_pm4_alloc_state(rctx); + pm4 = 
si_pm4_alloc_state(sctx); if (pm4 == NULL) return; - mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1; + mask = (1ULL << ((unsigned)sctx->framebuffer.nr_cbufs * 4)) - 1; mask &= blend->cb_target_mask; si_pm4_set_reg(pm4, R_028238_CB_TARGET_MASK, mask); - si_pm4_set_state(rctx, fb_blend, pm4); + si_pm4_set_state(sctx, fb_blend, pm4); } /* @@ -344,22 +344,22 @@ static void *si_create_blend_state(struct pipe_context *ctx, static void si_bind_blend_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; - si_pm4_bind_state(rctx, blend, (struct si_state_blend *)state); - si_update_fb_blend_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state); + si_update_fb_blend_state(sctx); } static void si_delete_blend_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; - si_pm4_delete_state(rctx, blend, (struct si_state_blend *)state); + struct si_context *sctx = (struct si_context *)ctx; + si_pm4_delete_state(sctx, blend, (struct si_state_blend *)state); } static void si_set_blend_color(struct pipe_context *ctx, const struct pipe_blend_color *state) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); if (pm4 == NULL) return; @@ -369,7 +369,7 @@ static void si_set_blend_color(struct pipe_context *ctx, si_pm4_set_reg(pm4, R_02841C_CB_BLEND_BLUE, fui(state->color[2])); si_pm4_set_reg(pm4, R_028420_CB_BLEND_ALPHA, fui(state->color[3])); - si_pm4_set_state(rctx, blend_color, pm4); + si_pm4_set_state(sctx, blend_color, pm4); } /* @@ -379,8 +379,8 @@ static void si_set_blend_color(struct pipe_context *ctx, static void si_set_clip_state(struct pipe_context *ctx, const struct pipe_clip_state *state) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); struct pipe_constant_buffer cb; if (pm4 == NULL) @@ -404,7 +404,7 @@ static void si_set_clip_state(struct pipe_context *ctx, ctx->set_constant_buffer(ctx, PIPE_SHADER_VERTEX, NUM_PIPE_CONST_BUFFERS, &cb); pipe_resource_reference(&cb.buffer, NULL); - si_pm4_set_state(rctx, clip, pm4); + si_pm4_set_state(sctx, clip, pm4); } static void si_set_scissor_states(struct pipe_context *ctx, @@ -412,8 +412,8 @@ static void si_set_scissor_states(struct pipe_context *ctx, unsigned num_scissors, const struct pipe_scissor_state *state) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); uint32_t tl, br; if (pm4 == NULL) @@ -430,7 +430,7 @@ static void si_set_scissor_states(struct pipe_context *ctx, si_pm4_set_reg(pm4, R_028228_PA_SC_CLIPRECT_3_TL, tl); si_pm4_set_reg(pm4, R_02822C_PA_SC_CLIPRECT_3_BR, br); - si_pm4_set_state(rctx, scissor, pm4); + si_pm4_set_state(sctx, scissor, pm4); } static void si_set_viewport_states(struct pipe_context *ctx, @@ -438,7 +438,7 @@ static void si_set_viewport_states(struct pipe_context *ctx, unsigned num_viewports, const struct pipe_viewport_state *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct 
si_state_viewport *viewport = CALLOC_STRUCT(si_state_viewport); struct si_pm4_state *pm4 = &viewport->pm4; @@ -453,24 +453,24 @@ static void si_set_viewport_states(struct pipe_context *ctx, si_pm4_set_reg(pm4, R_02844C_PA_CL_VPORT_ZSCALE_0, fui(state->scale[2])); si_pm4_set_reg(pm4, R_028450_PA_CL_VPORT_ZOFFSET_0, fui(state->translate[2])); - si_pm4_set_state(rctx, viewport, viewport); + si_pm4_set_state(sctx, viewport, viewport); } /* * inferred state between framebuffer and rasterizer */ -static void si_update_fb_rs_state(struct si_context *rctx) +static void si_update_fb_rs_state(struct si_context *sctx) { - struct si_state_rasterizer *rs = rctx->queued.named.rasterizer; + struct si_state_rasterizer *rs = sctx->queued.named.rasterizer; struct si_pm4_state *pm4; unsigned offset_db_fmt_cntl = 0, depth; float offset_units; - if (!rs || !rctx->framebuffer.zsbuf) + if (!rs || !sctx->framebuffer.zsbuf) return; - offset_units = rctx->queued.named.rasterizer->offset_units; - switch (rctx->framebuffer.zsbuf->texture->format) { + offset_units = sctx->queued.named.rasterizer->offset_units; + switch (sctx->framebuffer.zsbuf->texture->format) { case PIPE_FORMAT_S8_UINT_Z24_UNORM: case PIPE_FORMAT_X8Z24_UNORM: case PIPE_FORMAT_Z24X8_UNORM: @@ -492,7 +492,7 @@ static void si_update_fb_rs_state(struct si_context *rctx) return; } - pm4 = si_pm4_alloc_state(rctx); + pm4 = si_pm4_alloc_state(sctx); if (pm4 == NULL) return; @@ -500,14 +500,14 @@ static void si_update_fb_rs_state(struct si_context *rctx) /* FIXME some of those reg can be computed with cso */ offset_db_fmt_cntl |= S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(depth); si_pm4_set_reg(pm4, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, - fui(rctx->queued.named.rasterizer->offset_scale)); + fui(sctx->queued.named.rasterizer->offset_scale)); si_pm4_set_reg(pm4, R_028B84_PA_SU_POLY_OFFSET_FRONT_OFFSET, fui(offset_units)); si_pm4_set_reg(pm4, R_028B88_PA_SU_POLY_OFFSET_BACK_SCALE, - fui(rctx->queued.named.rasterizer->offset_scale)); + fui(sctx->queued.named.rasterizer->offset_scale)); si_pm4_set_reg(pm4, R_028B8C_PA_SU_POLY_OFFSET_BACK_OFFSET, fui(offset_units)); si_pm4_set_reg(pm4, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL, offset_db_fmt_cntl); - si_pm4_set_state(rctx, fb_rs, pm4); + si_pm4_set_state(sctx, fb_rs, pm4); } /* @@ -631,35 +631,35 @@ static void *si_create_rs_state(struct pipe_context *ctx, static void si_bind_rs_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_state_rasterizer *rs = (struct si_state_rasterizer *)state; if (state == NULL) return; // TODO - rctx->sprite_coord_enable = rs->sprite_coord_enable; - rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple; - rctx->pa_su_sc_mode_cntl = rs->pa_su_sc_mode_cntl; + sctx->sprite_coord_enable = rs->sprite_coord_enable; + sctx->pa_sc_line_stipple = rs->pa_sc_line_stipple; + sctx->pa_su_sc_mode_cntl = rs->pa_su_sc_mode_cntl; - si_pm4_bind_state(rctx, rasterizer, rs); - si_update_fb_rs_state(rctx); + si_pm4_bind_state(sctx, rasterizer, rs); + si_update_fb_rs_state(sctx); } static void si_delete_rs_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; - si_pm4_delete_state(rctx, rasterizer, (struct si_state_rasterizer *)state); + struct si_context *sctx = (struct si_context *)ctx; + si_pm4_delete_state(sctx, rasterizer, (struct si_state_rasterizer *)state); } /* * infeered state between dsa and stencil ref */ -static void 
si_update_dsa_stencil_ref(struct si_context *rctx) +static void si_update_dsa_stencil_ref(struct si_context *sctx) { - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); - struct pipe_stencil_ref *ref = &rctx->stencil_ref; - struct si_state_dsa *dsa = rctx->queued.named.dsa; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); + struct pipe_stencil_ref *ref = &sctx->stencil_ref; + struct si_state_dsa *dsa = sctx->queued.named.dsa; if (pm4 == NULL) return; @@ -675,15 +675,15 @@ static void si_update_dsa_stencil_ref(struct si_context *rctx) S_028434_STENCILWRITEMASK_BF(dsa->writemask[1]) | S_028434_STENCILOPVAL_BF(1)); - si_pm4_set_state(rctx, dsa_stencil_ref, pm4); + si_pm4_set_state(sctx, dsa_stencil_ref, pm4); } static void si_set_pipe_stencil_ref(struct pipe_context *ctx, const struct pipe_stencil_ref *state) { - struct si_context *rctx = (struct si_context *)ctx; - rctx->stencil_ref = *state; - si_update_dsa_stencil_ref(rctx); + struct si_context *sctx = (struct si_context *)ctx; + sctx->stencil_ref = *state; + si_update_dsa_stencil_ref(sctx); } @@ -779,23 +779,23 @@ static void *si_create_dsa_state(struct pipe_context *ctx, static void si_bind_dsa_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_state_dsa *dsa = state; if (state == NULL) return; - si_pm4_bind_state(rctx, dsa, dsa); - si_update_dsa_stencil_ref(rctx); + si_pm4_bind_state(sctx, dsa, dsa); + si_update_dsa_stencil_ref(sctx); } static void si_delete_dsa_state(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; - si_pm4_delete_state(rctx, dsa, (struct si_state_dsa *)state); + struct si_context *sctx = (struct si_context *)ctx; + si_pm4_delete_state(sctx, dsa, (struct si_state_dsa *)state); } -static void *si_create_db_flush_dsa(struct si_context *rctx, bool copy_depth, +static void *si_create_db_flush_dsa(struct si_context *sctx, bool copy_depth, bool copy_stencil, int sample) { struct pipe_depth_stencil_alpha_state dsa; @@ -803,7 +803,7 @@ static void *si_create_db_flush_dsa(struct si_context *rctx, bool copy_depth, memset(&dsa, 0, sizeof(dsa)); - state = rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); + state = sctx->b.b.create_depth_stencil_alpha_state(&sctx->b.b, &dsa); if (copy_depth || copy_stencil) { si_pm4_set_reg(&state->pm4, R_028000_DB_RENDER_CONTROL, S_028000_DEPTH_COPY(copy_depth) | @@ -1561,7 +1561,7 @@ static unsigned si_tile_mode_index(struct r600_texture *rtex, unsigned level, bo * framebuffer handling */ -static void si_cb(struct si_context *rctx, struct si_pm4_state *pm4, +static void si_cb(struct si_context *sctx, struct si_pm4_state *pm4, const struct pipe_framebuffer_state *state, int cb) { struct r600_texture *rtex; @@ -1682,11 +1682,11 @@ static void si_cb(struct si_context *rctx, struct si_pm4_state *pm4, color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(rtex->fmask.tile_mode_index); - if (rctx->b.chip_class == SI) { + if (sctx->b.chip_class == SI) { /* due to a hw bug, FMASK_BANK_HEIGHT must be set on SI too */ color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh); } - if (rctx->b.chip_class >= CIK) { + if (sctx->b.chip_class >= CIK) { color_pitch |= S_028C64_FMASK_TILE_MAX(rtex->fmask.pitch / 8 - 1); } } @@ -1696,7 +1696,7 @@ static void si_cb(struct si_context *rctx, struct si_pm4_state *pm4, color_info |= S_028C70_FAST_CLEAR(1); } - offset += r600_resource_va(rctx->b.b.screen, state->cbufs[cb]->texture); + offset += 
r600_resource_va(sctx->b.b.screen, state->cbufs[cb]->texture); offset >>= 8; si_pm4_add_bo(pm4, &rtex->resource, RADEON_USAGE_READWRITE); @@ -1732,17 +1732,17 @@ static void si_cb(struct si_context *rctx, struct si_pm4_state *pm4, ((ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) && max_comp_size <= 10) || (ntype == V_028C70_NUMBER_FLOAT && max_comp_size <= 16)) { - rctx->export_16bpc |= 1 << cb; + sctx->export_16bpc |= 1 << cb; /* set SPI_SHADER_COL_FORMAT for possible dual-src blending */ if (state->nr_cbufs == 1) - rctx->export_16bpc |= 1 << 1; + sctx->export_16bpc |= 1 << 1; } } -static void si_db(struct si_context *rctx, struct si_pm4_state *pm4, +static void si_db(struct si_context *sctx, struct si_pm4_state *pm4, const struct pipe_framebuffer_state *state) { - struct si_screen *rscreen = rctx->screen; + struct si_screen *rscreen = sctx->screen; struct r600_texture *rtex; struct si_surface *surf; unsigned level, pitch, slice, format, tile_mode_index, array_mode; @@ -1768,7 +1768,7 @@ static void si_db(struct si_context *rctx, struct si_pm4_state *pm4, } assert(format != V_028040_Z_INVALID); - s_offs = z_offs = r600_resource_va(rctx->b.b.screen, surf->base.texture); + s_offs = z_offs = r600_resource_va(sctx->b.b.screen, surf->base.texture); z_offs += rtex->surface.level[level].offset; s_offs += rtex->surface.stencil_level[level].offset; @@ -1793,7 +1793,7 @@ static void si_db(struct si_context *rctx, struct si_pm4_state *pm4, else s_info = S_028044_FORMAT(V_028044_STENCIL_INVALID); - if (rctx->b.chip_class >= CIK) { + if (sctx->b.chip_class >= CIK) { switch (rtex->surface.level[level].mode) { case RADEON_SURF_MODE_2D: array_mode = V_02803C_ARRAY_2D_TILED_THIN1; @@ -1853,7 +1853,7 @@ static void si_db(struct si_context *rctx, struct si_pm4_state *pm4, s_info |= S_028044_TILE_STENCIL_DISABLE(1); } - uint64_t va = r600_resource_va(&rctx->screen->b.b, &rtex->htile_buffer->b.b); + uint64_t va = r600_resource_va(&sctx->screen->b.b, &rtex->htile_buffer->b.b); db_htile_data_base = va >> 8; db_htile_surface = S_028ABC_FULL_CACHE(1); @@ -1988,7 +1988,7 @@ static void si_get_sample_position(struct pipe_context *ctx, } } -static void si_set_msaa_state(struct si_context *rctx, struct si_pm4_state *pm4, int nr_samples) +static void si_set_msaa_state(struct si_context *sctx, struct si_pm4_state *pm4, int nr_samples) { unsigned max_dist = 0; @@ -2079,28 +2079,28 @@ static void si_set_msaa_state(struct si_context *rctx, struct si_pm4_state *pm4, static void si_set_framebuffer_state(struct pipe_context *ctx, const struct pipe_framebuffer_state *state) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); uint32_t tl, br; int tl_x, tl_y, br_x, br_y, nr_samples, i; if (pm4 == NULL) return; - if (rctx->framebuffer.nr_cbufs) { - rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB | + if (sctx->framebuffer.nr_cbufs) { + sctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_CB_META; } - if (rctx->framebuffer.zsbuf) { - rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB | + if (sctx->framebuffer.zsbuf) { + sctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB | R600_CONTEXT_FLUSH_AND_INV_DB_META; } - util_copy_framebuffer_state(&rctx->framebuffer, state); + util_copy_framebuffer_state(&sctx->framebuffer, state); /* build states */ - rctx->export_16bpc = 0; - rctx->fb_compressed_cb_mask = 0; + sctx->export_16bpc = 0; + 
sctx->fb_compressed_cb_mask = 0; for (i = 0; i < state->nr_cbufs; i++) { struct r600_texture *rtex; @@ -2112,10 +2112,10 @@ static void si_set_framebuffer_state(struct pipe_context *ctx, rtex = (struct r600_texture*)state->cbufs[i]->texture; - si_cb(rctx, pm4, state, i); + si_cb(sctx, pm4, state, i); if (rtex->fmask.size || rtex->cmask.size) { - rctx->fb_compressed_cb_mask |= 1 << i; + sctx->fb_compressed_cb_mask |= 1 << i; } } for (; i < 8; i++) { @@ -2123,8 +2123,8 @@ static void si_set_framebuffer_state(struct pipe_context *ctx, S_028C70_FORMAT(V_028C70_COLOR_INVALID)); } - assert(!(rctx->export_16bpc & ~0xff)); - si_db(rctx, pm4, state); + assert(!(sctx->export_16bpc & ~0xff)); + si_db(sctx, pm4, state); tl_x = 0; tl_y = 0; @@ -2145,14 +2145,14 @@ static void si_set_framebuffer_state(struct pipe_context *ctx, nr_samples = util_framebuffer_get_num_samples(state); - si_set_msaa_state(rctx, pm4, nr_samples); - rctx->fb_log_samples = util_logbase2(nr_samples); - rctx->fb_cb0_is_integer = state->nr_cbufs && state->cbufs[0] && + si_set_msaa_state(sctx, pm4, nr_samples); + sctx->fb_log_samples = util_logbase2(nr_samples); + sctx->fb_cb0_is_integer = state->nr_cbufs && state->cbufs[0] && util_format_is_pure_integer(state->cbufs[0]->format); - si_pm4_set_state(rctx, framebuffer, pm4); - si_update_fb_rs_state(rctx); - si_update_fb_blend_state(rctx); + si_pm4_set_state(sctx, framebuffer, pm4); + si_update_fb_rs_state(sctx); + si_update_fb_blend_state(sctx); } /* @@ -2164,43 +2164,43 @@ static INLINE void si_shader_selector_key(struct pipe_context *ctx, struct si_pipe_shader_selector *sel, union si_shader_key *key) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; memset(key, 0, sizeof(*key)); if (sel->type == PIPE_SHADER_VERTEX) { unsigned i; - if (!rctx->vertex_elements) + if (!sctx->vertex_elements) return; - for (i = 0; i < rctx->vertex_elements->count; ++i) - key->vs.instance_divisors[i] = rctx->vertex_elements->elements[i].instance_divisor; + for (i = 0; i < sctx->vertex_elements->count; ++i) + key->vs.instance_divisors[i] = sctx->vertex_elements->elements[i].instance_divisor; - if (rctx->queued.named.rasterizer->clip_plane_enable & 0xf0) + if (sctx->queued.named.rasterizer->clip_plane_enable & 0xf0) key->vs.ucps_enabled |= 0x2; - if (rctx->queued.named.rasterizer->clip_plane_enable & 0xf) + if (sctx->queued.named.rasterizer->clip_plane_enable & 0xf) key->vs.ucps_enabled |= 0x1; } else if (sel->type == PIPE_SHADER_FRAGMENT) { if (sel->fs_write_all) - key->ps.nr_cbufs = rctx->framebuffer.nr_cbufs; - key->ps.export_16bpc = rctx->export_16bpc; + key->ps.nr_cbufs = sctx->framebuffer.nr_cbufs; + key->ps.export_16bpc = sctx->export_16bpc; - if (rctx->queued.named.rasterizer) { - key->ps.color_two_side = rctx->queued.named.rasterizer->two_side; - key->ps.flatshade = rctx->queued.named.rasterizer->flatshade; + if (sctx->queued.named.rasterizer) { + key->ps.color_two_side = sctx->queued.named.rasterizer->two_side; + key->ps.flatshade = sctx->queued.named.rasterizer->flatshade; - if (rctx->queued.named.blend) { - key->ps.alpha_to_one = rctx->queued.named.blend->alpha_to_one && - rctx->queued.named.rasterizer->multisample_enable && - !rctx->fb_cb0_is_integer; + if (sctx->queued.named.blend) { + key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one && + sctx->queued.named.rasterizer->multisample_enable && + !sctx->fb_cb0_is_integer; } } - if (rctx->queued.named.dsa) { - key->ps.alpha_func = rctx->queued.named.dsa->alpha_func; + if 
(sctx->queued.named.dsa) { + key->ps.alpha_func = sctx->queued.named.dsa->alpha_func; /* Alpha-test should be disabled if colorbuffer 0 is integer. */ - if (rctx->framebuffer.nr_cbufs && - rctx->framebuffer.cbufs[0] && - util_format_is_pure_integer(rctx->framebuffer.cbufs[0]->texture->format)) + if (sctx->framebuffer.nr_cbufs && + sctx->framebuffer.cbufs[0] && + util_format_is_pure_integer(sctx->framebuffer.cbufs[0]->texture->format)) key->ps.alpha_func = PIPE_FUNC_ALWAYS; } else { key->ps.alpha_func = PIPE_FUNC_ALWAYS; @@ -2309,46 +2309,46 @@ static void *si_create_vs_state(struct pipe_context *ctx, static void si_bind_vs_shader(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_shader_selector *sel = state; - if (rctx->vs_shader == sel) + if (sctx->vs_shader == sel) return; if (!sel || !sel->current) return; - rctx->vs_shader = sel; - si_pm4_bind_state(rctx, vs, sel->current->pm4); - rctx->b.streamout.stride_in_dw = sel->so.stride; - rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE; + sctx->vs_shader = sel; + si_pm4_bind_state(sctx, vs, sel->current->pm4); + sctx->b.streamout.stride_in_dw = sel->so.stride; + sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE; } static void si_bind_ps_shader(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_shader_selector *sel = state; - if (rctx->ps_shader == sel) + if (sctx->ps_shader == sel) return; if (!sel || !sel->current) - sel = rctx->dummy_pixel_shader; + sel = sctx->dummy_pixel_shader; - rctx->ps_shader = sel; - si_pm4_bind_state(rctx, ps, sel->current->pm4); - rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE; + sctx->ps_shader = sel; + si_pm4_bind_state(sctx, ps, sel->current->pm4); + sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE; } static void si_delete_shader_selector(struct pipe_context *ctx, struct si_pipe_shader_selector *sel) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_shader *p = sel->current, *c; while (p) { c = p->next_variant; - si_pm4_delete_state(rctx, vs, p->pm4); + si_pm4_delete_state(sctx, vs, p->pm4); si_pipe_shader_destroy(ctx, p); free(p); p = c; @@ -2360,11 +2360,11 @@ static void si_delete_shader_selector(struct pipe_context *ctx, static void si_delete_vs_shader(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_shader_selector *sel = (struct si_pipe_shader_selector *)state; - if (rctx->vs_shader == sel) { - rctx->vs_shader = NULL; + if (sctx->vs_shader == sel) { + sctx->vs_shader = NULL; } si_delete_shader_selector(ctx, sel); @@ -2372,11 +2372,11 @@ static void si_delete_vs_shader(struct pipe_context *ctx, void *state) static void si_delete_ps_shader(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pipe_shader_selector *sel = (struct si_pipe_shader_selector *)state; - if (rctx->ps_shader == sel) { - rctx->ps_shader = NULL; + if (sctx->ps_shader == sel) { + sctx->ps_shader = NULL; } si_delete_shader_selector(ctx, sel); @@ -2709,8 +2709,8 @@ static void si_set_sampler_views(struct pipe_context *ctx, unsigned count, struct pipe_sampler_view **views) { - struct si_context *rctx = (struct si_context *)ctx; - 
struct si_textures_info *samplers = &rctx->samplers[shader]; + struct si_context *sctx = (struct si_context *)ctx; + struct si_textures_info *samplers = &sctx->samplers[shader]; struct si_pipe_sampler_view **rviews = (struct si_pipe_sampler_view **)views; int i; @@ -2723,13 +2723,13 @@ static void si_set_sampler_views(struct pipe_context *ctx, if (!views[i]) { samplers->depth_texture_mask &= ~(1 << i); samplers->compressed_colortex_mask &= ~(1 << i); - si_set_sampler_view(rctx, shader, i, NULL, NULL); - si_set_sampler_view(rctx, shader, FMASK_TEX_OFFSET + i, + si_set_sampler_view(sctx, shader, i, NULL, NULL); + si_set_sampler_view(sctx, shader, FMASK_TEX_OFFSET + i, NULL, NULL); continue; } - si_set_sampler_view(rctx, shader, i, views[i], rviews[i]->state); + si_set_sampler_view(sctx, shader, i, views[i], rviews[i]->state); if (views[i]->texture->target != PIPE_BUFFER) { struct r600_texture *rtex = @@ -2747,10 +2747,10 @@ static void si_set_sampler_views(struct pipe_context *ctx, } if (rtex->fmask.size) { - si_set_sampler_view(rctx, shader, FMASK_TEX_OFFSET + i, + si_set_sampler_view(sctx, shader, FMASK_TEX_OFFSET + i, views[i], rviews[i]->fmask_state); } else { - si_set_sampler_view(rctx, shader, FMASK_TEX_OFFSET + i, + si_set_sampler_view(sctx, shader, FMASK_TEX_OFFSET + i, NULL, NULL); } } @@ -2758,62 +2758,62 @@ static void si_set_sampler_views(struct pipe_context *ctx, for (; i < samplers->n_views; i++) { samplers->depth_texture_mask &= ~(1 << i); samplers->compressed_colortex_mask &= ~(1 << i); - si_set_sampler_view(rctx, shader, i, NULL, NULL); - si_set_sampler_view(rctx, shader, FMASK_TEX_OFFSET + i, + si_set_sampler_view(sctx, shader, i, NULL, NULL); + si_set_sampler_view(sctx, shader, FMASK_TEX_OFFSET + i, NULL, NULL); } samplers->n_views = count; - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; } -static struct si_pm4_state *si_set_sampler_states(struct si_context *rctx, unsigned count, +static struct si_pm4_state *si_set_sampler_states(struct si_context *sctx, unsigned count, void **states, struct si_textures_info *samplers, unsigned user_data_reg) { struct si_pipe_sampler_state **rstates = (struct si_pipe_sampler_state **)states; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); uint32_t *border_color_table = NULL; int i, j; if (!count) goto out; - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; si_pm4_sh_data_begin(pm4); for (i = 0; i < count; i++) { if (rstates[i] && G_008F3C_BORDER_COLOR_TYPE(rstates[i]->val[3]) == V_008F3C_SQ_TEX_BORDER_COLOR_REGISTER) { - if (!rctx->border_color_table || - ((rctx->border_color_offset + count - i) & + if (!sctx->border_color_table || + ((sctx->border_color_offset + count - i) & C_008F3C_BORDER_COLOR_PTR)) { - r600_resource_reference(&rctx->border_color_table, NULL); - rctx->border_color_offset = 0; + r600_resource_reference(&sctx->border_color_table, NULL); + sctx->border_color_offset = 0; - rctx->border_color_table = - si_resource_create_custom(&rctx->screen->b.b, + sctx->border_color_table = + si_resource_create_custom(&sctx->screen->b.b, PIPE_USAGE_STAGING, 4096 * 4 * 4); } if (!border_color_table) { border_color_table = - rctx->b.ws->buffer_map(rctx->border_color_table->cs_buf, - rctx->b.rings.gfx.cs, + sctx->b.ws->buffer_map(sctx->border_color_table->cs_buf, + sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED); } for (j = 0; j < 4; j++) { - border_color_table[4 * 
rctx->border_color_offset + j] = + border_color_table[4 * sctx->border_color_offset + j] = util_le32_to_cpu(rstates[i]->border_color[j]); } rstates[i]->val[3] &= C_008F3C_BORDER_COLOR_PTR; - rstates[i]->val[3] |= S_008F3C_BORDER_COLOR_PTR(rctx->border_color_offset++); + rstates[i]->val[3] |= S_008F3C_BORDER_COLOR_PTR(sctx->border_color_offset++); } for (j = 0; j < Elements(rstates[i]->val); ++j) { @@ -2824,14 +2824,14 @@ static struct si_pm4_state *si_set_sampler_states(struct si_context *rctx, unsig if (border_color_table) { uint64_t va_offset = - r600_resource_va(&rctx->screen->b.b, - (void*)rctx->border_color_table); + r600_resource_va(&sctx->screen->b.b, + (void*)sctx->border_color_table); si_pm4_set_reg(pm4, R_028080_TA_BC_BASE_ADDR, va_offset >> 8); - if (rctx->b.chip_class >= CIK) + if (sctx->b.chip_class >= CIK) si_pm4_set_reg(pm4, R_028084_TA_BC_BASE_ADDR_HI, va_offset >> 40); - rctx->b.ws->buffer_unmap(rctx->border_color_table->cs_buf); - si_pm4_add_bo(pm4, rctx->border_color_table, RADEON_USAGE_READ); + sctx->b.ws->buffer_unmap(sctx->border_color_table->cs_buf); + si_pm4_add_bo(pm4, sctx->border_color_table, RADEON_USAGE_READ); } memcpy(samplers->samplers, states, sizeof(void*) * count); @@ -2843,22 +2843,22 @@ out: static void si_bind_vs_sampler_states(struct pipe_context *ctx, unsigned count, void **states) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pm4_state *pm4; - pm4 = si_set_sampler_states(rctx, count, states, &rctx->samplers[PIPE_SHADER_VERTEX], + pm4 = si_set_sampler_states(sctx, count, states, &sctx->samplers[PIPE_SHADER_VERTEX], R_00B130_SPI_SHADER_USER_DATA_VS_0); - si_pm4_set_state(rctx, vs_sampler, pm4); + si_pm4_set_state(sctx, vs_sampler, pm4); } static void si_bind_ps_sampler_states(struct pipe_context *ctx, unsigned count, void **states) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_pm4_state *pm4; - pm4 = si_set_sampler_states(rctx, count, states, &rctx->samplers[PIPE_SHADER_FRAGMENT], + pm4 = si_set_sampler_states(sctx, count, states, &sctx->samplers[PIPE_SHADER_FRAGMENT], R_00B030_SPI_SHADER_USER_DATA_PS_0); - si_pm4_set_state(rctx, ps_sampler, pm4); + si_pm4_set_state(sctx, ps_sampler, pm4); } @@ -2884,8 +2884,8 @@ static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader, static void si_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask) { - struct si_context *rctx = (struct si_context *)ctx; - struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx); + struct si_context *sctx = (struct si_context *)ctx; + struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx); uint16_t mask = sample_mask; if (pm4 == NULL) @@ -2894,7 +2894,7 @@ static void si_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask) si_pm4_set_reg(pm4, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, mask | (mask << 16)); si_pm4_set_reg(pm4, R_028C3C_PA_SC_AA_MASK_X0Y1_X1Y1, mask | (mask << 16)); - si_pm4_set_state(rctx, sample_mask, pm4); + si_pm4_set_state(sctx, sample_mask, pm4); } static void si_delete_sampler_state(struct pipe_context *ctx, void *state) @@ -2942,39 +2942,39 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, static void si_bind_vertex_elements(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; struct si_vertex_element *v = (struct si_vertex_element*)state; - rctx->vertex_elements = v; + 
sctx->vertex_elements = v; } static void si_delete_vertex_element(struct pipe_context *ctx, void *state) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; - if (rctx->vertex_elements == state) - rctx->vertex_elements = NULL; + if (sctx->vertex_elements == state) + sctx->vertex_elements = NULL; FREE(state); } static void si_set_vertex_buffers(struct pipe_context *ctx, unsigned start_slot, unsigned count, const struct pipe_vertex_buffer *buffers) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; - util_set_vertex_buffers_count(rctx->vertex_buffer, &rctx->nr_vertex_buffers, buffers, start_slot, count); + util_set_vertex_buffers_count(sctx->vertex_buffer, &sctx->nr_vertex_buffers, buffers, start_slot, count); } static void si_set_index_buffer(struct pipe_context *ctx, const struct pipe_index_buffer *ib) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; if (ib) { - pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer); - memcpy(&rctx->index_buffer, ib, sizeof(*ib)); + pipe_resource_reference(&sctx->index_buffer.buffer, ib->buffer); + memcpy(&sctx->index_buffer, ib, sizeof(*ib)); } else { - pipe_resource_reference(&rctx->index_buffer.buffer, NULL); + pipe_resource_reference(&sctx->index_buffer.buffer, NULL); } } @@ -2988,20 +2988,20 @@ static void si_set_polygon_stipple(struct pipe_context *ctx, static void si_texture_barrier(struct pipe_context *ctx) { - struct si_context *rctx = (struct si_context *)ctx; + struct si_context *sctx = (struct si_context *)ctx; - rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | + sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB; } -static void *si_create_blend_custom(struct si_context *rctx, unsigned mode) +static void *si_create_blend_custom(struct si_context *sctx, unsigned mode) { struct pipe_blend_state blend; memset(&blend, 0, sizeof(blend)); blend.independent_blend_enable = true; blend.rt[0].colormask = 0xf; - return si_create_blend_state_mode(&rctx->b.b, &blend, mode); + return si_create_blend_state_mode(&sctx->b.b, &blend, mode); } static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, @@ -3051,75 +3051,75 @@ static boolean si_dma_copy(struct pipe_context *ctx, return FALSE; } -void si_init_state_functions(struct si_context *rctx) +void si_init_state_functions(struct si_context *sctx) { int i; - rctx->b.b.create_blend_state = si_create_blend_state; - rctx->b.b.bind_blend_state = si_bind_blend_state; - rctx->b.b.delete_blend_state = si_delete_blend_state; - rctx->b.b.set_blend_color = si_set_blend_color; + sctx->b.b.create_blend_state = si_create_blend_state; + sctx->b.b.bind_blend_state = si_bind_blend_state; + sctx->b.b.delete_blend_state = si_delete_blend_state; + sctx->b.b.set_blend_color = si_set_blend_color; - rctx->b.b.create_rasterizer_state = si_create_rs_state; - rctx->b.b.bind_rasterizer_state = si_bind_rs_state; - rctx->b.b.delete_rasterizer_state = si_delete_rs_state; + sctx->b.b.create_rasterizer_state = si_create_rs_state; + sctx->b.b.bind_rasterizer_state = si_bind_rs_state; + sctx->b.b.delete_rasterizer_state = si_delete_rs_state; - rctx->b.b.create_depth_stencil_alpha_state = si_create_dsa_state; - rctx->b.b.bind_depth_stencil_alpha_state = si_bind_dsa_state; - rctx->b.b.delete_depth_stencil_alpha_state = si_delete_dsa_state; + sctx->b.b.create_depth_stencil_alpha_state = si_create_dsa_state; + 
sctx->b.b.bind_depth_stencil_alpha_state = si_bind_dsa_state; + sctx->b.b.delete_depth_stencil_alpha_state = si_delete_dsa_state; for (i = 0; i < 8; i++) { - rctx->custom_dsa_flush_depth_stencil[i] = si_create_db_flush_dsa(rctx, true, true, i); - rctx->custom_dsa_flush_depth[i] = si_create_db_flush_dsa(rctx, true, false, i); - rctx->custom_dsa_flush_stencil[i] = si_create_db_flush_dsa(rctx, false, true, i); + sctx->custom_dsa_flush_depth_stencil[i] = si_create_db_flush_dsa(sctx, true, true, i); + sctx->custom_dsa_flush_depth[i] = si_create_db_flush_dsa(sctx, true, false, i); + sctx->custom_dsa_flush_stencil[i] = si_create_db_flush_dsa(sctx, false, true, i); } - rctx->custom_dsa_flush_inplace = si_create_db_flush_dsa(rctx, false, false, 0); - rctx->custom_blend_resolve = si_create_blend_custom(rctx, V_028808_CB_RESOLVE); - rctx->custom_blend_decompress = si_create_blend_custom(rctx, V_028808_CB_FMASK_DECOMPRESS); + sctx->custom_dsa_flush_inplace = si_create_db_flush_dsa(sctx, false, false, 0); + sctx->custom_blend_resolve = si_create_blend_custom(sctx, V_028808_CB_RESOLVE); + sctx->custom_blend_decompress = si_create_blend_custom(sctx, V_028808_CB_FMASK_DECOMPRESS); - rctx->b.b.set_clip_state = si_set_clip_state; - rctx->b.b.set_scissor_states = si_set_scissor_states; - rctx->b.b.set_viewport_states = si_set_viewport_states; - rctx->b.b.set_stencil_ref = si_set_pipe_stencil_ref; + sctx->b.b.set_clip_state = si_set_clip_state; + sctx->b.b.set_scissor_states = si_set_scissor_states; + sctx->b.b.set_viewport_states = si_set_viewport_states; + sctx->b.b.set_stencil_ref = si_set_pipe_stencil_ref; - rctx->b.b.set_framebuffer_state = si_set_framebuffer_state; - rctx->b.b.get_sample_position = si_get_sample_position; + sctx->b.b.set_framebuffer_state = si_set_framebuffer_state; + sctx->b.b.get_sample_position = si_get_sample_position; - rctx->b.b.create_vs_state = si_create_vs_state; - rctx->b.b.create_fs_state = si_create_fs_state; - rctx->b.b.bind_vs_state = si_bind_vs_shader; - rctx->b.b.bind_fs_state = si_bind_ps_shader; - rctx->b.b.delete_vs_state = si_delete_vs_shader; - rctx->b.b.delete_fs_state = si_delete_ps_shader; + sctx->b.b.create_vs_state = si_create_vs_state; + sctx->b.b.create_fs_state = si_create_fs_state; + sctx->b.b.bind_vs_state = si_bind_vs_shader; + sctx->b.b.bind_fs_state = si_bind_ps_shader; + sctx->b.b.delete_vs_state = si_delete_vs_shader; + sctx->b.b.delete_fs_state = si_delete_ps_shader; - rctx->b.b.create_sampler_state = si_create_sampler_state; - rctx->b.b.bind_sampler_states = si_bind_sampler_states; - rctx->b.b.delete_sampler_state = si_delete_sampler_state; + sctx->b.b.create_sampler_state = si_create_sampler_state; + sctx->b.b.bind_sampler_states = si_bind_sampler_states; + sctx->b.b.delete_sampler_state = si_delete_sampler_state; - rctx->b.b.create_sampler_view = si_create_sampler_view; - rctx->b.b.set_sampler_views = si_set_sampler_views; - rctx->b.b.sampler_view_destroy = si_sampler_view_destroy; + sctx->b.b.create_sampler_view = si_create_sampler_view; + sctx->b.b.set_sampler_views = si_set_sampler_views; + sctx->b.b.sampler_view_destroy = si_sampler_view_destroy; - rctx->b.b.set_sample_mask = si_set_sample_mask; + sctx->b.b.set_sample_mask = si_set_sample_mask; - rctx->b.b.create_vertex_elements_state = si_create_vertex_elements; - rctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements; - rctx->b.b.delete_vertex_elements_state = si_delete_vertex_element; - rctx->b.b.set_vertex_buffers = si_set_vertex_buffers; - rctx->b.b.set_index_buffer = 
si_set_index_buffer;
+ sctx->b.b.create_vertex_elements_state = si_create_vertex_elements;
+ sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
+ sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
+ sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
+ sctx->b.b.set_index_buffer = si_set_index_buffer;
- rctx->b.b.texture_barrier = si_texture_barrier;
- rctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
- rctx->b.b.create_surface = r600_create_surface;
- rctx->b.b.surface_destroy = r600_surface_destroy;
- rctx->b.dma_copy = si_dma_copy;
+ sctx->b.b.texture_barrier = si_texture_barrier;
+ sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
+ sctx->b.b.create_surface = r600_create_surface;
+ sctx->b.b.surface_destroy = r600_surface_destroy;
+ sctx->b.dma_copy = si_dma_copy;
- rctx->b.b.draw_vbo = si_draw_vbo;
+ sctx->b.b.draw_vbo = si_draw_vbo;
 }
-void si_init_config(struct si_context *rctx)
+void si_init_config(struct si_context *sctx)
 {
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
 if (pm4 == NULL)
 return;
@@ -3146,7 +3146,7 @@ void si_init_config(struct si_context *rctx)
 si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
 si_pm4_set_reg(pm4, R_028B94_VGT_STRMOUT_CONFIG, 0x0);
 si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
- if (rctx->b.chip_class == SI) {
+ if (sctx->b.chip_class == SI) {
 si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
 S_028AA8_SWITCH_ON_EOP(1) |
 S_028AA8_PARTIAL_VS_WAVE_ON(1) |
@@ -3154,7 +3154,7 @@
 }
 si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF, 0x00000000);
 si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
- if (rctx->b.chip_class < CIK)
+ if (sctx->b.chip_class < CIK)
 si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
 S_008A14_CLIP_VTX_REORDER_ENA(1));
@@ -3164,8 +3164,8 @@ void si_init_config(struct si_context *rctx)
 si_pm4_set_reg(pm4, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
- if (rctx->b.chip_class >= CIK) {
- switch (rctx->screen->b.family) {
+ if (sctx->b.chip_class >= CIK) {
+ switch (sctx->screen->b.family) {
 case CHIP_BONAIRE:
 si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG, 0x16000012);
 si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1, 0x00000000);
@@ -3184,7 +3184,7 @@ void si_init_config(struct si_context *rctx)
 break;
 }
 } else {
- switch (rctx->screen->b.family) {
+ switch (sctx->screen->b.family) {
 case CHIP_TAHITI:
 case CHIP_PITCAIRN:
 si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG, 0x2a00126a);
@@ -3227,11 +3227,11 @@ void si_init_config(struct si_context *rctx)
 si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
 si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
- if (rctx->b.chip_class >= CIK) {
+ if (sctx->b.chip_class >= CIK) {
 si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
 si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(0));
 si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
 }
- si_pm4_set_state(rctx, init, pm4);
+ si_pm4_set_state(sctx, init, pm4);
 }
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index 0a211112e02..3fe3cb80538 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -162,41 +162,41 @@ struct si_buffer_resources {
 #define si_pm4_block_idx(member) \
 (offsetof(union si_state, named.member) / sizeof(struct si_pm4_state *))
-#define si_pm4_state_changed(rctx, member) \
- ((rctx)->queued.named.member != (rctx)->emitted.named.member)
+#define si_pm4_state_changed(sctx, member) \
+ ((sctx)->queued.named.member != (sctx)->emitted.named.member)
-#define si_pm4_bind_state(rctx, member, value) \
+#define si_pm4_bind_state(sctx, member, value) \
 do { \
- (rctx)->queued.named.member = (value); \
+ (sctx)->queued.named.member = (value); \
 } while(0)
-#define si_pm4_delete_state(rctx, member, value) \
+#define si_pm4_delete_state(sctx, member, value) \
 do { \
- if ((rctx)->queued.named.member == (value)) { \
- (rctx)->queued.named.member = NULL; \
+ if ((sctx)->queued.named.member == (value)) { \
+ (sctx)->queued.named.member = NULL; \
 } \
- si_pm4_free_state(rctx, (struct si_pm4_state *)(value), \
+ si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
 si_pm4_block_idx(member)); \
 } while(0)
-#define si_pm4_set_state(rctx, member, value) \
+#define si_pm4_set_state(sctx, member, value) \
 do { \
- if ((rctx)->queued.named.member != (value)) { \
- si_pm4_free_state(rctx, \
- (struct si_pm4_state *)(rctx)->queued.named.member, \
+ if ((sctx)->queued.named.member != (value)) { \
+ si_pm4_free_state(sctx, \
+ (struct si_pm4_state *)(sctx)->queued.named.member, \
 si_pm4_block_idx(member)); \
- (rctx)->queued.named.member = (value); \
+ (sctx)->queued.named.member = (value); \
 } \
 } while(0)
 /* si_descriptors.c */
-void si_set_sampler_view(struct si_context *rctx, unsigned shader,
+void si_set_sampler_view(struct si_context *sctx, unsigned shader,
 unsigned slot, struct pipe_sampler_view *view,
 unsigned *view_desc);
-void si_init_all_descriptors(struct si_context *rctx);
-void si_release_all_descriptors(struct si_context *rctx);
-void si_all_descriptors_begin_new_cs(struct si_context *rctx);
-void si_copy_buffer(struct si_context *rctx,
+void si_init_all_descriptors(struct si_context *sctx);
+void si_release_all_descriptors(struct si_context *sctx);
+void si_all_descriptors_begin_new_cs(struct si_context *sctx);
+void si_copy_buffer(struct si_context *sctx,
 struct pipe_resource *dst, struct pipe_resource *src,
 uint64_t dst_offset, uint64_t src_offset, unsigned size);
@@ -212,12 +212,12 @@ boolean si_is_format_supported(struct pipe_screen *screen,
 int si_shader_select(struct pipe_context *ctx,
 struct si_pipe_shader_selector *sel,
 unsigned *dirty);
-void si_init_state_functions(struct si_context *rctx);
-void si_init_config(struct si_context *rctx);
+void si_init_state_functions(struct si_context *sctx);
+void si_init_config(struct si_context *sctx);
 /* si_state_draw.c */
 extern const struct r600_atom si_atom_cache_flush;
-void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *atom);
+void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom);
 void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo);
 /* si_commands.c */
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index a141b3a4db9..f36a1b87398 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -40,14 +40,14 @@
 static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
 {
- struct si_context *rctx = (struct si_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
 struct si_pm4_state *pm4;
 unsigned num_sgprs, num_user_sgprs;
 unsigned nparams, i, vgpr_comp_cnt;
 uint64_t va;
- si_pm4_delete_state(rctx, vs, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
+ si_pm4_delete_state(sctx, vs, shader->pm4);
+ pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
 if (pm4 == NULL)
 return;
@@ -111,27 +111,27 @@ static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *s
 S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
 S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
- si_pm4_bind_state(rctx, vs, shader->pm4);
- rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
+ si_pm4_bind_state(sctx, vs, shader->pm4);
+ sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
 }
 static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
 {
- struct si_context *rctx = (struct si_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
 struct si_pm4_state *pm4;
 unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
 unsigned num_sgprs, num_user_sgprs;
 unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
 uint64_t va;
- si_pm4_delete_state(rctx, ps, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
+ si_pm4_delete_state(sctx, ps, shader->pm4);
+ pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
 if (pm4 == NULL)
 return;
 db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
- S_02880C_ALPHA_TO_MASK_DISABLE(rctx->fb_cb0_is_integer);
+ S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);
 for (i = 0; i < shader->shader.ninput; i++) {
 switch (shader->shader.input[i].name) {
@@ -225,10 +225,10 @@ static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *s
 si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
- shader->cb0_is_integer = rctx->fb_cb0_is_integer;
- shader->sprite_coord_enable = rctx->sprite_coord_enable;
- si_pm4_bind_state(rctx, ps, shader->pm4);
- rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
+ shader->cb0_is_integer = sctx->fb_cb0_is_integer;
+ shader->sprite_coord_enable = sctx->sprite_coord_enable;
+ si_pm4_bind_state(sctx, ps, shader->pm4);
+ sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
 }
 /*
@@ -283,12 +283,12 @@ static unsigned si_conv_prim_to_gs_out(unsigned mode)
 return prim_conv[mode];
 }
-static bool si_update_draw_info_state(struct si_context *rctx,
+static bool si_update_draw_info_state(struct si_context *sctx,
 const struct pipe_draw_info *info,
 const struct pipe_index_buffer *ib)
 {
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
- struct si_shader *vs = &rctx->vs_shader->current->shader;
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
+ struct si_shader *vs = &sctx->vs_shader->current->shader;
 unsigned prim = si_conv_pipe_prim(info->mode);
 unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
 unsigned ls_mask = 0;
@@ -301,8 +301,8 @@ static bool si_update_draw_info_state(struct si_context *rctx,
 return false;
 }
- if (rctx->b.chip_class >= CIK) {
- struct si_state_rasterizer *rs = rctx->queued.named.rasterizer;
+ if (sctx->b.chip_class >= CIK) {
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
 bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
 prim == V_008958_DI_PT_LINELOOP ||
 prim == V_008958_DI_PT_TRIFAN ||
@@ -339,13 +339,13 @@ static bool si_update_draw_info_state(struct si_context *rctx,
 ls_mask = 2;
 si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
 S_028A0C_AUTO_RESET_CNTL(ls_mask) |
- rctx->pa_sc_line_stipple);
+ sctx->pa_sc_line_stipple);
 if (info->mode == PIPE_PRIM_QUADS ||
 info->mode == PIPE_PRIM_QUAD_STRIP ||
 info->mode == PIPE_PRIM_POLYGON) {
 si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
- S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
+ S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
 } else {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
+ si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
 }
 si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
 S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
@@ -354,22 +354,22 @@ static bool si_update_draw_info_state(struct si_context *rctx,
 S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
 S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
 S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
- (rctx->queued.named.rasterizer->clip_plane_enable &
+ (sctx->queued.named.rasterizer->clip_plane_enable &
 vs->clip_dist_write));
 si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
- rctx->queued.named.rasterizer->pa_cl_clip_cntl |
+ sctx->queued.named.rasterizer->pa_cl_clip_cntl |
 (vs->clip_dist_write ? 0 :
- rctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
+ sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
- si_pm4_set_state(rctx, draw_info, pm4);
+ si_pm4_set_state(sctx, draw_info, pm4);
 return true;
 }
-static void si_update_spi_map(struct si_context *rctx)
+static void si_update_spi_map(struct si_context *sctx)
 {
- struct si_shader *ps = &rctx->ps_shader->current->shader;
- struct si_shader *vs = &rctx->vs_shader->current->shader;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct si_shader *ps = &sctx->ps_shader->current->shader;
+ struct si_shader *vs = &sctx->vs_shader->current->shader;
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
 unsigned i, j, tmp;
 for (i = 0; i < ps->ninput; i++) {
@@ -385,12 +385,12 @@ bcolor:
 if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
 (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
- rctx->ps_shader->current->key.ps.flatshade)) {
+ sctx->ps_shader->current->key.ps.flatshade)) {
 tmp |= S_028644_FLAT_SHADE(1);
 }
 if (name == TGSI_SEMANTIC_GENERIC &&
- rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
+ sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
 tmp |= S_028644_PT_SPRITE_TEX(1);
 }
@@ -412,94 +412,94 @@ bcolor:
 tmp);
 if (name == TGSI_SEMANTIC_COLOR &&
- rctx->ps_shader->current->key.ps.color_two_side) {
+ sctx->ps_shader->current->key.ps.color_two_side) {
 name = TGSI_SEMANTIC_BCOLOR;
 param_offset++;
 goto bcolor;
 }
 }
- si_pm4_set_state(rctx, spi, pm4);
+ si_pm4_set_state(sctx, spi, pm4);
 }
-static void si_update_derived_state(struct si_context *rctx)
+static void si_update_derived_state(struct si_context *sctx)
 {
- struct pipe_context * ctx = (struct pipe_context*)rctx;
+ struct pipe_context * ctx = (struct pipe_context*)sctx;
 unsigned vs_dirty = 0, ps_dirty = 0;
- if (!rctx->blitter->running) {
+ if (!sctx->blitter->running) {
 /* Flush depth textures which need to be flushed. */
 for (int i = 0; i < SI_NUM_SHADERS; i++) {
- if (rctx->samplers[i].depth_texture_mask) {
- si_flush_depth_textures(rctx, &rctx->samplers[i]);
+ if (sctx->samplers[i].depth_texture_mask) {
+ si_flush_depth_textures(sctx, &sctx->samplers[i]);
 }
- if (rctx->samplers[i].compressed_colortex_mask) {
- si_decompress_color_textures(rctx, &rctx->samplers[i]);
+ if (sctx->samplers[i].compressed_colortex_mask) {
+ si_decompress_color_textures(sctx, &sctx->samplers[i]);
 }
 }
 }
- si_shader_select(ctx, rctx->vs_shader, &vs_dirty);
+ si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
- if (!rctx->vs_shader->current->pm4) {
- si_pipe_shader_vs(ctx, rctx->vs_shader->current);
+ if (!sctx->vs_shader->current->pm4) {
+ si_pipe_shader_vs(ctx, sctx->vs_shader->current);
 vs_dirty = 0;
 }
 if (vs_dirty) {
- si_pm4_bind_state(rctx, vs, rctx->vs_shader->current->pm4);
+ si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
 }
- si_shader_select(ctx, rctx->ps_shader, &ps_dirty);
+ si_shader_select(ctx, sctx->ps_shader, &ps_dirty);
- if (!rctx->ps_shader->current->pm4) {
- si_pipe_shader_ps(ctx, rctx->ps_shader->current);
+ if (!sctx->ps_shader->current->pm4) {
+ si_pipe_shader_ps(ctx, sctx->ps_shader->current);
 ps_dirty = 0;
 }
- if (rctx->ps_shader->current->cb0_is_integer != rctx->fb_cb0_is_integer) {
- si_pipe_shader_ps(ctx, rctx->ps_shader->current);
+ if (sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer) {
+ si_pipe_shader_ps(ctx, sctx->ps_shader->current);
 ps_dirty = 0;
 }
 if (ps_dirty) {
- si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
+ si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
 }
- if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
+ if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
 /* XXX: Emitting the PS state even when only the VS changed
 * fixes random failures with piglit glsl-max-varyings.
 * Not sure why...
 */
- rctx->emitted.named.ps = NULL;
- si_update_spi_map(rctx);
+ sctx->emitted.named.ps = NULL;
+ si_update_spi_map(sctx);
 }
 }
-static void si_vertex_buffer_update(struct si_context *rctx)
+static void si_vertex_buffer_update(struct si_context *sctx)
 {
- struct pipe_context *ctx = &rctx->b.b;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct pipe_context *ctx = &sctx->b.b;
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
 bool bound[PIPE_MAX_ATTRIBS] = {};
 unsigned i, count;
 uint64_t va;
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
- count = rctx->vertex_elements->count;
+ count = sctx->vertex_elements->count;
 assert(count <= 256 / 4);
 si_pm4_sh_data_begin(pm4);
 for (i = 0 ; i < count; i++) {
- struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
+ struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
 struct pipe_vertex_buffer *vb;
 struct r600_resource *rbuffer;
 unsigned offset;
- if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
+ if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
 continue;
- vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
+ vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
 rbuffer = (struct r600_resource*)vb->buffer;
 if (rbuffer == NULL)
 continue;
@@ -523,7 +523,7 @@ static void si_vertex_buffer_update(struct si_context *rctx)
 vb->stride + 1);
 else
 si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
- si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);
+ si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);
 if (!bound[ve->vertex_buffer_index]) {
 si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
@@ -531,39 +531,39 @@ static void si_vertex_buffer_update(struct si_context *rctx)
 }
 }
 si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
- si_pm4_set_state(rctx, vertex_buffers, pm4);
+ si_pm4_set_state(sctx, vertex_buffers, pm4);
 }
-static void si_state_draw(struct si_context *rctx,
+static void si_state_draw(struct si_context *sctx,
 const struct pipe_draw_info *info,
 const struct pipe_index_buffer *ib)
 {
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
 if (pm4 == NULL)
 return;
 /* queries need some special values
 * (this is non-zero if any query is active) */
- if (rctx->num_cs_dw_nontimer_queries_suspend) {
- if (rctx->b.chip_class >= CIK) {
+ if (sctx->num_cs_dw_nontimer_queries_suspend) {
+ if (sctx->b.chip_class >= CIK) {
 si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
 S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(rctx->fb_log_samples) |
+ S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
 S_028004_ZPASS_ENABLE(1) |
 S_028004_SLICE_EVEN_ENABLE(1) |
 S_028004_SLICE_ODD_ENABLE(1));
 } else {
 si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
 S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(rctx->fb_log_samples));
+ S_028004_SAMPLE_RATE(sctx->fb_log_samples));
 }
 }
 if (info->count_from_stream_output) {
 struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output;
- uint64_t va = r600_resource_va(&rctx->screen->b.b,
+ uint64_t va = r600_resource_va(&sctx->screen->b.b,
 &t->buf_filled_size->b.b);
 va += t->buf_filled_size_offset;
@@ -592,52 +592,52 @@ static void si_state_draw(struct si_context *rctx,
 si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ? V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
 }
- si_pm4_cmd_end(pm4, rctx->predicate_drawing);
+ si_pm4_cmd_end(pm4, sctx->predicate_drawing);
 si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
 si_pm4_cmd_add(pm4, info->instance_count);
- si_pm4_cmd_end(pm4, rctx->predicate_drawing);
+ si_pm4_cmd_end(pm4, sctx->predicate_drawing);
 if (info->indexed) {
 uint32_t max_size = (ib->buffer->width0 - ib->offset) /
- rctx->index_buffer.index_size;
+ sctx->index_buffer.index_size;
 uint64_t va;
- va = r600_resource_va(&rctx->screen->b.b, ib->buffer);
+ va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
 va += ib->offset;
 si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
 si_cmd_draw_index_2(pm4, max_size, va, info->count,
 V_0287F0_DI_SRC_SEL_DMA,
- rctx->predicate_drawing);
+ sctx->predicate_drawing);
 } else {
 uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
 initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
- si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
+ si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->predicate_drawing);
 }
- si_pm4_set_state(rctx, draw, pm4);
+ si_pm4_set_state(sctx, draw, pm4);
 }
-void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *atom)
+void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
 {
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
 uint32_t cp_coher_cntl = 0;
 /* XXX SI flushes both ICACHE and KCACHE if either flag is set.
 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
 * XXX to ensure there is no regression. Also find out if there is another
 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
- if (rctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
+ if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
 R600_CONTEXT_INV_CONST_CACHE)) {
 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
 S_0085F0_SH_KCACHE_ACTION_ENA(1);
 }
- if (rctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
+ if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
 R600_CONTEXT_STREAMOUT_FLUSH)) {
 cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
 S_0085F0_TCL1_ACTION_ENA(1);
 }
- if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
 S_0085F0_CB0_DEST_BASE_ENA(1) |
 S_0085F0_CB1_DEST_BASE_ENA(1) |
@@ -648,13 +648,13 @@ void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *ato
 S_0085F0_CB6_DEST_BASE_ENA(1) |
 S_0085F0_CB7_DEST_BASE_ENA(1);
 }
- if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
 S_0085F0_DB_DEST_BASE_ENA(1);
 }
 if (cp_coher_cntl) {
- if (rctx->chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
@@ -671,104 +671,104 @@ void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *ato
 }
 }
- if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
 }
- if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
 }
- if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
+ if (sctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- } else if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
+ } else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
 /* Needed if streamout buffers are going to be used as a source. */
 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 }
- rctx->flags = 0;
+ sctx->flags = 0;
 }
 const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
 void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
 {
- struct si_context *rctx = (struct si_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
 struct pipe_index_buffer ib = {};
 uint32_t i;
 if (!info->count && (info->indexed || !info->count_from_stream_output))
 return;
- if (!rctx->ps_shader || !rctx->vs_shader)
+ if (!sctx->ps_shader || !sctx->vs_shader)
 return;
- si_update_derived_state(rctx);
- si_vertex_buffer_update(rctx);
+ si_update_derived_state(sctx);
+ si_vertex_buffer_update(sctx);
 if (info->indexed) {
 /* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;
+ pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
+ ib.user_buffer = sctx->index_buffer.user_buffer;
+ ib.index_size = sctx->index_buffer.index_size;
+ ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;
 /* Translate or upload, if needed. */
- si_translate_index_buffer(rctx, &ib, info->count);
+ si_translate_index_buffer(sctx, &ib, info->count);
 if (ib.user_buffer && !ib.buffer) {
- si_upload_index_buffer(rctx, &ib, info->count);
+ si_upload_index_buffer(sctx, &ib, info->count);
 }
 }
- if (!si_update_draw_info_state(rctx, info, &ib))
+ if (!si_update_draw_info_state(sctx, info, &ib))
 return;
- si_state_draw(rctx, info, &ib);
+ si_state_draw(sctx, info, &ib);
- rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);
+ sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);
 /* Check flush flags. */
- if (rctx->b.flags)
- rctx->atoms.cache_flush->dirty = true;
+ if (sctx->b.flags)
+ sctx->atoms.cache_flush->dirty = true;
- si_need_cs_space(rctx, 0, TRUE);
+ si_need_cs_space(sctx, 0, TRUE);
 /* Emit states. */
- for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
- if (rctx->atoms.array[i]->dirty) {
- rctx->atoms.array[i]->emit(&rctx->b, rctx->atoms.array[i]);
- rctx->atoms.array[i]->dirty = false;
+ for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
+ if (sctx->atoms.array[i]->dirty) {
+ sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
+ sctx->atoms.array[i]->dirty = false;
 }
 }
- si_pm4_emit_dirty(rctx);
- rctx->pm4_dirty_cdwords = 0;
+ si_pm4_emit_dirty(sctx);
+ sctx->pm4_dirty_cdwords = 0;
 #if SI_TRACE_CS
- if (rctx->screen->trace_bo) {
- si_trace_emit(rctx);
+ if (sctx->screen->trace_bo) {
+ si_trace_emit(sctx);
 }
 #endif
 /* Set the depth buffer as dirty. */
- if (rctx->framebuffer.zsbuf) {
- struct pipe_surface *surf = rctx->framebuffer.zsbuf;
+ if (sctx->framebuffer.zsbuf) {
+ struct pipe_surface *surf = sctx->framebuffer.zsbuf;
 struct r600_texture *rtex = (struct r600_texture *)surf->texture;
 rtex->dirty_level_mask |= 1 << surf->u.tex.level;
 }
- if (rctx->fb_compressed_cb_mask) {
+ if (sctx->fb_compressed_cb_mask) {
 struct pipe_surface *surf;
 struct r600_texture *rtex;
- unsigned mask = rctx->fb_compressed_cb_mask;
+ unsigned mask = sctx->fb_compressed_cb_mask;
 do {
 unsigned i = u_bit_scan(&mask);
- surf = rctx->framebuffer.cbufs[i];
+ surf = sctx->framebuffer.cbufs[i];
 rtex = (struct r600_texture*)surf->texture;
 rtex->dirty_level_mask |= 1 << surf->u.tex.level;
diff --git a/src/gallium/drivers/radeonsi/si_translate.c b/src/gallium/drivers/radeonsi/si_translate.c
index 233fedec132..be9d621b299 100644
--- a/src/gallium/drivers/radeonsi/si_translate.c
+++ b/src/gallium/drivers/radeonsi/si_translate.c
@@ -28,7 +28,7 @@
 #include "si_pipe.h"
-void si_translate_index_buffer(struct si_context *r600,
+void si_translate_index_buffer(struct si_context *sctx,
 struct pipe_index_buffer *ib,
 unsigned count)
 {
@@ -38,11 +38,11 @@ void si_translate_index_buffer(struct si_context *r600,
 switch (ib->index_size) {
 case 1:
- u_upload_alloc(r600->b.uploader, 0, count * 2,
+ u_upload_alloc(sctx->b.uploader, 0, count * 2,
 &out_offset, &out_buffer, &ptr);
 util_shorten_ubyte_elts_to_userptr(
- &r600->b.b, ib, 0, ib->offset, count, ptr);
+ &sctx->b.b, ib, 0, ib->offset, count, ptr);
 pipe_resource_reference(&ib->buffer, NULL);
 ib->buffer = out_buffer;
-- 
2.30.2