From: Marek Olšák
Date: Sat, 19 Jan 2019 00:30:17 +0000 (-0500)
Subject: radeonsi: rename rbo, rbuffer to buf or buffer
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=260ff576470f45991ce2bc8934494c82e98c061b;p=mesa.git

radeonsi: rename rbo, rbuffer to buf or buffer

Reviewed-by: Bas Nieuwenhuizen
---

diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c
index c61a90e5868..03c11cb7013 100644
--- a/src/gallium/drivers/radeonsi/si_buffer.c
+++ b/src/gallium/drivers/radeonsi/si_buffer.c
@@ -248,12 +248,12 @@ bool si_alloc_resource(struct si_screen *sscreen,
 static void si_buffer_destroy(struct pipe_screen *screen,
 			      struct pipe_resource *buf)
 {
-	struct si_resource *rbuffer = si_resource(buf);
+	struct si_resource *buffer = si_resource(buf);
 
 	threaded_resource_deinit(buf);
-	util_range_destroy(&rbuffer->valid_buffer_range);
-	pb_reference(&rbuffer->buf, NULL);
-	FREE(rbuffer);
+	util_range_destroy(&buffer->valid_buffer_range);
+	pb_reference(&buffer->buf, NULL);
+	FREE(buffer);
 }
 
 /* Reallocate the buffer a update all resource bindings where the buffer is
@@ -264,32 +264,32 @@ static void si_buffer_destroy(struct pipe_screen *screen,
  */
 static bool si_invalidate_buffer(struct si_context *sctx,
-				 struct si_resource *rbuffer)
+				 struct si_resource *buf)
 {
 	/* Shared buffers can't be reallocated. */
-	if (rbuffer->b.is_shared)
+	if (buf->b.is_shared)
 		return false;
 
 	/* Sparse buffers can't be reallocated. */
-	if (rbuffer->flags & RADEON_FLAG_SPARSE)
+	if (buf->flags & RADEON_FLAG_SPARSE)
 		return false;
 
 	/* In AMD_pinned_memory, the user pointer association only gets
 	 * broken when the buffer is explicitly re-allocated.
 	 */
-	if (rbuffer->b.is_user_ptr)
+	if (buf->b.is_user_ptr)
 		return false;
 
 	/* Check if mapping this buffer would cause waiting for the GPU. */
-	if (si_rings_is_buffer_referenced(sctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
-	    !sctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
-		uint64_t old_va = rbuffer->gpu_address;
+	if (si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
+	    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
+		uint64_t old_va = buf->gpu_address;
 
 		/* Reallocate the buffer in the same pipe_resource. */
-		si_alloc_resource(sctx->screen, rbuffer);
-		si_rebind_buffer(sctx, &rbuffer->b.b, old_va);
+		si_alloc_resource(sctx->screen, buf);
+		si_rebind_buffer(sctx, &buf->b.b, old_va);
 	} else {
-		util_range_set_empty(&rbuffer->valid_buffer_range);
+		util_range_set_empty(&buf->valid_buffer_range);
 	}
 
 	return true;
@@ -325,11 +325,11 @@ static void si_invalidate_resource(struct pipe_context *ctx,
 				   struct pipe_resource *resource)
 {
 	struct si_context *sctx = (struct si_context*)ctx;
-	struct si_resource *rbuffer = si_resource(resource);
+	struct si_resource *buf = si_resource(resource);
 
 	/* We currently only do anyting here for buffers */
 	if (resource->target == PIPE_BUFFER)
-		(void)si_invalidate_buffer(sctx, rbuffer);
+		(void)si_invalidate_buffer(sctx, buf);
 }
 
 static void *si_buffer_get_transfer(struct pipe_context *ctx,
@@ -370,7 +370,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 				    struct pipe_transfer **ptransfer)
 {
 	struct si_context *sctx = (struct si_context*)ctx;
-	struct si_resource *rbuffer = si_resource(resource);
+	struct si_resource *buf = si_resource(resource);
 	uint8_t *data;
 
 	assert(box->x + box->width <= resource->width0);
@@ -386,7 +386,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 	 *
 	 * So don't ever use staging buffers.
 	 */
-	if (rbuffer->b.is_user_ptr)
+	if (buf->b.is_user_ptr)
 		usage |= PIPE_TRANSFER_PERSISTENT;
 
 	/* See if the buffer range being mapped has never been initialized,
@@ -394,8 +394,8 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
 		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
 	    usage & PIPE_TRANSFER_WRITE &&
-	    !rbuffer->b.is_shared &&
-	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
+	    !buf->b.is_shared &&
+	    !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
 		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 	}
 
@@ -414,8 +414,8 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 	    !(usage & PIPE_TRANSFER_PERSISTENT) &&
 	    /* Try not to decrement the counter if it's not positive. Still racy,
 	     * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
-	    rbuffer->max_forced_staging_uploads > 0 &&
-	    p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
+	    buf->max_forced_staging_uploads > 0 &&
+	    p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
 		usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
 			   PIPE_TRANSFER_UNSYNCHRONIZED);
 		usage |= PIPE_TRANSFER_DISCARD_RANGE;
@@ -427,7 +427,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 		      TC_TRANSFER_MAP_NO_INVALIDATE))) {
 		assert(usage & PIPE_TRANSFER_WRITE);
 
-		if (si_invalidate_buffer(sctx, rbuffer)) {
+		if (si_invalidate_buffer(sctx, buf)) {
 			/* At this point, the buffer is always idle. */
 			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 		} else {
@@ -439,15 +439,15 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
 	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
 			 PIPE_TRANSFER_PERSISTENT))) ||
-	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
+	     (buf->flags & RADEON_FLAG_SPARSE))) {
 		assert(usage & PIPE_TRANSFER_WRITE);
 
 		/* Check if mapping this buffer would cause waiting for the GPU.
 		 */
-		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
+		if (buf->flags & RADEON_FLAG_SPARSE ||
 		    force_discard_range ||
-		    si_rings_is_buffer_referenced(sctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
-		    !sctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
+		    si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
+		    !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
 			/* Do a wait-free write-only transfer using a temporary buffer. */
 			unsigned offset;
 			struct si_resource *staging = NULL;
@@ -462,7 +462,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 			data += box->x % SI_MAP_BUFFER_ALIGNMENT;
 			return si_buffer_get_transfer(ctx, resource, usage, box,
 						      ptransfer, data, staging, offset);
-		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
+		} else if (buf->flags & RADEON_FLAG_SPARSE) {
 			return NULL;
 		}
 	} else {
@@ -473,9 +473,9 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 	/* Use a staging buffer in cached GTT for reads. */
 	else if (((usage & PIPE_TRANSFER_READ) &&
 		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
-		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
-		   rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
-		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
+		  (buf->domains & RADEON_DOMAIN_VRAM ||
+		   buf->flags & RADEON_FLAG_GTT_WC)) ||
+		 (buf->flags & RADEON_FLAG_SPARSE)) {
 		struct si_resource *staging;
 
 		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
@@ -498,12 +498,12 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
 			return si_buffer_get_transfer(ctx, resource, usage, box,
 						      ptransfer, data, staging, 0);
-		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
+		} else if (buf->flags & RADEON_FLAG_SPARSE) {
 			return NULL;
 		}
 	}
 
-	data = si_buffer_map_sync_with_rings(sctx, rbuffer, usage);
+	data = si_buffer_map_sync_with_rings(sctx, buf, usage);
 	if (!data) {
 		return NULL;
 	}
@@ -518,7 +518,7 @@ static void si_buffer_do_flush_region(struct pipe_context *ctx,
 				      const struct pipe_box *box)
 {
 	struct si_transfer *stransfer = (struct si_transfer*)transfer;
-	struct si_resource *rbuffer = si_resource(transfer->resource);
+	struct si_resource *buf = si_resource(transfer->resource);
 
 	if (stransfer->staging) {
 		/* Copy the staging buffer into the original one. */
@@ -528,7 +528,7 @@ static void si_buffer_do_flush_region(struct pipe_context *ctx,
 			       box->width);
 	}
 
-	util_range_add(&rbuffer->valid_buffer_range, box->x,
+	util_range_add(&buf->valid_buffer_range, box->x,
 		       box->x + box->width);
 }
 
@@ -601,23 +601,23 @@ static struct si_resource *
 si_alloc_buffer_struct(struct pipe_screen *screen,
 		       const struct pipe_resource *templ)
 {
-	struct si_resource *rbuffer;
+	struct si_resource *buf;
 
-	rbuffer = MALLOC_STRUCT(si_resource);
+	buf = MALLOC_STRUCT(si_resource);
 
-	rbuffer->b.b = *templ;
-	rbuffer->b.b.next = NULL;
-	pipe_reference_init(&rbuffer->b.b.reference, 1);
-	rbuffer->b.b.screen = screen;
+	buf->b.b = *templ;
+	buf->b.b.next = NULL;
+	pipe_reference_init(&buf->b.b.reference, 1);
+	buf->b.b.screen = screen;
 
-	rbuffer->b.vtbl = &si_buffer_vtbl;
-	threaded_resource_init(&rbuffer->b.b);
+	buf->b.vtbl = &si_buffer_vtbl;
+	threaded_resource_init(&buf->b.b);
 
-	rbuffer->buf = NULL;
-	rbuffer->bind_history = 0;
-	rbuffer->TC_L2_dirty = false;
-	util_range_init(&rbuffer->valid_buffer_range);
-	return rbuffer;
+	buf->buf = NULL;
+	buf->bind_history = 0;
+	buf->TC_L2_dirty = false;
+	util_range_init(&buf->valid_buffer_range);
+	return buf;
 }
 
 static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
@@ -625,21 +625,21 @@ static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
 					      unsigned alignment)
 {
 	struct si_screen *sscreen = (struct si_screen*)screen;
-	struct si_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
+	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);
 
 	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
-		rbuffer->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;
+		buf->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;
 
-	si_init_resource_fields(sscreen, rbuffer, templ->width0, alignment);
+	si_init_resource_fields(sscreen, buf, templ->width0, alignment);
 
 	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
-		rbuffer->flags |= RADEON_FLAG_SPARSE;
+		buf->flags |= RADEON_FLAG_SPARSE;
 
-	if (!si_alloc_resource(sscreen, rbuffer)) {
-		FREE(rbuffer);
+	if (!si_alloc_resource(sscreen, buf)) {
+		FREE(buf);
 		return NULL;
 	}
 
-	return &rbuffer->b.b;
+	return &buf->b.b;
 }
 
 struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
@@ -676,26 +676,26 @@ si_buffer_from_user_memory(struct pipe_screen *screen,
 {
 	struct si_screen *sscreen = (struct si_screen*)screen;
 	struct radeon_winsys *ws = sscreen->ws;
-	struct si_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
+	struct si_resource *buf = si_alloc_buffer_struct(screen, templ);
 
-	rbuffer->domains = RADEON_DOMAIN_GTT;
-	rbuffer->flags = 0;
-	rbuffer->b.is_user_ptr = true;
-	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
-	util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);
+	buf->domains = RADEON_DOMAIN_GTT;
+	buf->flags = 0;
+	buf->b.is_user_ptr = true;
+	util_range_add(&buf->valid_buffer_range, 0, templ->width0);
+	util_range_add(&buf->b.valid_buffer_range, 0, templ->width0);
 
 	/* Convert a user pointer to a buffer. */
-	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
-	if (!rbuffer->buf) {
-		FREE(rbuffer);
+	buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
+	if (!buf->buf) {
+		FREE(buf);
 		return NULL;
 	}
 
-	rbuffer->gpu_address = ws->buffer_get_virtual_address(rbuffer->buf);
-	rbuffer->vram_usage = 0;
-	rbuffer->gart_usage = templ->width0;
+	buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);
+	buf->vram_usage = 0;
+	buf->gart_usage = templ->width0;
 
-	return &rbuffer->b.b;
+	return &buf->b.b;
 }
 
 static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index 61cd8898d67..21d4ca946d3 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -1126,22 +1126,22 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
 
 	for (i = 0; i < count; i++) {
 		struct pipe_vertex_buffer *vb;
-		struct si_resource *rbuffer;
+		struct si_resource *buf;
 		unsigned vbo_index = velems->vertex_buffer_index[i];
 		uint32_t *desc = &ptr[i*4];
 
 		vb = &sctx->vertex_buffer[vbo_index];
-		rbuffer = si_resource(vb->buffer.resource);
-		if (!rbuffer) {
+		buf = si_resource(vb->buffer.resource);
+		if (!buf) {
 			memset(desc, 0, 16);
 			continue;
 		}
 
 		int64_t offset = (int64_t)((int)vb->buffer_offset) +
 				 velems->src_offset[i];
-		uint64_t va = rbuffer->gpu_address + offset;
+		uint64_t va = buf->gpu_address + offset;
 
-		int64_t num_records = (int64_t)rbuffer->b.b.width0 - offset;
+		int64_t num_records = (int64_t)buf->b.b.width0 - offset;
 		if (sctx->chip_class != VI && vb->stride) {
 			/* Round up by rounding down and adding 1 */
 			num_records = (num_records - velems->format_size[i]) /
@@ -1189,7 +1189,7 @@ si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
 	return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
 }
 
-void si_upload_const_buffer(struct si_context *sctx, struct si_resource **rbuffer,
+void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
 			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
 {
 	void *tmp;
@@ -1197,8 +1197,8 @@ void si_upload_const_buffer(struct si_context *sctx, struct si_resource **rbuffe
 	u_upload_alloc(sctx->b.const_uploader, 0, size,
 		       si_optimal_tcc_alignment(sctx, size),
 		       const_offset,
-		       (struct pipe_resource**)rbuffer, &tmp);
-	if (*rbuffer)
+		       (struct pipe_resource**)buf, &tmp);
+	if (*buf)
 		util_memcpy_cpu_to_le32(tmp, ptr, size);
 }
 
@@ -1623,7 +1623,7 @@ static void si_reset_buffer_resources(struct si_context *sctx,
 void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 		      uint64_t old_va)
 {
-	struct si_resource *rbuffer = si_resource(buf);
+	struct si_resource *buffer = si_resource(buf);
 	unsigned i, shader;
 	unsigned num_elems = sctx->vertex_elements ?
 				       sctx->vertex_elements->count : 0;
@@ -1635,7 +1635,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	 */
 
 	/* Vertex buffers. */
-	if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
+	if (buffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
 		for (i = 0; i < num_elems; i++) {
 			int vb = sctx->vertex_elements->vertex_buffer_index[i];
@@ -1652,7 +1652,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	}
 
 	/* Streamout buffers. (other internal buffers can't be invalidated) */
-	if (rbuffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
+	if (buffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
 		for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
 			struct si_buffer_resources *buffers = &sctx->rw_buffers;
 			struct si_descriptors *descs =
@@ -1666,7 +1666,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 			sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
 
 			radeon_add_to_gfx_buffer_list_check_mem(sctx,
-								rbuffer, buffers->shader_usage,
+								buffer, buffers->shader_usage,
 								RADEON_PRIO_SHADER_RW_BUFFER,
 								true);
@@ -1680,7 +1680,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	}
 
 	/* Constant and shader buffers. */
-	if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+	if (buffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
 		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
 			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
 						  si_const_and_shader_buffer_descriptors_idx(shader),
@@ -1690,7 +1690,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 						  sctx->const_and_shader_buffers[shader].priority_constbuf);
 	}
 
-	if (rbuffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
+	if (buffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
 		for (shader = 0; shader < SI_NUM_SHADERS; shader++)
 			si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
 						  si_const_and_shader_buffer_descriptors_idx(shader),
@@ -1700,7 +1700,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 						  sctx->const_and_shader_buffers[shader].priority);
 	}
 
-	if (rbuffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
+	if (buffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
 		/* Texture buffers - update bindings. */
 		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
 			struct si_samplers *samplers = &sctx->samplers[shader];
@@ -1720,7 +1720,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 					1u << si_sampler_and_image_descriptors_idx(shader);
 
 				radeon_add_to_gfx_buffer_list_check_mem(sctx,
-								    rbuffer, RADEON_USAGE_READ,
+								    buffer, RADEON_USAGE_READ,
 								    RADEON_PRIO_SAMPLER_BUFFER,
 								    true);
 			}
@@ -1729,7 +1729,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	}
 
 	/* Shader images */
-	if (rbuffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
+	if (buffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
 		for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
 			struct si_images *images = &sctx->images[shader];
 			struct si_descriptors *descs =
@@ -1752,7 +1752,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 					1u << si_sampler_and_image_descriptors_idx(shader);
 
 				radeon_add_to_gfx_buffer_list_check_mem(
-					sctx, rbuffer,
+					sctx, buffer,
 					RADEON_USAGE_READWRITE,
 					RADEON_PRIO_SAMPLER_BUFFER, true);
 			}
@@ -1761,7 +1761,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	}
 
 	/* Bindless texture handles */
-	if (rbuffer->texture_handle_allocated) {
+	if (buffer->texture_handle_allocated) {
 		struct si_descriptors *descs = &sctx->bindless_descriptors;
 
 		util_dynarray_foreach(&sctx->resident_tex_handles,
@@ -1770,7 +1770,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 			unsigned desc_slot = (*tex_handle)->desc_slot;
 
 			if (view->texture == buf) {
-				si_set_buf_desc_address(rbuffer,
+				si_set_buf_desc_address(buffer,
 							view->u.buf.offset,
 							descs->list +
 							desc_slot * 16 + 4);
@@ -1779,7 +1779,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 				sctx->bindless_descriptors_dirty = true;
 
 				radeon_add_to_gfx_buffer_list_check_mem(
-					sctx, rbuffer,
+					sctx, buffer,
 					RADEON_USAGE_READ,
 					RADEON_PRIO_SAMPLER_BUFFER, true);
 			}
@@ -1787,7 +1787,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 	}
 
 	/* Bindless image handles */
-	if (rbuffer->image_handle_allocated) {
+	if (buffer->image_handle_allocated) {
 		struct si_descriptors *descs = &sctx->bindless_descriptors;
 
 		util_dynarray_foreach(&sctx->resident_img_handles,
@@ -1799,7 +1799,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 				if (view->access & PIPE_IMAGE_ACCESS_WRITE)
 					si_mark_image_range_valid(view);
 
-				si_set_buf_desc_address(rbuffer,
+				si_set_buf_desc_address(buffer,
 							view->u.buf.offset,
 							descs->list +
 							desc_slot * 16 + 4);
@@ -1808,7 +1808,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
 				sctx->bindless_descriptors_dirty = true;
 
 				radeon_add_to_gfx_buffer_list_check_mem(
-					sctx, rbuffer,
+					sctx, buffer,
 					RADEON_USAGE_READWRITE,
 					RADEON_PRIO_SAMPLER_BUFFER, true);
 			}
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 209b76440d1..eb3ba951dae 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -1650,15 +1650,15 @@ radeon_cs_memory_below_limit(struct si_screen *screen,
  */
 static inline void radeon_add_to_buffer_list(struct si_context *sctx,
 					     struct radeon_cmdbuf *cs,
-					     struct si_resource *rbo,
+					     struct si_resource *bo,
 					     enum radeon_bo_usage usage,
 					     enum radeon_bo_priority priority)
 {
 	assert(usage);
 	sctx->ws->cs_add_buffer(
-		cs, rbo->buf,
+		cs, bo->buf,
 		(enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
-		rbo->domains, priority);
+		bo->domains, priority);
 }
 
 /**
@@ -1680,18 +1680,18 @@ static inline void radeon_add_to_buffer_list(struct si_context *sctx,
  */
 static inline void
 radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
-					struct si_resource *rbo,
+					struct si_resource *bo,
 					enum radeon_bo_usage usage,
 					enum radeon_bo_priority priority,
 					bool check_mem)
 {
 	if (check_mem &&
 	    !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
-					  sctx->vram + rbo->vram_usage,
-					  sctx->gtt + rbo->gart_usage))
+					  sctx->vram + bo->vram_usage,
+					  sctx->gtt + bo->gart_usage))
 		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 
-	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, rbo, usage, priority);
+	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, bo, usage, priority);
 }
 
 #define PRINT_ERR(fmt, args...) \
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index fb9cba67c21..767e789276a 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -465,7 +465,7 @@ bool si_upload_compute_shader_descriptors(struct si_context *sctx);
 void si_release_all_descriptors(struct si_context *sctx);
 void si_all_descriptors_begin_new_cs(struct si_context *sctx);
 void si_all_resident_buffers_begin_new_cs(struct si_context *sctx);
-void si_upload_const_buffer(struct si_context *sctx, struct si_resource **rbuffer,
+void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
 			    const uint8_t *ptr, unsigned size, uint32_t *const_offset);
 void si_update_all_texture_descriptors(struct si_context *sctx);
 void si_shader_change_notify(struct si_context *sctx);
diff --git a/src/gallium/drivers/radeonsi/si_state_streamout.c b/src/gallium/drivers/radeonsi/si_state_streamout.c
index 64994139c5f..2bf6862c89b 100644
--- a/src/gallium/drivers/radeonsi/si_state_streamout.c
+++ b/src/gallium/drivers/radeonsi/si_state_streamout.c
@@ -43,7 +43,7 @@ si_create_so_target(struct pipe_context *ctx,
 {
 	struct si_context *sctx = (struct si_context *)ctx;
 	struct si_streamout_target *t;
-	struct si_resource *rbuffer = si_resource(buffer);
+	struct si_resource *buf = si_resource(buffer);
 
 	t = CALLOC_STRUCT(si_streamout_target);
 	if (!t) {
@@ -64,7 +64,7 @@ si_create_so_target(struct pipe_context *ctx,
 	t->b.buffer_offset = buffer_offset;
 	t->b.buffer_size = buffer_size;
 
-	util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
+	util_range_add(&buf->valid_buffer_range, buffer_offset,
 		       buffer_offset + buffer_size);
 	return &t->b;
 }