decode->bsd_size = align(dec->bs_size, 128);
decode->dpb_size = dec->dpb.res->buf->size;
decode->dt_size =
- r600_resource(((struct vl_video_buffer *)target)->resources[0])->buf->size +
- r600_resource(((struct vl_video_buffer *)target)->resources[1])->buf->size;
+ si_resource(((struct vl_video_buffer *)target)->resources[0])->buf->size +
+ si_resource(((struct vl_video_buffer *)target)->resources[1])->buf->size;
decode->sct_size = 0;
decode->sc_coeff_size = 0;
* able to move buffers around individually, so request a
* non-sub-allocated buffer.
*/
- buffer->res = r600_resource(pipe_buffer_create(screen, PIPE_BIND_SHARED,
- usage, size));
+ buffer->res = si_resource(pipe_buffer_create(screen, PIPE_BIND_SHARED,
+ usage, size));
return buffer->res != NULL;
}
/* destroy a buffer */
void si_vid_destroy_buffer(struct rvid_buffer *buffer)
{
- r600_resource_reference(&buffer->res, NULL);
+ si_resource_reference(&buffer->res, NULL);
}
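A hypothetical caller of the video-buffer helpers above, for illustration only; the si_vid_create_buffer name, its argument order, and the screen/size variables are assumptions inferred from the surrounding hunks, not part of this patch:
	/* illustration only */
	struct rvid_buffer tmp;
	if (si_vid_create_buffer(screen, &tmp, size, PIPE_USAGE_DEFAULT)) {
		/* tmp.res now points at a non-sub-allocated struct si_resource */
		si_vid_destroy_buffer(&tmp); /* drops tmp.res via si_resource_reference() */
	}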
/* reallocate a buffer, preserving its content */
struct rvid_buffer
{
unsigned usage;
- struct r600_resource *res;
+ struct si_resource *res;
};
/* generate a stream handle */
{
struct radeon_cmdbuf *cs = ctx->dma_cs;
unsigned i, ncopy, csize;
- struct r600_resource *rdst = r600_resource(dst);
- struct r600_resource *rsrc = r600_resource(src);
+ struct si_resource *rdst = si_resource(dst);
+ struct si_resource *rsrc = si_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
}
void *si_buffer_map_sync_with_rings(struct si_context *sctx,
- struct r600_resource *resource,
+ struct si_resource *resource,
unsigned usage)
{
enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
}
void si_init_resource_fields(struct si_screen *sscreen,
- struct r600_resource *res,
+ struct si_resource *res,
uint64_t size, unsigned alignment)
{
struct si_texture *tex = (struct si_texture*)res;
}
bool si_alloc_resource(struct si_screen *sscreen,
- struct r600_resource *res)
+ struct si_resource *res)
{
struct pb_buffer *old_buf, *new_buf;
static void si_buffer_destroy(struct pipe_screen *screen,
struct pipe_resource *buf)
{
- struct r600_resource *rbuffer = r600_resource(buf);
+ struct si_resource *rbuffer = si_resource(buf);
threaded_resource_deinit(buf);
util_range_destroy(&rbuffer->valid_buffer_range);
*/
static bool
si_invalidate_buffer(struct si_context *sctx,
- struct r600_resource *rbuffer)
+ struct si_resource *rbuffer)
{
/* Shared buffers can't be reallocated. */
if (rbuffer->b.is_shared)
struct pipe_resource *src)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_resource *rdst = r600_resource(dst);
- struct r600_resource *rsrc = r600_resource(src);
+ struct si_resource *rdst = si_resource(dst);
+ struct si_resource *rsrc = si_resource(src);
uint64_t old_gpu_address = rdst->gpu_address;
pb_reference(&rdst->buf, rsrc->buf);
struct pipe_resource *resource)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_resource *rbuffer = r600_resource(resource);
+ struct si_resource *rbuffer = si_resource(resource);
/* We currently only do anything here for buffers */
if (resource->target == PIPE_BUFFER)
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer,
- void *data, struct r600_resource *staging,
+ void *data, struct si_resource *staging,
unsigned offset)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_resource *rbuffer = r600_resource(resource);
+ struct si_resource *rbuffer = si_resource(resource);
uint8_t *data;
assert(box->x + box->width <= resource->width0);
!sctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
- struct r600_resource *staging = NULL;
+ struct si_resource *staging = NULL;
u_upload_alloc(ctx->stream_uploader, 0,
box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
(rbuffer->domains & RADEON_DOMAIN_VRAM ||
rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
(rbuffer->flags & RADEON_FLAG_SPARSE)) {
- struct r600_resource *staging;
+ struct si_resource *staging;
assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
- staging = r600_resource(pipe_buffer_create(
+ staging = si_resource(pipe_buffer_create(
ctx->screen, 0, PIPE_USAGE_STAGING,
box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT)));
if (staging) {
data = si_buffer_map_sync_with_rings(sctx, staging,
usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
if (!data) {
- r600_resource_reference(&staging, NULL);
+ si_resource_reference(&staging, NULL);
return NULL;
}
data += box->x % SI_MAP_BUFFER_ALIGNMENT;
const struct pipe_box *box)
{
struct si_transfer *stransfer = (struct si_transfer*)transfer;
- struct r600_resource *rbuffer = r600_resource(transfer->resource);
+ struct si_resource *rbuffer = si_resource(transfer->resource);
if (stransfer->staging) {
/* Copy the staging buffer into the original one. */
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
si_buffer_do_flush_region(ctx, transfer, &transfer->box);
- r600_resource_reference(&stransfer->staging, NULL);
+ si_resource_reference(&stransfer->staging, NULL);
assert(stransfer->b.staging == NULL); /* for threaded context only */
pipe_resource_reference(&transfer->resource, NULL);
si_buffer_transfer_unmap, /* transfer_unmap */
};
-static struct r600_resource *
+static struct si_resource *
si_alloc_buffer_struct(struct pipe_screen *screen,
const struct pipe_resource *templ)
{
- struct r600_resource *rbuffer;
+ struct si_resource *rbuffer;
- rbuffer = MALLOC_STRUCT(r600_resource);
+ rbuffer = MALLOC_STRUCT(si_resource);
rbuffer->b.b = *templ;
rbuffer->b.b.next = NULL;
unsigned alignment)
{
struct si_screen *sscreen = (struct si_screen*)screen;
- struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
+ struct si_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
rbuffer->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;
return si_buffer_create(screen, &buffer, alignment);
}
-struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
+struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
unsigned flags, unsigned usage,
unsigned size, unsigned alignment)
{
- return r600_resource(pipe_aligned_buffer_create(screen, flags, usage,
+ return si_resource(pipe_aligned_buffer_create(screen, flags, usage,
size, alignment));
}
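A sketch of the renamed allocation path, assuming a pipe_screen pointer named screen and arbitrary size/alignment values; nothing here is taken from the patch itself:
	/* illustration only: create an aligned, unmappable buffer and release it */
	struct si_resource *scratch =
		si_aligned_buffer_create(screen, SI_RESOURCE_FLAG_UNMAPPABLE,
					 PIPE_USAGE_DEFAULT, 64 * 1024, 256);
	if (scratch) {
		/* ... use scratch->gpu_address, scratch->buf, ... */
		si_resource_reference(&scratch, NULL);
	}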
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct radeon_winsys *ws = sscreen->ws;
- struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
+ struct si_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
rbuffer->domains = RADEON_DOMAIN_GTT;
rbuffer->flags = 0;
bool commit)
{
struct si_context *ctx = (struct si_context *)pctx;
- struct r600_resource *res = r600_resource(resource);
+ struct si_resource *res = si_resource(resource);
/*
* Since buffer commitment changes cannot be pipelined, we need to
uint64_t va;
uint32_t offset;
pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
- va = r600_resource(resources[i])->gpu_address;
+ va = si_resource(resources[i])->gpu_address;
offset = util_le32_to_cpu(*handles[i]);
va += offset;
va = util_cpu_to_le64(va);
scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
if (scratch_bo_size < scratch_needed) {
- r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
+ si_resource_reference(&sctx->compute_scratch_buffer, NULL);
sctx->compute_scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
if (si_shader_binary_upload(sctx->screen, shader))
return false;
- r600_resource_reference(&shader->scratch_bo,
+ si_resource_reference(&shader->scratch_bo,
sctx->compute_scratch_buffer);
}
AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
struct dispatch_packet dispatch;
unsigned dispatch_offset;
- struct r600_resource *dispatch_buf = NULL;
+ struct si_resource *dispatch_buf = NULL;
uint64_t dispatch_va;
/* Upload dispatch ptr */
radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
S_008F04_STRIDE(0));
- r600_resource_reference(&dispatch_buf, NULL);
+ si_resource_reference(&dispatch_buf, NULL);
user_sgpr += 2;
}
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_compute *program = sctx->cs_shader_state.program;
- struct r600_resource *input_buffer = NULL;
+ struct si_resource *input_buffer = NULL;
unsigned kernel_args_size;
unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
uint32_t kernel_args_offset = 0;
S_008F04_STRIDE(0));
}
- r600_resource_reference(&input_buffer, NULL);
+ si_resource_reference(&input_buffer, NULL);
return true;
}
if (info->indirect) {
if (program->uses_grid_size) {
- uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+ uint64_t base_va = si_resource(info->indirect)->gpu_address;
uint64_t va = base_va + info->indirect_offset;
int i;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(info->indirect),
+ si_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
for (i = 0; i < 3; ++i) {
}
if (info->indirect) {
- uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+ uint64_t base_va = si_resource(info->indirect)->gpu_address;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(info->indirect),
+ si_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
if (sctx->chip_class <= VI &&
- r600_resource(info->indirect)->TC_L2_dirty) {
+ si_resource(info->indirect)->TC_L2_dirty) {
sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(info->indirect)->TC_L2_dirty = false;
+ si_resource(info->indirect)->TC_L2_dirty = false;
}
}
/* Global buffers */
for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
- struct r600_resource *buffer =
- r600_resource(program->global_buffers[i]);
+ struct si_resource *buffer =
+ si_resource(program->global_buffers[i]);
if (!buffer) {
continue;
}
(cache_policy == L2_BYPASS ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0);
if (cache_policy != L2_BYPASS)
- r600_resource(dst)->TC_L2_dirty = true;
+ si_resource(dst)->TC_L2_dirty = true;
/* Restore states. */
ctx->bind_compute_state(ctx, saved_cs);
/* Only use compute for VRAM copies on dGPUs. */
if (sctx->screen->info.has_dedicated_vram &&
- r600_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
- r600_resource(src)->domains & RADEON_DOMAIN_VRAM &&
+ si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
+ si_resource(src)->domains & RADEON_DOMAIN_VRAM &&
size > 32 * 1024 &&
dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset,
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
if (dst)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(dst),
+ si_resource(dst),
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(src),
+ si_resource(src),
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
uint64_t size, unsigned value, unsigned user_flags,
enum si_coherency coher, enum si_cache_policy cache_policy)
{
- struct r600_resource *rdst = r600_resource(dst);
+ struct si_resource *rdst = si_resource(dst);
uint64_t va = (rdst ? rdst->gpu_address : 0) + offset;
bool is_first = true;
*/
if (!sctx->scratch_buffer ||
sctx->scratch_buffer->b.b.width0 < scratch_size) {
- r600_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ util_range_add(&si_resource(dst)->valid_buffer_range, dst_offset,
dst_offset + size);
}
- dst_offset += r600_resource(dst)->gpu_address;
+ dst_offset += si_resource(dst)->gpu_address;
}
if (src)
- src_offset += r600_resource(src)->gpu_address;
+ src_offset += si_resource(src)->gpu_address;
/* The workarounds aren't needed on Fiji and beyond. */
if (sctx->family <= CHIP_CARRIZO ||
}
if (dst && cache_policy != L2_BYPASS)
- r600_resource(dst)->TC_L2_dirty = true;
+ si_resource(dst)->TC_L2_dirty = true;
/* If it's not a prefetch or GDS copy... */
if (dst && src && (dst != src || dst_offset != src_offset))
exit(0);
}
-void si_cp_write_data(struct si_context *sctx, struct r600_resource *buf,
+void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
unsigned offset, unsigned size, unsigned dst_sel,
unsigned engine, const void *data)
{
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
si_clear_saved_cs(&scs->gfx);
- r600_resource_reference(&scs->trace_buf, NULL);
+ si_resource_reference(&scs->trace_buf, NULL);
free(scs);
}
uint32_t *gpu_list;
/** Reference of buffer where the list is uploaded, so that gpu_list
* is kept live. */
- struct r600_resource *buf;
+ struct si_resource *buf;
const char *shader_name;
const char *elem_name;
si_log_chunk_desc_list_destroy(void *data)
{
struct si_log_chunk_desc_list *chunk = data;
- r600_resource_reference(&chunk->buf, NULL);
+ si_resource_reference(&chunk->buf, NULL);
FREE(chunk);
}
chunk->slot_remap = slot_remap;
chunk->chip_class = screen->info.chip_class;
- r600_resource_reference(&chunk->buf, desc->buffer);
+ si_resource_reference(&chunk->buf, desc->buffer);
chunk->gpu_list = desc->gpu_list;
for (unsigned i = 0; i < num_elements; ++i) {
static void si_release_descriptors(struct si_descriptors *desc)
{
- r600_resource_reference(&desc->buffer, NULL);
+ si_resource_reference(&desc->buffer, NULL);
FREE(desc->list);
}
desc->element_dw_size];
/* The buffer is already in the buffer list. */
- r600_resource_reference(&desc->buffer, NULL);
+ si_resource_reference(&desc->buffer, NULL);
desc->gpu_list = NULL;
desc->gpu_address = si_desc_extract_buffer_address(descriptor);
si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
/* SAMPLER VIEWS */
static inline enum radeon_bo_priority
-si_get_sampler_view_priority(struct r600_resource *res)
+si_get_sampler_view_priority(struct si_resource *res)
{
if (res->b.b.target == PIPE_BUFFER)
return RADEON_PRIO_SAMPLER_BUFFER;
}
/* Set buffer descriptor fields that can be changed by reallocations. */
-static void si_set_buf_desc_address(struct r600_resource *buf,
+static void si_set_buf_desc_address(struct si_resource *buf,
uint64_t offset, uint32_t *state)
{
uint64_t va = buf->gpu_address + offset;
static void
si_mark_image_range_valid(const struct pipe_image_view *view)
{
- struct r600_resource *res = r600_resource(view->resource);
+ struct si_resource *res = si_resource(view->resource);
assert(res && res->b.b.target == PIPE_BUFFER);
uint32_t *desc, uint32_t *fmask_desc)
{
struct si_screen *screen = ctx->screen;
- struct r600_resource *res;
+ struct si_resource *res;
- res = r600_resource(view->resource);
+ res = si_resource(view->resource);
if (res->b.b.target == PIPE_BUFFER) {
if (view->access & PIPE_IMAGE_ACCESS_WRITE)
{
struct si_images *images = &ctx->images[shader];
struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
- struct r600_resource *res;
+ struct si_resource *res;
unsigned desc_slot = si_get_image_slot(slot);
uint32_t *desc = descs->list + desc_slot * 8;
return;
}
- res = r600_resource(view->resource);
+ res = si_resource(view->resource);
if (&images->views[slot] != view)
util_copy_image_view(&images->views[slot], view);
int i = u_bit_scan(&mask);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(buffers->buffers[i]),
+ si_resource(buffers->buffers[i]),
i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
buffers->shader_usage_constbuf,
i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
{
pipe_resource_reference(buf, buffers->buffers[idx]);
if (*buf) {
- struct r600_resource *res = r600_resource(*buf);
+ struct si_resource *res = si_resource(*buf);
const uint32_t *desc = descs->list + idx * 4;
uint64_t va;
continue;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(sctx->vertex_buffer[vb].buffer.resource),
+ si_resource(sctx->vertex_buffer[vb].buffer.resource),
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
for (i = 0; i < count; i++) {
struct pipe_vertex_buffer *vb;
- struct r600_resource *rbuffer;
+ struct si_resource *rbuffer;
unsigned vbo_index = velems->vertex_buffer_index[i];
uint32_t *desc = &ptr[i*4];
vb = &sctx->vertex_buffer[vbo_index];
- rbuffer = r600_resource(vb->buffer.resource);
+ rbuffer = si_resource(vb->buffer.resource);
if (!rbuffer) {
memset(desc, 0, 16);
continue;
if (first_vb_use_mask & (1 << i)) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(vb->buffer.resource),
+ si_resource(vb->buffer.resource),
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
}
return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
}
-void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
+void si_upload_const_buffer(struct si_context *sctx, struct si_resource **rbuffer,
const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
void *tmp;
unsigned buffer_offset;
si_upload_const_buffer(sctx,
- (struct r600_resource**)&buffer, input->user_buffer,
+ (struct si_resource**)&buffer, input->user_buffer,
input->buffer_size, &buffer_offset);
if (!buffer) {
/* Just unbind on failure. */
si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
return;
}
- va = r600_resource(buffer)->gpu_address + buffer_offset;
+ va = si_resource(buffer)->gpu_address + buffer_offset;
} else {
pipe_resource_reference(&buffer, input->buffer);
- va = r600_resource(buffer)->gpu_address + input->buffer_offset;
+ va = si_resource(buffer)->gpu_address + input->buffer_offset;
}
/* Set the descriptor. */
buffers->buffers[slot] = buffer;
radeon_add_to_gfx_buffer_list_check_mem(sctx,
- r600_resource(buffer),
+ si_resource(buffer),
buffers->shader_usage_constbuf,
buffers->priority_constbuf, true);
buffers->enabled_mask |= 1u << slot;
return;
if (slot == 0 && input && input->buffer &&
- !(r600_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
+ !(si_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
assert(!"constant buffer 0 must have a 32-bit VM address, use const_uploader");
return;
}
if (input && input->buffer)
- r600_resource(input->buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
+ si_resource(input->buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
slot = si_get_constbuf_slot(slot);
si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
return;
}
- struct r600_resource *buf = r600_resource(sbuffer->buffer);
+ struct si_resource *buf = si_resource(sbuffer->buffer);
uint64_t va = buf->gpu_address + sbuffer->buffer_offset;
desc[0] = va;
unsigned slot = si_get_shaderbuf_slot(start_slot + i);
if (sbuffer && sbuffer->buffer)
- r600_resource(sbuffer->buffer)->bind_history |= PIPE_BIND_SHADER_BUFFER;
+ si_resource(sbuffer->buffer)->bind_history |= PIPE_BIND_SHADER_BUFFER;
si_set_shader_buffer(sctx, buffers, descriptors_idx, slot, sbuffer,
buffers->priority);
if (buffer) {
uint64_t va;
- va = r600_resource(buffer)->gpu_address + offset;
+ va = si_resource(buffer)->gpu_address + offset;
switch (element_size) {
default:
pipe_resource_reference(&buffers->buffers[slot], buffer);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(buffer),
+ si_resource(buffer),
buffers->shader_usage, buffers->priority);
buffers->enabled_mask |= 1u << slot;
} else {
uint64_t offset_within_buffer = old_desc_va - old_buf_va;
/* Update the descriptor. */
- si_set_buf_desc_address(r600_resource(new_buf), offset_within_buffer,
+ si_set_buf_desc_address(si_resource(new_buf), offset_within_buffer,
desc);
}
sctx->descriptors_dirty |= 1u << descriptors_idx;
radeon_add_to_gfx_buffer_list_check_mem(sctx,
- r600_resource(buf),
+ si_resource(buf),
usage, priority, true);
}
}
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
uint64_t old_va)
{
- struct r600_resource *rbuffer = r600_resource(buf);
+ struct si_resource *rbuffer = si_resource(buf);
unsigned i, shader;
unsigned num_elems = sctx->vertex_elements ?
sctx->vertex_elements->count : 0;
bool *desc_dirty)
{
struct si_descriptors *desc = &sctx->bindless_descriptors;
- struct r600_resource *buf = r600_resource(resource);
+ struct si_resource *buf = si_resource(resource);
unsigned desc_slot_offset = desc_slot * 16;
uint32_t *desc_list = desc->list + desc_slot_offset + 4;
uint64_t old_desc_va;
pipe_sampler_view_reference(&tex_handle->view, view);
- r600_resource(sview->base.texture)->texture_handle_allocated = true;
+ si_resource(sview->base.texture)->texture_handle_allocated = true;
return handle;
}
util_copy_image_view(&img_handle->view, view);
- r600_resource(view->resource)->image_handle_allocated = true;
+ si_resource(view->resource)->image_handle_allocated = true;
return handle;
}
struct si_context *sctx = (struct si_context *)ctx;
struct si_image_handle *img_handle;
struct pipe_image_view *view;
- struct r600_resource *res;
+ struct si_resource *res;
struct hash_entry *entry;
entry = _mesa_hash_table_search(sctx->img_handles,
img_handle = (struct si_image_handle *)entry->data;
view = &img_handle->view;
- res = r600_resource(view->resource);
+ res = si_resource(view->resource);
if (resident) {
if (res->b.b.target != PIPE_BUFFER) {
for (i = 0; i < SI_NUM_DESCS; ++i)
si_release_descriptors(&sctx->descriptors[i]);
- r600_resource_reference(&sctx->vb_descriptors_buffer, NULL);
+ si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
si_release_bindless_descriptors(sctx);
{
struct radeon_cmdbuf *cs = ctx->dma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
- struct r600_resource *rdst = r600_resource(dst);
- struct r600_resource *rsrc = r600_resource(src);
+ struct si_resource *rdst = si_resource(dst);
+ struct si_resource *rsrc = si_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
radeon_emit(cs, 0xf0000000); /* NOP */
}
-void si_dma_emit_timestamp(struct si_context *sctx, struct r600_resource *dst,
+void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
uint64_t offset)
{
struct radeon_cmdbuf *cs = sctx->dma_cs;
{
struct radeon_cmdbuf *cs = sctx->dma_cs;
unsigned i, ncopy, csize;
- struct r600_resource *rdst = r600_resource(dst);
+ struct si_resource *rdst = si_resource(dst);
assert(offset % 4 == 0);
assert(size);
}
void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src)
+ struct si_resource *dst, struct si_resource *src)
{
uint64_t vram = ctx->dma_cs->used_vram;
uint64_t gtt = ctx->dma_cs->used_gart;
#include "si_build_pm4.h"
struct si_fine_fence {
- struct r600_resource *buf;
+ struct si_resource *buf;
unsigned offset;
};
void si_cp_release_mem(struct si_context *ctx,
unsigned event, unsigned event_flags,
unsigned dst_sel, unsigned int_sel, unsigned data_sel,
- struct r600_resource *buf, uint64_t va,
+ struct si_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type)
{
struct radeon_cmdbuf *cs = ctx->gfx_cs;
query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ struct si_resource *scratch = ctx->eop_bug_scratch;
assert(16 * ctx->screen->info.num_render_backends <=
scratch->b.b.width0);
} else {
if (ctx->chip_class == CIK ||
ctx->chip_class == VI) {
- struct r600_resource *scratch = ctx->eop_bug_scratch;
+ struct si_resource *scratch = ctx->eop_bug_scratch;
uint64_t va = scratch->gpu_address;
/* Two EOP events are required to make all engines go idle
ws->fence_reference(&(*rdst)->gfx, NULL);
ws->fence_reference(&(*rdst)->sdma, NULL);
tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
- r600_resource_reference(&(*rdst)->fine.buf, NULL);
+ si_resource_reference(&(*rdst)->fine.buf, NULL);
FREE(*rdst);
}
*rdst = rsrc;
if (rfence->fine.buf &&
si_fine_fence_signaled(rws, &rfence->fine)) {
rws->fence_reference(&rfence->gfx, NULL);
- r600_resource_reference(&rfence->fine.buf, NULL);
+ si_resource_reference(&rfence->fine.buf, NULL);
return true;
}
pipe_reference_init(&ctx->current_saved_cs->reference, 1);
- ctx->current_saved_cs->trace_buf = r600_resource(
+ ctx->current_saved_cs->trace_buf = si_resource(
pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 8));
if (!ctx->current_saved_cs->trace_buf) {
free(ctx->current_saved_cs);
}
static void si_pc_emit_start(struct si_context *sctx,
- struct r600_resource *buffer, uint64_t va)
+ struct si_resource *buffer, uint64_t va)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
/* Note: The buffer was already added in si_pc_emit_start, so we don't have to
* do it again in here. */
static void si_pc_emit_stop(struct si_context *sctx,
- struct r600_resource *buffer, uint64_t va)
+ struct si_resource *buffer, uint64_t va)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
static void si_pc_query_resume(struct si_context *sctx, struct si_query *rquery)
/*
struct si_query_hw *hwquery,
- struct r600_resource *buffer, uint64_t va)*/
+ struct si_resource *buffer, uint64_t va)*/
{
struct si_query_pc *query = (struct si_query_pc *)rquery;
int current_se = -1;
pipe_resource_reference(&sctx->tess_rings, NULL);
pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
pipe_resource_reference(&sctx->sample_pos_buffer, NULL);
- r600_resource_reference(&sctx->border_color_buffer, NULL);
+ si_resource_reference(&sctx->border_color_buffer, NULL);
free(sctx->border_color_table);
- r600_resource_reference(&sctx->scratch_buffer, NULL);
- r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
- r600_resource_reference(&sctx->wait_mem_scratch, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->compute_scratch_buffer, NULL);
+ si_resource_reference(&sctx->wait_mem_scratch, NULL);
si_pm4_free_state(sctx, sctx->init_config, ~0);
if (sctx->init_config_gs_rings)
sctx->ws->fence_reference(&sctx->last_gfx_fence, NULL);
sctx->ws->fence_reference(&sctx->last_sdma_fence, NULL);
- r600_resource_reference(&sctx->eop_bug_scratch, NULL);
+ si_resource_reference(&sctx->eop_bug_scratch, NULL);
si_destroy_compiler(&sctx->compiler);
if (sctx->chip_class == CIK ||
sctx->chip_class == VI ||
sctx->chip_class == GFX9) {
- sctx->eop_bug_scratch = r600_resource(
+ sctx->eop_bug_scratch = si_resource(
pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
16 * sscreen->info.num_render_backends));
if (!sctx->eop_bug_scratch)
if (!sctx->border_color_table)
goto fail;
- sctx->border_color_buffer = r600_resource(
+ sctx->border_color_buffer = si_resource(
pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
SI_MAX_BORDER_COLORS *
sizeof(*sctx->border_color_table)));
sctx->sample_mask = 0xffff;
if (sctx->chip_class >= GFX9) {
- sctx->wait_mem_scratch = r600_resource(
+ sctx->wait_mem_scratch = si_resource(
pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
if (!sctx->wait_mem_scratch)
goto fail;
exit(1);
}
- r600_resource(buf)->gpu_address = 0; /* cause a VM fault */
+ si_resource(buf)->gpu_address = 0; /* cause a VM fault */
if (sscreen->debug_flags & DBG(TEST_VMFAULT_CP)) {
si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, 0,
/* Only 32-bit buffer allocations are supported, gallium doesn't support more
* at the moment.
*/
-struct r600_resource {
+struct si_resource {
struct threaded_resource b;
/* Winsys objects. */
struct si_transfer {
struct threaded_transfer b;
- struct r600_resource *staging;
+ struct si_resource *staging;
unsigned offset;
};
struct si_texture {
- struct r600_resource buffer;
+ struct si_resource buffer;
struct radeon_surf surface;
uint64_t size;
uint64_t fmask_offset;
uint64_t cmask_offset;
uint64_t cmask_base_address_reg;
- struct r600_resource *cmask_buffer;
+ struct si_resource *cmask_buffer;
uint64_t dcc_offset; /* 0 = disabled */
unsigned cb_color_info; /* fast clear enable bit */
unsigned color_clear_value[2];
* target == 2D and last_level == 0. If enabled, dcc_offset contains
* the absolute GPUVM address, not the relative one.
*/
- struct r600_resource *dcc_separate_buffer;
+ struct si_resource *dcc_separate_buffer;
/* When DCC is temporarily disabled, the separate buffer is here. */
- struct r600_resource *last_dcc_separate_buffer;
+ struct si_resource *last_dcc_separate_buffer;
/* Estimate of how much this color buffer is written to in units of
* full-screen draws: ps_invocations / (width * height)
* Shader kills, late Z, and blending with trivial discards make it
struct pipe_stream_output_target b;
/* The buffer where BUFFER_FILLED_SIZE is stored. */
- struct r600_resource *buf_filled_size;
+ struct si_resource *buf_filled_size;
unsigned buf_filled_size_offset;
bool buf_filled_size_valid;
struct pipe_reference reference;
struct si_context *ctx;
struct radeon_saved_cs gfx;
- struct r600_resource *trace_buf;
+ struct si_resource *trace_buf;
unsigned trace_id;
unsigned gfx_last_dw;
struct radeon_cmdbuf *dma_cs;
struct pipe_fence_handle *last_gfx_fence;
struct pipe_fence_handle *last_sdma_fence;
- struct r600_resource *eop_bug_scratch;
+ struct si_resource *eop_bug_scratch;
struct u_upload_mgr *cached_gtt_allocator;
struct threaded_context *tc;
struct u_suballocator *allocator_zeroed_memory;
struct pipe_debug_callback debug;
struct ac_llvm_compiler compiler; /* only non-threaded compilation */
struct si_shader_ctx_state fixed_func_tcs_shader;
- struct r600_resource *wait_mem_scratch;
+ struct si_resource *wait_mem_scratch;
unsigned wait_mem_number;
uint16_t prefetch_L2_mask;
/* vertex buffer descriptors */
uint32_t *vb_descriptors_gpu_list;
- struct r600_resource *vb_descriptors_buffer;
+ struct si_resource *vb_descriptors_buffer;
unsigned vb_descriptors_offset;
/* shader descriptors */
struct pipe_resource *gsvs_ring;
struct pipe_resource *tess_rings;
union pipe_color_union *border_color_table; /* in CPU memory, any endian */
- struct r600_resource *border_color_buffer;
+ struct si_resource *border_color_buffer;
union pipe_color_union *border_color_map; /* in VRAM (slow access), little endian */
unsigned border_color_count;
unsigned num_vs_blit_sgprs;
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
/* Scratch buffer */
- struct r600_resource *scratch_buffer;
+ struct si_resource *scratch_buffer;
unsigned scratch_waves;
unsigned spi_tmpring_size;
- struct r600_resource *compute_scratch_buffer;
+ struct si_resource *compute_scratch_buffer;
/* Emitted derived tessellation state. */
/* Local shader (VS), or HS if LS-HS are merged. */
struct pb_buffer *buf,
enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct si_context *sctx,
- struct r600_resource *resource,
+ struct si_resource *resource,
unsigned usage);
void si_init_resource_fields(struct si_screen *sscreen,
- struct r600_resource *res,
+ struct si_resource *res,
uint64_t size, unsigned alignment);
bool si_alloc_resource(struct si_screen *sscreen,
- struct r600_resource *res);
+ struct si_resource *res);
struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
unsigned flags, unsigned usage,
unsigned size, unsigned alignment);
-struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
+struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
unsigned flags, unsigned usage,
unsigned size, unsigned alignment);
void si_replace_buffer_storage(struct pipe_context *ctx,
uint64_t offset, unsigned size);
void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only);
void si_test_gds(struct si_context *sctx);
-void si_cp_write_data(struct si_context *sctx, struct r600_resource *buf,
+void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
unsigned offset, unsigned size, unsigned dst_sel,
unsigned engine, const void *data);
void si_init_dma_functions(struct si_context *sctx);
/* si_dma_cs.c */
-void si_dma_emit_timestamp(struct si_context *sctx, struct r600_resource *dst,
+void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
uint64_t offset);
void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned clear_value);
void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src);
+ struct si_resource *dst, struct si_resource *src);
void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence);
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
void si_cp_release_mem(struct si_context *ctx,
unsigned event, unsigned event_flags,
unsigned dst_sel, unsigned int_sel, unsigned data_sel,
- struct r600_resource *buf, uint64_t va,
+ struct si_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type);
unsigned si_cp_write_fence_dwords(struct si_screen *screen);
void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
* common helpers
*/
-static inline struct r600_resource *r600_resource(struct pipe_resource *r)
+static inline struct si_resource *si_resource(struct pipe_resource *r)
{
- return (struct r600_resource*)r;
+ return (struct si_resource*)r;
}
static inline void
-r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
+si_resource_reference(struct si_resource **ptr, struct si_resource *res)
{
pipe_resource_reference((struct pipe_resource **)ptr,
(struct pipe_resource *)res);
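A minimal sketch of how the two renamed inline helpers combine; buf is an arbitrary struct pipe_resource pointer and exists only for illustration:
	/* illustration only */
	struct si_resource *res = si_resource(buf); /* plain downcast, no refcount change */
	struct si_resource *held = NULL;
	si_resource_reference(&held, res);          /* retain */
	/* ... read held->gpu_address, held->buf ... */
	si_resource_reference(&held, NULL);         /* release */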
{
if (r) {
/* Add memory usage for need_gfx_cs_space */
- sctx->vram += r600_resource(r)->vram_usage;
- sctx->gtt += r600_resource(r)->gart_usage;
+ sctx->vram += si_resource(r)->vram_usage;
+ sctx->gtt += si_resource(r)->gart_usage;
}
}
*/
static inline void radeon_add_to_buffer_list(struct si_context *sctx,
struct radeon_cmdbuf *cs,
- struct r600_resource *rbo,
+ struct si_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
*/
static inline void
radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
- struct r600_resource *rbo,
+ struct si_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority,
bool check_mem)
}
void si_pm4_add_bo(struct si_pm4_state *state,
- struct r600_resource *bo,
+ struct si_resource *bo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
unsigned idx = state->nbo++;
assert(idx < SI_PM4_MAX_BO);
- r600_resource_reference(&state->bo[idx], bo);
+ si_resource_reference(&state->bo[idx], bo);
state->bo_usage[idx] = usage;
state->bo_priority[idx] = priority;
}
void si_pm4_clear_state(struct si_pm4_state *state)
{
for (int i = 0; i < state->nbo; ++i)
- r600_resource_reference(&state->bo[i], NULL);
- r600_resource_reference(&state->indirect_buffer, NULL);
+ si_resource_reference(&state->bo[i], NULL);
+ si_resource_reference(&state->indirect_buffer, NULL);
state->nbo = 0;
state->ndw = 0;
}
if (!state->indirect_buffer) {
radeon_emit_array(cs, state->pm4, state->ndw);
} else {
- struct r600_resource *ib = state->indirect_buffer;
+ struct si_resource *ib = state->indirect_buffer;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs, ib,
RADEON_USAGE_READ,
assert(state->ndw);
assert(aligned_ndw <= SI_PM4_MAX_DW);
- r600_resource_reference(&state->indirect_buffer, NULL);
+ si_resource_reference(&state->indirect_buffer, NULL);
/* TODO: this hangs with 1024 or higher alignment on GFX9. */
state->indirect_buffer =
si_aligned_buffer_create(screen, 0,
struct si_pm4_state
{
/* optional indirect buffer */
- struct r600_resource *indirect_buffer;
+ struct si_resource *indirect_buffer;
/* PKT3_SET_*_REG handling */
unsigned last_opcode;
/* BO's referenced by this state */
unsigned nbo;
- struct r600_resource *bo[SI_PM4_MAX_BO];
+ struct si_resource *bo[SI_PM4_MAX_BO];
enum radeon_bo_usage bo_usage[SI_PM4_MAX_BO];
enum radeon_bo_priority bo_priority[SI_PM4_MAX_BO];
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val);
void si_pm4_add_bo(struct si_pm4_state *state,
- struct r600_resource *bo,
+ struct si_resource *bo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority);
void si_pm4_upload_indirect_buffer(struct si_context *sctx,
while (prev) {
struct si_query_buffer *qbuf = prev;
prev = prev->previous;
- r600_resource_reference(&qbuf->buf, NULL);
+ si_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
- r600_resource_reference(&buffer->buf, NULL);
+ si_resource_reference(&buffer->buf, NULL);
}
void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
struct si_query_buffer *qbuf = buffer->previous;
buffer->previous = qbuf->previous;
- r600_resource_reference(&buffer->buf, NULL);
+ si_resource_reference(&buffer->buf, NULL);
buffer->buf = qbuf->buf; /* move ownership */
FREE(qbuf);
}
if (buffer->buf &&
(si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
!sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE))) {
- r600_resource_reference(&buffer->buf, NULL);
+ si_resource_reference(&buffer->buf, NULL);
}
}
*/
struct si_screen *screen = sctx->screen;
unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
- buffer->buf = r600_resource(
+ buffer->buf = si_resource(
pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
if (unlikely(!buffer->buf))
return false;
if (prepare_buffer) {
if (unlikely(!prepare_buffer(sctx, buffer))) {
- r600_resource_reference(&buffer->buf, NULL);
+ si_resource_reference(&buffer->buf, NULL);
return false;
}
}
struct si_query_hw *query = (struct si_query_hw *)rquery;
si_query_buffer_destroy(sscreen, &query->buffer);
- r600_resource_reference(&query->workaround_buf, NULL);
+ si_resource_reference(&query->workaround_buf, NULL);
FREE(rquery);
}
static void si_query_hw_do_emit_start(struct si_context *sctx,
struct si_query_hw *query,
- struct r600_resource *buffer,
+ struct si_resource *buffer,
uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
struct si_query_hw *query,
- struct r600_resource *buffer,
+ struct si_resource *buffer,
uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
struct si_query_hw *, void *buffer,
static void si_query_hw_do_emit_start(struct si_context *sctx,
struct si_query_hw *query,
- struct r600_resource *buffer,
+ struct si_resource *buffer,
uint64_t va)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
static void si_query_hw_do_emit_stop(struct si_context *sctx,
struct si_query_hw *query,
- struct r600_resource *buffer,
+ struct si_resource *buffer,
uint64_t va)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
}
static void emit_set_predicate(struct si_context *ctx,
- struct r600_resource *buf, uint64_t va,
+ struct si_resource *buf, uint64_t va,
uint32_t op)
{
struct radeon_cmdbuf *cs = ctx->gfx_cs;
if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
si_query_buffer_reset(sctx, &query->buffer);
- r600_resource_reference(&query->workaround_buf, NULL);
+ si_resource_reference(&query->workaround_buf, NULL);
si_query_hw_emit_start(sctx, query);
if (!query->buffer.buf)
ssbo[2].buffer_offset = offset;
ssbo[2].buffer_size = 8;
- r600_resource(resource)->TC_L2_dirty = true;
+ si_resource(resource)->TC_L2_dirty = true;
}
sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
struct si_query;
struct si_query_buffer;
struct si_query_hw;
-struct r600_resource;
+struct si_resource;
enum {
SI_QUERY_DRAW_CALLS = PIPE_QUERY_DRIVER_SPECIFIC,
bool (*prepare_buffer)(struct si_context *, struct si_query_buffer *);
void (*emit_start)(struct si_context *,
struct si_query_hw *,
- struct r600_resource *buffer, uint64_t va);
+ struct si_resource *buffer, uint64_t va);
void (*emit_stop)(struct si_context *,
struct si_query_hw *,
- struct r600_resource *buffer, uint64_t va);
+ struct si_resource *buffer, uint64_t va);
void (*clear_result)(struct si_query_hw *, union pipe_query_result *);
void (*add_result)(struct si_screen *screen,
struct si_query_hw *, void *buffer,
struct si_query_buffer {
/* The buffer where query results are stored. */
- struct r600_resource *buf;
+ struct si_resource *buf;
/* Offset of the next free result after current query data */
unsigned results_end;
/* If a query buffer is full, a new buffer is created and the old one
unsigned stream;
/* Workaround via compute shader */
- struct r600_resource *workaround_buf;
+ struct si_resource *workaround_buf;
unsigned workaround_offset;
};
!mainb->rodata_size);
assert(!epilog || !epilog->rodata_size);
- r600_resource_reference(&shader->bo, NULL);
+ si_resource_reference(&shader->bo, NULL);
shader->bo = si_aligned_buffer_create(&sscreen->b,
sscreen->cpdma_prefetch_writes_memory ?
0 : SI_RESOURCE_FLAG_READ_ONLY,
void si_shader_destroy(struct si_shader *shader)
{
if (shader->scratch_bo)
- r600_resource_reference(&shader->scratch_bo, NULL);
+ si_resource_reference(&shader->scratch_bo, NULL);
- r600_resource_reference(&shader->bo, NULL);
+ si_resource_reference(&shader->bo, NULL);
if (!shader->is_binary_shared)
ac_shader_binary_clean(&shader->binary);
struct si_shader_part *epilog;
struct si_pm4_state *pm4;
- struct r600_resource *bo;
- struct r600_resource *scratch_bo;
+ struct si_resource *bo;
+ struct si_resource *scratch_bo;
struct si_shader_key key;
struct util_queue_fence ready;
bool compilation_failed;
* @param state 256-bit descriptor; only the high 128 bits are filled in
*/
void
-si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
+si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
enum pipe_format format,
unsigned offset, unsigned size,
uint32_t *state)
/* Buffer resource. */
if (texture->target == PIPE_BUFFER) {
si_make_buffer_descriptor(sctx->screen,
- r600_resource(texture),
+ si_resource(texture),
state->format,
state->u.buf.offset,
state->u.buf.size,
unsigned num_divisors = util_last_bit(v->instance_divisor_is_fetched);
v->instance_divisor_factor_buffer =
- (struct r600_resource*)
+ (struct si_resource*)
pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
num_divisors * sizeof(divisor_factors[0]));
if (!v->instance_divisor_factor_buffer) {
if (sctx->vertex_elements == state)
sctx->vertex_elements = NULL;
- r600_resource_reference(&v->instance_divisor_factor_buffer, NULL);
+ si_resource_reference(&v->instance_divisor_factor_buffer, NULL);
FREE(state);
}
dsti->stride = src->stride;
si_context_add_resource_size(sctx, buf);
if (buf)
- r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+ si_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
}
} else {
for (i = 0; i < count; i++) {
cb.user_buffer = NULL;
cb.buffer_size = sizeof(array);
- si_upload_const_buffer(sctx, (struct r600_resource**)&cb.buffer,
+ si_upload_const_buffer(sctx, (struct si_resource**)&cb.buffer,
(void*)array, sizeof(array),
&cb.buffer_offset);
struct si_vertex_elements
{
- struct r600_resource *instance_divisor_factor_buffer;
+ struct si_resource *instance_divisor_factor_buffer;
uint32_t rsrc_word3[SI_MAX_ATTRIBS];
uint16_t src_offset[SI_MAX_ATTRIBS];
uint8_t fix_fetch[SI_MAX_ATTRIBS];
uint32_t *gpu_list;
/* The buffer where the descriptors have been uploaded. */
- struct r600_resource *buffer;
+ struct si_resource *buffer;
uint64_t gpu_address;
/* The maximum number of descriptors. */
void si_release_all_descriptors(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
void si_all_resident_buffers_begin_new_cs(struct si_context *sctx);
-void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
+void si_upload_const_buffer(struct si_context *sctx, struct si_resource **rbuffer,
const uint8_t *ptr, unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void
-si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
+si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
enum pipe_format format,
unsigned offset, unsigned size,
uint32_t *state);
assert(num_tcs_input_cp <= 32);
assert(num_tcs_output_cp <= 32);
- uint64_t ring_va = r600_resource(sctx->tess_rings)->gpu_address;
+ uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
assert((ring_va & u_bit_consecutive(0, 19)) == 0);
tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
index_max_size = (indexbuf->width0 - index_offset) /
index_size;
- index_va = r600_resource(indexbuf)->gpu_address + index_offset;
+ index_va = si_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(indexbuf),
+ si_resource(indexbuf),
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
}
if (indirect) {
- uint64_t indirect_va = r600_resource(indirect->buffer)->gpu_address;
+ uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
assert(indirect_va % 8 == 0);
radeon_emit(cs, indirect_va >> 32);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- r600_resource(indirect->buffer),
+ si_resource(indirect->buffer),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
uint64_t count_va = 0;
if (indirect->indirect_draw_count) {
- struct r600_resource *params_buf =
- r600_resource(indirect->indirect_draw_count);
+ struct si_resource *params_buf =
+ si_resource(indirect->indirect_draw_count);
radeon_add_to_buffer_list(
sctx, sctx->gfx_cs, params_buf,
/* info->start will be added by the drawing code */
index_offset -= start_offset;
} else if (sctx->chip_class <= CIK &&
- r600_resource(indexbuf)->TC_L2_dirty) {
+ si_resource(indexbuf)->TC_L2_dirty) {
/* VI reads index buffers through TC L2, so it doesn't
* need this. */
sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indexbuf)->TC_L2_dirty = false;
+ si_resource(indexbuf)->TC_L2_dirty = false;
}
}
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
if (sctx->chip_class <= VI) {
- if (r600_resource(indirect->buffer)->TC_L2_dirty) {
+ if (si_resource(indirect->buffer)->TC_L2_dirty) {
sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indirect->buffer)->TC_L2_dirty = false;
+ si_resource(indirect->buffer)->TC_L2_dirty = false;
}
if (indirect->indirect_draw_count &&
- r600_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
+ si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
+ si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
}
}
}
/* Update the shader state to use the new shader bo. */
si_shader_init_pm4_state(sctx->screen, shader);
- r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
+ si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
si_shader_unlock(shader);
return 1;
if (scratch_needed_size > 0) {
if (scratch_needed_size > current_scratch_buffer_size) {
/* Create a bigger scratch buffer */
- r600_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
si_init_config_add_vgt_flush(sctx);
- si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_rings),
+ si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
- uint64_t factor_va = r600_resource(sctx->tess_rings)->gpu_address +
+ uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
sctx->screen->tess_offchip_ring_size;
/* Append these registers to the init config state. */
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_streamout_target *t;
- struct r600_resource *rbuffer = r600_resource(buffer);
+ struct si_resource *rbuffer = si_resource(buffer);
t = CALLOC_STRUCT(si_streamout_target);
if (!t) {
{
struct si_streamout_target *t = (struct si_streamout_target*)target;
pipe_resource_reference(&t->b.buffer, NULL);
- r600_resource_reference(&t->buf_filled_size, NULL);
+ si_resource_reference(&t->buf_filled_size, NULL);
FREE(t);
}
*/
for (i = 0; i < sctx->streamout.num_targets; i++)
if (sctx->streamout.targets[i])
- r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+ si_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
/* Invalidate the scalar cache in case a streamout buffer is
* going to be used as a constant buffer.
sbuf.buffer_size = targets[i]->buffer_offset +
targets[i]->buffer_size;
si_set_rw_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, &sbuf);
- r600_resource(targets[i]->buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
+ si_resource(targets[i]->buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
} else {
si_set_rw_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
}
tex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
if (tex->cmask_buffer != &tex->buffer)
- r600_resource_reference(&tex->cmask_buffer, NULL);
+ si_resource_reference(&tex->cmask_buffer, NULL);
tex->cmask_buffer = NULL;
if (tex->cmask_buffer == &tex->buffer)
tex->cmask_buffer = NULL;
else
- r600_resource_reference(&tex->cmask_buffer, NULL);
+ si_resource_reference(&tex->cmask_buffer, NULL);
if (new_tex->cmask_buffer == &new_tex->buffer)
tex->cmask_buffer = &tex->buffer;
else
- r600_resource_reference(&tex->cmask_buffer, new_tex->cmask_buffer);
+ si_resource_reference(&tex->cmask_buffer, new_tex->cmask_buffer);
tex->dcc_offset = new_tex->dcc_offset;
tex->cb_color_info = new_tex->cb_color_info;
tex->separate_dcc_dirty = new_tex->separate_dcc_dirty;
tex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
- r600_resource_reference(&tex->dcc_separate_buffer,
+ si_resource_reference(&tex->dcc_separate_buffer,
new_tex->dcc_separate_buffer);
- r600_resource_reference(&tex->last_dcc_separate_buffer,
+ si_resource_reference(&tex->last_dcc_separate_buffer,
new_tex->last_dcc_separate_buffer);
if (new_bind_flag == PIPE_BIND_LINEAR) {
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct si_context *sctx;
- struct r600_resource *res = r600_resource(resource);
+ struct si_resource *res = si_resource(resource);
struct si_texture *tex = (struct si_texture*)resource;
struct radeon_bo_metadata metadata;
bool update_metadata = false;
struct pipe_resource *ptex)
{
struct si_texture *tex = (struct si_texture*)ptex;
- struct r600_resource *resource = &tex->buffer;
+ struct si_resource *resource = &tex->buffer;
si_texture_reference(&tex->flushed_depth_texture, NULL);
if (tex->cmask_buffer != &tex->buffer) {
- r600_resource_reference(&tex->cmask_buffer, NULL);
+ si_resource_reference(&tex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
- r600_resource_reference(&tex->dcc_separate_buffer, NULL);
- r600_resource_reference(&tex->last_dcc_separate_buffer, NULL);
+ si_resource_reference(&tex->dcc_separate_buffer, NULL);
+ si_resource_reference(&tex->last_dcc_separate_buffer, NULL);
FREE(tex);
}
struct radeon_surf *surface)
{
struct si_texture *tex;
- struct r600_resource *resource;
+ struct si_resource *resource;
struct si_screen *sscreen = (struct si_screen*)screen;
tex = CALLOC_STRUCT(si_texture);
struct si_context *sctx = (struct si_context*)ctx;
struct si_texture *tex = (struct si_texture*)texture;
struct si_transfer *trans;
- struct r600_resource *buf;
+ struct si_resource *buf;
unsigned offset = 0;
char *map;
bool use_staging_texture = false;
return map + offset;
fail_trans:
- r600_resource_reference(&trans->staging, NULL);
+ si_resource_reference(&trans->staging, NULL);
pipe_resource_reference(&trans->b.b.resource, NULL);
FREE(trans);
return NULL;
* we don't run out of the CPU address space.
*/
if (sizeof(void*) == 4) {
- struct r600_resource *buf =
+ struct si_resource *buf =
stransfer->staging ? stransfer->staging : &tex->buffer;
sctx->ws->buffer_unmap(buf->buf);
if (stransfer->staging) {
sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->size;
- r600_resource_reference(&stransfer->staging, NULL);
+ si_resource_reference(&stransfer->staging, NULL);
}
/* Heuristic for {upload, draw, upload, draw, ..}: