* The buffer list becomes empty after every context flush and must be
* rebuilt.
*/
-static inline void radeon_add_to_buffer_list(struct r600_common_context *rctx,
+static inline void radeon_add_to_buffer_list(struct si_context *sctx,
struct radeon_winsys_cs *cs,
struct r600_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
assert(usage);
- rctx->ws->cs_add_buffer(
+ sctx->b.ws->cs_add_buffer(
cs, rbo->buf,
(enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
rbo->domains, priority);
sctx->b.gtt + rbo->gart_usage))
si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, rbo, usage, priority);
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, rbo, usage, priority);
}
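
The comment at the top of this hunk is the invariant driving all of the call-site churn below: cs_add_buffer registrations are discarded on every flush, so whichever path re-emits state after si_flush_gfx_cs must register its BOs again. A minimal sketch of that caller-side pattern, assuming a hypothetical re-emit helper (the function name is illustrative, not from this patch; the usage/priority values mirror the scratch-buffer hunk later in the diff):

/* Hypothetical re-emit helper, for illustration only. After a flush
 * the winsys buffer list is empty, so the BO must be added again even
 * though nothing about the scratch state itself changed. */
static void si_reemit_scratch_state(struct si_context *sctx)
{
	if (!sctx->scratch_buffer)
		return;

	radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
				  sctx->scratch_buffer,
				  RADEON_USAGE_READWRITE,
				  RADEON_PRIO_SCRATCH_BUFFER);
}

Taking si_context directly also removes the &sctx->b indirection at every call site, which is what the bulk of this diff mechanically applies.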
static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
default:
assert(0);
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
default:
assert(0);
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
if (fence_va)
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, buf, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
config->scratch_bytes_per_wave *
sctx->scratch_waves);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
shader->scratch_bo, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
shader_va += sizeof(amd_kernel_code_t);
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, shader->bo,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
fprintf(stderr, "Error: Failed to allocate dispatch "
"packet.");
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, dispatch_buf,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, dispatch_buf,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, input_buffer,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
if (code_object) {
uint64_t va = base_va + info->indirect_offset;
int i;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, buffer,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}
/* This must be done after need_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource*)dst,
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource*)src,
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
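
The /* This must be done after need_cs_space. */ comment in this hunk encodes the same invariant: reserving command-stream space can itself trigger a flush, and a flush empties the buffer list, so registering dst/src before the reservation would silently drop them. A sketch of the required ordering, assuming si_need_gfx_cs_space as the space-check entry point (hedged; the exact helper name may differ in this tree):

/* Sketch of the ordering constraint, not code from this patch. */
static void si_cp_dma_prepare_sketch(struct si_context *sctx,
				     struct pipe_resource *dst,
				     struct pipe_resource *src)
{
	/* May flush internally, which would wipe earlier registrations. */
	si_need_gfx_cs_space(sctx);

	/* Safe now: these registrations live until the next flush. */
	radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
				  (struct r600_resource*)dst,
				  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
	if (src)
		radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
					  (struct r600_resource*)src,
					  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}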
upload_size);
desc->gpu_list = ptr - first_slot_offset / 4;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
/* The shader pointer should point to slot 0. */
if (!desc->buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
while (mask) {
int i = u_bit_scan(&mask);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
r600_resource(buffers->buffers[i]),
i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
buffers->shader_usage_constbuf,
if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
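
The while (mask) / u_bit_scan(&mask) loop in the hunk above is the usual Mesa idiom for visiting only the populated slots of a sparse binding table: u_bit_scan (from util/u_math.h) returns the index of the lowest set bit and clears it from the mask. A self-contained sketch of the idiom, with the per-slot work stubbed out:

#include "util/u_math.h"

static void visit_enabled_slots_sketch(unsigned enabled_mask)
{
	/* Copy so the caller's mask is preserved. */
	unsigned mask = enabled_mask;

	/* One iteration per set bit; disabled slots are never touched. */
	while (mask) {
		int i = u_bit_scan(&mask);
		/* ... re-register the buffer bound at slot i here ... */
	}
}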
if (!sctx->vb_descriptors_buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
}
}
sctx->vb_descriptors_gpu_list = ptr;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
desc[3] = velems->rsrc_word3[i];
if (first_vb_use_mask & (1 << i)) {
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
pipe_resource_reference(&buffers->buffers[slot], buffer);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->enabled_mask |= 1u << slot;
si_dma_emit_wait_idle(ctx);
if (dst) {
- radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, dst,
+ radeon_add_to_buffer_list(ctx, ctx->b.dma_cs, dst,
RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
}
if (src) {
- radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, src,
+ radeon_add_to_buffer_list(ctx, ctx->b.dma_cs, src,
RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
}
radeon_emit(cs, scratch->gpu_address);
radeon_emit(cs, scratch->gpu_address >> 32);
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
}
if (buf) {
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
}
uint64_t fence_va = fine->buf->gpu_address + fine->offset;
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, fine->buf,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, fine->buf,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
si_trace_emit(ctx);
- radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, ctx->current_saved_cs->trace_buf,
+ radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, ctx->current_saved_cs->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
{
struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, buffer,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, buffer,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
for (int i = 0; i < state->nbo; ++i) {
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, state->bo[i],
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, state->bo[i],
state->bo_usage[i], state->bo_priority[i]);
}
} else {
struct r600_resource *ib = state->indirect_buffer;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, ib,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, ib,
RADEON_USAGE_READ,
RADEON_PRIO_IB2);
}
tex = (struct r600_texture *)cb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READWRITE,
tex->resource.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
}
if (tex->dcc_separate_buffer)
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
tex->dcc_separate_buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_DCC);
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
&rtex->resource, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
index_size;
index_va = r600_resource(indexbuf)->gpu_address + index_offset;
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
(struct r600_resource *)indirect->indirect_draw_count;
radeon_add_to_buffer_list(
- &sctx->b, sctx->b.gfx_cs, params_buf,
+ sctx, sctx->b.gfx_cs, params_buf,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_WRITE,
RADEON_PRIO_SO_FILLED_SIZE);