struct pb_buffer *buf,
enum radeon_bo_usage usage)
{
- if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
+ if (ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, buf, usage)) {
return true;
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, buf, usage)) {
return true;
}
return false;
rusage = RADEON_USAGE_WRITE;
}
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
busy = true;
}
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
} else {
/* We will be waiting for the GPU. Wait for any offloaded
* CS flush to complete to avoid busy-waiting in the winsys. */
- ctx->ws->cs_sync_flush(ctx->gfx.cs);
- if (ctx->dma.cs)
- ctx->ws->cs_sync_flush(ctx->dma.cs);
+ ctx->ws->cs_sync_flush(ctx->gfx_cs);
+ if (ctx->dma_cs)
+ ctx->ws->cs_sync_flush(ctx->dma_cs);
}
}
* rebuilt.
*/
static inline void radeon_add_to_buffer_list(struct r600_common_context *rctx,
- struct r600_ring *ring,
+ struct radeon_winsys_cs *cs,
struct r600_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
assert(usage);
rctx->ws->cs_add_buffer(
- ring->cs, rbo->buf,
+ cs, rbo->buf,
(enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
rbo->domains, priority);
}
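With struct r600_ring gone, callers hand the winsys CS to radeon_add_to_buffer_list directly instead of a ring wrapper. A minimal sketch of the new call pattern, assuming an si_context *sctx and an already-validated struct r600_resource *vbuf (both names illustrative):

    /* pin a vertex buffer for the current gfx IB */
    radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, vbuf,
                              RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);

The SDMA side takes sctx->b.dma_cs instead, which may be NULL when async DMA is disabled, so SDMA buffers are normally routed through si_need_dma_space rather than added by callers by hand.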
bool check_mem)
{
if (check_mem &&
- !radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx.cs,
+ !radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx_cs,
sctx->b.vram + rbo->vram_usage,
sctx->b.gtt + rbo->gart_usage))
si_flush_gfx_cs(&sctx->b, PIPE_FLUSH_ASYNC, NULL);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rbo, usage, priority);
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, rbo, usage, priority);
}
static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
* (b) wait for threaded submit to finish, including those that were
* triggered by some other, earlier operation.
*/
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- ctx->ws->cs_sync_flush(ctx->dma.cs);
- ctx->ws->cs_sync_flush(ctx->gfx.cs);
+ ctx->ws->cs_sync_flush(ctx->dma_cs);
+ ctx->ws->cs_sync_flush(ctx->gfx_cs);
assert(resource->target == PIPE_BUFFER);
return false;
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
- rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
+ rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
si_flush_dma_cs,
rctx);
}
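Since this create is conditional, the renamed handle can legitimately be NULL. A sketch of the early-out guard that the consumers below keep on the new field (rctx here is the same r600_common_context as above):

    if (!rctx->dma_cs)
        return false;   /* no SDMA ring: caller falls back to the CP/gfx path */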
if (rctx->query_result_shader)
rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);
- if (rctx->gfx.cs)
- rctx->ws->cs_destroy(rctx->gfx.cs);
- if (rctx->dma.cs)
- rctx->ws->cs_destroy(rctx->dma.cs);
+ if (rctx->gfx_cs)
+ rctx->ws->cs_destroy(rctx->gfx_cs);
+ if (rctx->dma_cs)
+ rctx->ws->cs_destroy(rctx->dma_cs);
if (rctx->ctx)
rctx->ws->ctx_destroy(rctx->ctx);
unsigned short id;
};
-struct r600_ring {
- struct radeon_winsys_cs *cs;
-};
-
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
uint32_t *ib;
struct radeon_winsys_ctx *ctx;
enum radeon_family family;
enum chip_class chip_class;
- struct r600_ring gfx;
- struct r600_ring dma;
+ struct radeon_winsys_cs *gfx_cs;
+ struct radeon_winsys_cs *dma_cs;
struct pipe_fence_handle *last_gfx_fence;
struct pipe_fence_handle *last_sdma_fence;
struct r600_resource *eop_bug_scratch;
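Read together with the struct change above, the dereference pattern used throughout the rest of the patch simplifies from the ring wrapper to a plain pointer load (sketch):

    /* before */ struct radeon_winsys_cs *cs = ctx->gfx.cs;
    /* after  */ struct radeon_winsys_cs *cs = ctx->gfx_cs;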
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
default:
assert(0);
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
uint64_t fence_va = 0;
switch (query->b.type) {
default:
assert(0);
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
if (fence_va)
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
if (ctx->chip_class >= GFX9) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
unsigned src_level,
const struct pipe_box *src_box)
{
- if (!rctx->dma.cs)
+ if (!rctx->dma_cs)
return false;
if (rdst->surface.bpe != rsrc->surface.bpe)
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
struct r600_resource *rsrc = r600_resource(src);
unsigned clear_value)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
sctx->b.family != CHIP_KAVERI) ||
(srcx + copy_width != (1 << 14) &&
srcy + copy_height != (1 << 14)))) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
copy_width_aligned <= (1 << 14) &&
copy_height <= (1 << 14) &&
copy_depth <= (1 << 11)) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
(srcx + copy_width_aligned != (1 << 14) &&
srcy + copy_height_aligned != (1 << 14) &&
dstx + copy_width != (1 << 14)))) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
{
struct si_context *sctx = (struct si_context *)ctx;
- if (!sctx->b.dma.cs ||
+ if (!sctx->b.dma_cs ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
goto fallback;
static void si_initialize_compute(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t bc_va;
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
const amd_kernel_code_t *code_object,
unsigned offset)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader_config inline_config = {0};
struct si_shader_config *config;
uint64_t shader_va;
config->scratch_bytes_per_wave *
sctx->scratch_waves);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
shader->scratch_bo, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
shader_va += sizeof(amd_kernel_code_t);
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
const amd_kernel_code_t *code_object,
unsigned user_sgpr)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
unsigned max_private_element_size = AMD_HSA_BITS_GET(
uint64_t kernel_args_va)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
static const enum amd_code_property_mask_t workgroup_count_masks [] = {
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
fprintf(stderr, "Error: Failed to allocate dispatch "
"packet.");
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, dispatch_buf,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
const amd_kernel_code_t *code_object,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
if (code_object) {
const struct pipe_grid_info *info)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
4 * SI_NUM_RESOURCE_SGPRS;
unsigned block_size_reg = grid_size_reg +
uint64_t va = base_va + info->indirect_offset;
int i;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
const struct pipe_grid_info *info)
{
struct si_screen *sscreen = sctx->screen;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
unsigned waves_per_threadgroup =
DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}
uint64_t src_va, unsigned size, unsigned flags,
enum r600_coherency coher)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint32_t header = 0, command = 0;
assert(size);
/* This must be done after need_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)dst,
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)src,
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
/* dma_clear_buffer can use clear_buffer on failure. Make sure that
* doesn't happen. We don't want an infinite recursion: */
- if (sctx->b.dma.cs &&
+ if (sctx->b.dma_cs &&
!(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
(offset % 4 == 0) &&
/* CP DMA is very slow. Always use SDMA for big clears. This
* si_emit_framebuffer_state (in a draw call) adds them.
* For example, DeusEx:MD has 21 buffer clears per frame and all
* of them are moved to SDMA thanks to this. */
- !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
+ !ws->cs_is_buffer_referenced(sctx->b.gfx_cs, rdst->buf,
RADEON_USAGE_READWRITE))) {
sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);
&last_trace_id, map ? 1 : 0, "IB", ctx->b.chip_class,
NULL, NULL);
} else {
- si_parse_current_ib(f, ctx->b.gfx.cs, chunk->gfx_begin,
+ si_parse_current_ib(f, ctx->b.gfx_cs, chunk->gfx_begin,
chunk->gfx_end, &last_trace_id, map ? 1 : 0,
"IB", ctx->b.chip_class);
}
assert(ctx->current_saved_cs);
struct si_saved_cs *scs = ctx->current_saved_cs;
- unsigned gfx_cur = ctx->b.gfx.cs->prev_dw + ctx->b.gfx.cs->current.cdw;
+ unsigned gfx_cur = ctx->b.gfx_cs->prev_dw + ctx->b.gfx_cs->current.cdw;
if (!dump_bo_list &&
gfx_cur == scs->gfx_last_dw)
upload_size);
desc->gpu_list = ptr - first_slot_offset / 4;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
/* The shader pointer should point to slot 0. */
if (!desc->buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
while (mask) {
int i = u_bit_scan(&mask);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
r600_resource(buffers->buffers[i]),
i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
buffers->shader_usage_constbuf,
if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
if (!sctx->vb_descriptors_buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
}
}
sctx->vb_descriptors_gpu_list = ptr;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
desc[3] = velems->rsrc_word3[i];
if (first_vb_use_mask & (1 << i)) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
pipe_resource_reference(&buffers->buffers[slot], buffer);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->enabled_mask |= 1u << slot;
unsigned num_dwords)
{
struct si_descriptors *desc = &sctx->bindless_descriptors;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned desc_slot_offset = desc_slot * 16;
uint32_t *data;
uint64_t va;
struct si_descriptors *desc,
unsigned sh_base)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned sh_offset = sh_base + desc->shader_userdata_offset;
si_emit_shader_pointer_head(cs, sh_offset, 1);
if (!sh_base)
return;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
while (mask) {
if (!sh_base)
return;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
while (mask) {
~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
if (sctx->vertex_buffer_pointer_dirty) {
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
/* Find the location of the VB descriptor pointer. */
/* TODO: In the future, the pointer will be packed in unused
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
unsigned clear_value)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
unsigned src_x, src_y;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (sctx->b.dma.cs == NULL ||
+ if (sctx->b.dma_cs == NULL ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
goto fallback;
static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->dma.cs;
+ struct radeon_winsys_cs *cs = rctx->dma_cs;
/* NOP waits for idle on Evergreen and later. */
if (rctx->chip_class >= CIK)
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
struct r600_resource *dst, struct r600_resource *src)
{
- uint64_t vram = ctx->dma.cs->used_vram;
- uint64_t gtt = ctx->dma.cs->used_gart;
+ uint64_t vram = ctx->dma_cs->used_vram;
+ uint64_t gtt = ctx->dma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
}
/* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
RADEON_USAGE_WRITE))))
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
* engine busy while uploads are being submitted.
*/
num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
- ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
+ if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
+ ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
+ assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
*/
if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
if (dst) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
}
if (src) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, src,
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
}
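A typical SDMA emit path, sketched from the copy/clear helpers elsewhere in this patch (the dword count is illustrative and packet emission is elided), now reads the CS straight off the context and lets si_need_dma_space handle the buffer list:

    struct radeon_winsys_cs *cs = sctx->b.dma_cs;   /* was sctx->b.dma.cs */

    si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
    /* ... radeon_emit() the SDMA packets on cs ... */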
void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->dma.cs;
+ struct radeon_winsys_cs *cs = rctx->dma_cs;
struct radeon_saved_cs saved;
bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
struct r600_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
radeon_emit(cs, scratch->gpu_address);
radeon_emit(cs, scratch->gpu_address >> 32);
- radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
}
if (buf) {
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
}
void si_gfx_wait_fence(struct r600_common_context *ctx,
uint64_t va, uint32_t ref, uint32_t mask)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
{
struct radeon_winsys *ws = rctx->ws;
- if (rctx->dma.cs)
- ws->cs_add_fence_dependency(rctx->dma.cs, fence);
- ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
+ if (rctx->dma_cs)
+ ws->cs_add_fence_dependency(rctx->dma_cs, fence);
+ ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
}
static void si_add_syncobj_signal(struct r600_common_context *rctx,
{
struct radeon_winsys *ws = rctx->ws;
- ws->cs_add_syncobj_signal(rctx->gfx.cs, fence);
+ ws->cs_add_syncobj_signal(rctx->gfx_cs, fence);
}
static void si_fence_reference(struct pipe_screen *screen,
uint64_t fence_va = fine->buf->gpu_address + fine->offset;
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, fine->buf,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, fine->buf,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
}
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
- if (rctx->dma.cs)
+ if (rctx->dma_cs)
si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
- if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
+ if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
if (fence)
ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
if (!(flags & PIPE_FLUSH_DEFERRED))
- ws->cs_sync_flush(rctx->gfx.cs);
+ ws->cs_sync_flush(rctx->gfx_cs);
} else {
/* Instead of flushing, create a deferred fence. Constraints:
* - The state tracker must allow a deferred flush.
if (flags & PIPE_FLUSH_DEFERRED &&
!(flags & PIPE_FLUSH_FENCE_FD) &&
fence) {
- gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
+ gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
deferred_fence = true;
} else {
si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
assert(!fine.buf);
finish:
if (!(flags & PIPE_FLUSH_DEFERRED)) {
- if (rctx->dma.cs)
- ws->cs_sync_flush(rctx->dma.cs);
- ws->cs_sync_flush(rctx->gfx.cs);
+ if (rctx->dma_cs)
+ ws->cs_sync_flush(rctx->dma_cs);
+ ws->cs_sync_flush(rctx->gfx_cs);
}
}
/* initialize */
void si_need_gfx_cs_space(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
/* There is no need to flush the DMA IB here, because
* si_need_dma_space always flushes the GFX IB if there is
* that have been added (cs_add_buffer) and two counters in the pipe
* driver for those that haven't been added yet.
*/
- if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
+ if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx_cs,
ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct radeon_winsys *ws = ctx->b.ws;
if (ctx->gfx_flush_in_progress)
* This code is only needed when the driver flushes the GFX IB
* internally, and it never asks for a fence handle.
*/
- if (radeon_emitted(ctx->b.dma.cs, 0)) {
+ if (radeon_emitted(ctx->b.dma_cs, 0)) {
assert(fence == NULL); /* internal flushes only */
si_flush_dma_cs(ctx, flags, NULL);
}
si_trace_emit(ctx);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, ctx->current_saved_cs->trace_buf,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, ctx->current_saved_cs->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
if (!LIST_IS_EMPTY(&ctx->b.active_queries))
si_resume_queries(&ctx->b);
- assert(!ctx->b.gfx.cs->prev_dw);
- ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
+ assert(!ctx->b.gfx_cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx_cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
static void si_pc_emit_instance(struct r600_common_context *ctx,
int se, int instance)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned value = S_030800_SH_BROADCAST_WRITES(1);
if (se >= 0) {
static void si_pc_emit_shaders(struct r600_common_context *ctx,
unsigned shaders)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
radeon_emit(cs, shaders & 0x7f);
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
struct si_pc_block_base *regs = sigroup->b;
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned idx;
unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
unsigned dw;
static void si_pc_emit_start(struct r600_common_context *ctx,
struct r600_resource *buffer, uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buffer,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buffer,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
static void si_pc_emit_stop(struct r600_common_context *ctx,
struct r600_resource *buffer, uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_VALUE_32BIT,
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
struct si_pc_block_base *regs = sigroup->b;
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned idx;
unsigned reg = regs->counter0_lo;
unsigned reg_delta = 8;
sctx->b.b.create_video_buffer = vl_video_buffer_create;
}
- sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
+ sctx->b.gfx_cs = ws->cs_create(sctx->b.ctx, RING_GFX,
si_flush_gfx_cs, sctx);
/* Border colors. */
goto fail;
/* Initialize the memory. */
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
S_370_WR_CONFIRM(1) |
void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
for (int i = 0; i < state->nbo; ++i) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, state->bo[i],
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, state->bo[i],
state->bo_usage[i], state->bo_priority[i]);
}
} else {
struct r600_resource *ib = state->indirect_buffer;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, ib,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, ib,
RADEON_USAGE_READ,
RADEON_PRIO_IB2);
*/
static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_state_blend *blend = sctx->queued.named.blend;
/* CB_COLORn_INFO.FORMAT=INVALID should disable unbound colorbuffers,
* but you never know. */
static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *vs = si_get_vs_state(sctx);
struct si_shader_selector *vs_sel = vs->selector;
struct tgsi_shader_info *info = &vs_sel->info;
*/
static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned db_shader_control;
static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
struct r600_texture *tex = NULL;
}
tex = (struct r600_texture *)cb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READWRITE,
tex->resource.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
}
if (tex->dcc_separate_buffer)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
tex->dcc_separate_buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_DCC);
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&rtex->resource, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
static void si_emit_msaa_sample_locs(struct si_context *sctx,
struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned nr_samples = sctx->framebuffer.nr_samples;
bool has_msaa_sample_loc_bug = sctx->screen->has_msaa_sample_loc_bug;
static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned num_tile_pipes = sctx->screen->info.num_tile_pipes;
/* 33% faster rendering to linear color buffers */
bool dst_is_linear = sctx->framebuffer.any_dst_linear;
static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->sample_mask.sample_mask;
/* Needed for line and polygon smoothing as well as for the Polaris
static void si_emit_dpbb_disable(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
if (bin_size.y >= 32)
bin_size_extend.y = util_logbase2(bin_size.y) - 5;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
S_028C44_BIN_SIZE_X(bin_size.x == 16) |
const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
const struct pipe_draw_info *info,
unsigned num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param;
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
uint32_t index_max_size = 0;
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
index_size;
index_va = r600_resource(indexbuf)->gpu_address + index_offset;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
(struct r600_resource *)indirect->indirect_draw_count;
radeon_add_to_buffer_list(
- &sctx->b, &sctx->b.gfx, params_buf,
+ &sctx->b, sctx->b.gfx_cs, params_buf,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
static void si_emit_surface_sync(struct r600_common_context *rctx,
unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx_cs;
if (rctx->chip_class >= GFX9) {
/* Flush caches and wait for the caches to assert idle. */
void si_emit_cache_flush(struct si_context *sctx)
{
struct r600_common_context *rctx = &sctx->b;
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx_cs;
uint32_t cp_coher_cntl = 0;
uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
SI_CONTEXT_FLUSH_AND_INV_DB);
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *ps = sctx->ps_shader.current;
struct si_shader *vs = si_get_vs_state(sctx);
struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
static void si_emit_scratch_state(struct si_context *sctx,
struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
static void si_flush_vgt_streamout(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
struct si_context *sctx = (struct si_context*)rctx;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
unsigned i;
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
void si_emit_streamout_end(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
unsigned i;
uint64_t va;
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_WRITE,
RADEON_PRIO_SO_FILLED_SIZE);
{
struct si_context *sctx = (struct si_context*)rctx;
- radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(sctx->b.gfx.cs,
+ radeon_set_context_reg_seq(sctx->b.gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(sctx->b.gfx_cs,
S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
S_028B94_RAST_STREAM(0) |
S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
- radeon_emit(sctx->b.gfx.cs,
+ radeon_emit(sctx->b.gfx_cs,
sctx->streamout.hw_enabled_mask &
sctx->streamout.enabled_stream_buffers_mask);
}
static void si_emit_guardband(struct si_context *ctx,
struct si_signed_scissor *vp_as_scissor)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state vp;
float left, top, right, bottom, max_range, guardband_x, guardband_y;
float discard_x, discard_y;
static void si_emit_scissors(struct r600_common_context *rctx, struct r600_atom *atom)
{
struct si_context *ctx = (struct si_context *)rctx;
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_scissor_state *states = ctx->scissors.states;
unsigned mask = ctx->scissors.dirty_mask;
bool scissor_enabled = false;
static void si_emit_one_viewport(struct si_context *ctx,
struct pipe_viewport_state *state)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, fui(state->scale[0]));
radeon_emit(cs, fui(state->translate[0]));
static void si_emit_viewports(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state *states = ctx->viewports.states;
unsigned mask = ctx->viewports.dirty_mask;
static void si_emit_depth_ranges(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state *states = ctx->viewports.states;
unsigned mask = ctx->viewports.depth_range_dirty_mask;
bool clip_halfz = false;