const uint *block_layout, const uint *grid_layout)
{
int i;
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
unsigned num_waves;
unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
const uint *grid_layout)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
unsigned i;
/* make sure the gfx ring is the only one active */
- if (ctx->b.rings.dma.cs && ctx->b.rings.dma.cs->cdw) {
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ if (ctx->b.dma.cs && ctx->b.dma.cs->cdw) {
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
/* Initialize all the compute-related registers.
/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
- unsigned reloc = radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.gfx,
+ unsigned reloc = radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx,
(struct r600_resource*)cb->base.texture,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RW_BUFFER);
struct r600_cs_shader_state *state =
(struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint64_t va;
struct r600_resource *code_bo;
unsigned ngpr, nstack;
radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
code_bo, RADEON_USAGE_READ,
RADEON_PRIO_USER_SHADER));
}
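The PKT3_NOP-plus-reloc pairing above recurs throughout this patch: a buffer is first added to the CS buffer list, then a NOP packet is emitted whose payload is the returned list index, which the winsys/kernel later resolves to the buffer's final GPU address. A rough, self-contained sketch of the idiom (toy types and constants only, not the real winsys API):

#include <assert.h>

#define TOY_PKT3_NOP 0x10u
#define TOY_PKT3(op, count) (0xC0000000u | ((op) << 8) | (count))

struct toy_cs { unsigned buf[64]; unsigned cdw; };

/* Pretend the winsys hands out the next free buffer-list slot. */
static unsigned toy_add_to_buffer_list(unsigned *nrelocs) { return (*nrelocs)++; }

static void toy_emit_reloc(struct toy_cs *cs, unsigned *nrelocs)
{
    unsigned reloc = toy_add_to_buffer_list(nrelocs);
    cs->buf[cs->cdw++] = TOY_PKT3(TOY_PKT3_NOP, 0); /* carrier packet */
    cs->buf[cs->cdw++] = reloc; /* slot later resolved to a GPU address */
    assert(cs->cdw <= 64);
}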
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.dma.cs;
unsigned i, ncopy, csize, sub_cmd, shift;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
for (i = 0; i < ncopy; i++) {
csize = size < EG_DMA_COPY_MAX_SIZE ? size : EG_DMA_COPY_MAX_SIZE;
/* emit relocs before writing to the cs so that the cs is always in a consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize);
cs->buf[cs->cdw++] = dst_offset & 0xffffffff;
struct pipe_resource *dst, uint64_t offset,
unsigned size, uint32_t clear_value)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
assert(size);
assert(rctx->screen->b.has_cp_dma);
}
/* This must be done after r600_need_cs_space. */
- reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)dst, RADEON_USAGE_WRITE,
RADEON_PRIO_CP_DMA);
static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
static void evergreen_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_scissor_state *rstate = &rctx->scissor;
struct pipe_scissor_state *state;
uint32_t dirty_mask;
static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
unsigned max_dist = 0;
switch (nr_samples) {
static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
unsigned nr_cbufs = state->nr_cbufs;
unsigned i, tl, br;
tex = (struct r600_texture *)cb->base.texture;
reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
(struct r600_resource*)cb->base.texture,
RADEON_USAGE_READWRITE,
tex->surface.nsamples > 1 ?
RADEON_PRIO_COLOR_BUFFER);
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
- cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
} else {
if (!rctx->keep_tiling_flags) {
unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
(struct r600_resource*)state->cbufs[0]->texture,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COLOR_BUFFER);
if (state->zsbuf) {
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
(struct r600_resource*)state->zsbuf->texture,
RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
float offset_units = state->offset_units;
float offset_scale = state->offset_scale;
static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_db_state *a = (struct r600_db_state*)atom;
if (a->rsurf && a->rsurf->db_htile_surface) {
radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
- reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
+ reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = reloc_idx;
static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
unsigned db_render_control = 0;
unsigned db_count_control = 0;
unsigned resource_offset,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
}
state->dirty_mask = 0;
unsigned reg_alu_const_cache,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
}
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER));
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
dirty_mask &= ~(1 << buffer_index);
struct r600_samplerview_state *state,
unsigned resource_id_base, unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
radeon_emit(cs, (resource_id_base + resource_index) * 8);
radeon_emit_array(cs, rview->tex_resource_words, 8);
- reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource,
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
RADEON_USAGE_READ,
r600_get_sampler_view_priority(rview->tex_resource));
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
unsigned border_index_reg,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = texinfo->states.dirty_mask;
while (dirty_mask) {
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
uint8_t mask = s->sample_mask;
- radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK,
+ radeon_set_context_reg(rctx->b.gfx.cs, R_028C3C_PA_SC_AA_MASK,
mask | (mask << 8) | (mask << 16) | (mask << 24));
}
static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint16_t mask = s->sample_mask;
radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
(shader->buffer->gpu_address + shader->offset) >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
RADEON_USAGE_READ,
RADEON_PRIO_INTERNAL_SHADER));
}
static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
uint32_t v = 0, v2 = 0, primid = 0;
static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_RINGS_STREAMOUT));
radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_RINGS_STREAMOUT));
radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.dma.cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
}
size = (cheight * pitch) / 4;
/* emit relocs before writing to the cs so that the cs is always in a consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rsrc->resource,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rdst->resource,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size);
cs->buf[cs->cdw++] = base >> 8;
unsigned src_x, src_y;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (rctx->b.rings.dma.cs == NULL) {
+ if (rctx->b.dma.cs == NULL) {
goto fallback;
}
* Can we somehow flush the index buffer cache? Starting a new IB seems
* to do the trick. */
if (rctx->b.chip_class <= R700)
- rctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
/**
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
boolean count_draw_in)
{
- struct radeon_winsys_cs *dma = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *dma = ctx->b.dma.cs;
/* Flush the DMA IB if it's not empty. */
if (dma && dma->cdw)
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
- if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
+ if (!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
/* all will be accounted for once the relocations are emitted */
ctx->b.vram = 0;
/* The number of dwords already used in the CS. */
- num_dw += ctx->b.rings.gfx.cs->cdw;
+ num_dw += ctx->b.gfx.cs->cdw;
if (count_draw_in) {
uint64_t mask;
num_dw += 10;
/* Flush if there's not enough space. */
- if (num_dw > ctx->b.rings.gfx.cs->max_dw) {
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ if (num_dw > ctx->b.gfx.cs->max_dw) {
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
}
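The space check above follows a simple budget rule: count the dwords already written plus the worst case the caller is about to emit, and flush early when either that budget or the winsys memory limit would be exceeded. A minimal stand-alone sketch of the rule (hypothetical toy types, not the driver's):

#include <stdbool.h>

struct toy_cs { unsigned cdw, max_dw; };

/* True when the caller must flush before emitting num_dw more dwords. */
static bool toy_needs_flush(const struct toy_cs *cs, unsigned num_dw,
                            bool memory_over_limit)
{
    if (memory_over_limit)  /* mirrors the cs_memory_below_limit check */
        return true;
    return cs->cdw + num_dw > cs->max_dw;  /* not enough room left */
}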
void r600_flush_emit(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
unsigned cp_coher_cntl = 0;
unsigned wait_until = 0;
struct pipe_fence_handle **fence)
{
struct r600_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
return;
ctx->b.vram = 0;
/* Begin a new CS. */
- r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);
+ r600_emit_command_buffer(ctx->b.gfx.cs, &ctx->start_cs_cmd);
/* Re-emit states. */
r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;
- ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->cdw;
}
/* The max number of bytes to copy per packet. */
struct pipe_resource *src, uint64_t src_offset,
unsigned size)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
assert(size);
assert(rctx->screen->b.has_cp_dma);
}
/* This must be done after r600_need_cs_space. */
- src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src,
+ src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)src,
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
- dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst,
+ dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)dst,
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.dma.cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
for (i = 0; i < ncopy; i++) {
csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
/* emit relocs before writing to the cs so that the cs is always in a consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
goto fail;
}
- rctx->b.rings.gfx.cs = ws->cs_create(rctx->b.ctx, RING_GFX,
- r600_context_gfx_flush, rctx,
- rscreen->b.trace_bo ?
- rscreen->b.trace_bo->cs_buf : NULL);
- rctx->b.rings.gfx.flush = r600_context_gfx_flush;
+ rctx->b.gfx.cs = ws->cs_create(rctx->b.ctx, RING_GFX,
+ r600_context_gfx_flush, rctx,
+ rscreen->b.trace_bo ?
+ rscreen->b.trace_bo->cs_buf : NULL);
+ rctx->b.gfx.flush = r600_context_gfx_flush;
rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256,
0, PIPE_USAGE_DEFAULT, FALSE);
static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
float offset_units = state->offset_units;
float offset_scale = state->offset_scale;
static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_scissor_state *rstate = &rctx->scissor;
struct pipe_scissor_state *state;
bool do_disable_workaround = false;
static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
unsigned max_dist = 0;
if (rctx->b.family == CHIP_R600) {
static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
unsigned nr_cbufs = state->nr_cbufs;
struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
(struct r600_resource*)cb[i]->base.texture,
RADEON_USAGE_READWRITE,
cb[i]->base.texture->nr_samples > 1 ?
radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
cb[i]->cb_buffer_fmask,
RADEON_USAGE_READWRITE,
cb[i]->base.texture->nr_samples > 1 ?
radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
cb[i]->cb_buffer_cmask,
RADEON_USAGE_READWRITE,
cb[i]->base.texture->nr_samples > 1 ?
if (state->zsbuf) {
struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
- &rctx->b.rings.gfx,
+ &rctx->b.gfx,
(struct r600_resource*)state->zsbuf->texture,
RADEON_USAGE_READWRITE,
surf->base.texture->nr_samples > 1 ?
static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_db_state *a = (struct r600_db_state*)atom;
if (a->rsurf && a->rsurf->db_htile_surface) {
radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
- reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
+ reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = reloc_idx;
static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
unsigned db_render_control = 0;
unsigned db_render_override =
static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_config_state *a = (struct r600_config_state*)atom;
radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;
while (dirty_mask) {
radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
}
}
unsigned reg_alu_constbuf_size,
unsigned reg_alu_const_cache)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
}
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
dirty_mask &= ~(1 << buffer_index);
struct r600_samplerview_state *state,
unsigned resource_id_base)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
radeon_emit(cs, (resource_id_base + resource_index) * 7);
radeon_emit_array(cs, rview->tex_resource_words, 7);
- reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource,
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
RADEON_USAGE_READ,
r600_get_sampler_view_priority(rview->tex_resource));
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
unsigned resource_id_base,
unsigned border_color_reg)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = texinfo->states.dirty_mask;
while (dirty_mask) {
static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
unsigned tmp;
tmp = S_009508_DISABLE_CUBE_ANISO(1) |
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
uint8_t mask = s->sample_mask;
- radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
+ radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK,
mask | (mask << 8) | (mask << 16) | (mask << 24));
}
static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
RADEON_USAGE_READ,
RADEON_PRIO_INTERNAL_SHADER));
}
static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
uint32_t v2 = 0, primid = 0;
static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_RINGS_STREAMOUT));
radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_RINGS_STREAMOUT));
radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.dma.cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
cheight = cheight > copy_height ? copy_height : cheight;
size = (cheight * pitch) / 4;
/* emit relocs before writing to the cs so that the cs is always in a consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ,
RADEON_PRIO_SDMA_TEXTURE);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_TEXTURE);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size);
cs->buf[cs->cdw++] = base >> 8;
unsigned src_x, src_y;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (rctx->b.rings.dma.cs == NULL) {
+ if (rctx->b.dma.cs == NULL) {
goto fallback;
}
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
- r600_emit_command_buffer(rctx->b.rings.gfx.cs, ((struct r600_cso_state*)atom)->cb);
+ r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
unsigned alpha_ref = a->sx_alpha_ref;
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct pipe_blend_color *state = &rctx->blend_color.state;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_viewport_state *rstate = &rctx->viewport;
struct pipe_viewport_state *state;
uint32_t dirty_mask;
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_clip_misc_state *state = &rctx->clip_misc_state;
radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
struct r600_context *rctx = (struct r600_context *)ctx;
struct pipe_draw_info info = *dinfo;
struct pipe_index_buffer ib = {};
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint64_t mask;
if (!info.indirect && !info.count && (info.indexed || !info.count_from_stream_output)) {
}
/* make sure the gfx ring is the only one active */
- if (rctx->b.rings.dma.cs && rctx->b.rings.dma.cs->cdw) {
- rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
+ rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
}
if (!r600_update_derived_state(rctx)) {
cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->b.predicate_drawing);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)info.indirect,
RADEON_USAGE_READ,
RADEON_PRIO_DRAW_INDIRECT);
cs->buf[cs->cdw++] = info.count;
cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->b.predicate_drawing);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)ib.buffer,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER);
cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->b.predicate_drawing);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)ib.buffer,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER);
cs->buf[cs->cdw++] = 0; /* unused */
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+ cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;
if (!shader)
r600_emit_command_buffer(cs, &shader->command_buffer);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, shader->bo,
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER));
}
void r600_trace_emit(struct r600_context *rctx)
{
struct r600_screen *rscreen = rctx->screen;
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint64_t va;
uint32_t reloc;
va = rscreen->b.trace_bo->gpu_address;
- reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rscreen->b.trace_bo,
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rscreen->b.trace_bo,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
radeon_emit(cs, va & 0xFFFFFFFFUL);
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage)
{
- if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
+ if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
return TRUE;
}
- if (ctx->rings.dma.cs && ctx->rings.dma.cs->cdw &&
- ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
+ if (ctx->dma.cs && ctx->dma.cs->cdw &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
return TRUE;
}
return FALSE;
rusage = RADEON_USAGE_WRITE;
}
- if (ctx->rings.gfx.cs->cdw != ctx->initial_gfx_cs_size &&
- ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
+ if (ctx->gfx.cs->cdw != ctx->initial_gfx_cs_size &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.gfx.flush(ctx, 0, NULL);
+ ctx->gfx.flush(ctx, 0, NULL);
busy = true;
}
}
- if (ctx->rings.dma.cs &&
- ctx->rings.dma.cs->cdw &&
- ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
+ if (ctx->dma.cs &&
+ ctx->dma.cs->cdw &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.dma.flush(ctx, 0, NULL);
+ ctx->dma.flush(ctx, 0, NULL);
busy = true;
}
}
} else {
/* We will be waiting for the GPU. Wait for any offloaded
* CS flush to complete to avoid busy-waiting in the winsys. */
- ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
- if (ctx->rings.dma.cs)
- ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
+ ctx->ws->cs_sync_flush(ctx->gfx.cs);
+ if (ctx->dma.cs)
+ ctx->ws->cs_sync_flush(ctx->dma.cs);
}
}
bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);
return rctx->screen->has_cp_dma ||
- (dword_aligned && (rctx->rings.dma.cs ||
+ (dword_aligned && (rctx->dma.cs ||
rctx->screen->has_streamout));
}
void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw)
{
/* Flush the GFX IB if it's not empty. */
- if (ctx->rings.gfx.cs->cdw > ctx->initial_gfx_cs_size)
- ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ if (ctx->gfx.cs->cdw > ctx->initial_gfx_cs_size)
+ ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space. */
- if ((num_dw + ctx->rings.dma.cs->cdw) > ctx->rings.dma.cs->max_dw) {
- ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->rings.dma.cs->cdw) <= ctx->rings.dma.cs->max_dw);
+ if ((num_dw + ctx->dma.cs->cdw) > ctx->dma.cs->max_dw) {
+ ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ assert((num_dw + ctx->dma.cs->cdw) <= ctx->dma.cs->max_dw);
}
}
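Note the ordering rule encoded in r600_need_dma_space: pending GFX work is submitted before DMA space is reserved, presumably so commands queued earlier on the other ring are not reordered behind the DMA copy, and the DMA IB itself is flushed when the new packets would not fit. A hedged toy sketch of that invariant (illustrative types only):

struct toy_cs { unsigned cdw, max_dw; };
struct toy_ring { struct toy_cs *cs; void (*flush)(void *ctx); };

static void toy_need_dma_space(void *ctx, struct toy_ring *gfx,
                               struct toy_ring *dma, unsigned num_dw,
                               unsigned initial_gfx_cs_size)
{
    /* Submit pending GFX work first so earlier work on the other ring
     * stays ordered ahead of the DMA commands. */
    if (gfx->cs->cdw > initial_gfx_cs_size)
        gfx->flush(ctx);
    /* Then flush the DMA IB itself if the new packets would not fit. */
    if (dma->cs->cdw + num_dw > dma->cs->max_dw)
        dma->flush(ctx);
}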
if (flags & PIPE_FLUSH_END_OF_FRAME)
rflags |= RADEON_FLUSH_END_OF_FRAME;
- if (rctx->rings.dma.cs) {
- rctx->rings.dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
+ if (rctx->dma.cs) {
+ rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
}
- rctx->rings.gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+ rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
/* Both engines can signal out of order, so we need to keep both fences. */
if (gfx_fence || sdma_fence) {
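Because the GFX and SDMA engines retire work independently, a single fence cannot stand in for both; the driver keeps one fence per ring and treats the flush as complete only when both have signaled. A self-contained sketch of that "wait on both" rule (toy fence type, not Mesa's pipe_fence_handle):

#include <stdbool.h>

struct toy_fence { bool signaled; };

/* Both engines can signal out of order; done means both are done. */
static bool toy_both_done(const struct toy_fence *gfx,
                          const struct toy_fence *sdma)
{
    bool gfx_done  = !gfx  || gfx->signaled;   /* a ring may be absent */
    bool sdma_done = !sdma || sdma->signaled;
    return gfx_done && sdma_done;
}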
struct pipe_fence_handle **fence)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->dma.cs;
if (cs->cdw)
rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence, 0);
return false;
if (rscreen->info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
- rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
- r600_flush_dma_ring,
- rctx, NULL);
- rctx->rings.dma.flush = r600_flush_dma_ring;
+ rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
+ r600_flush_dma_ring,
+ rctx, NULL);
+ rctx->dma.flush = r600_flush_dma_ring;
}
return true;
void r600_common_context_cleanup(struct r600_common_context *rctx)
{
- if (rctx->rings.gfx.cs)
- rctx->ws->cs_destroy(rctx->rings.gfx.cs);
- if (rctx->rings.dma.cs)
- rctx->ws->cs_destroy(rctx->rings.dma.cs);
+ if (rctx->gfx.cs)
+ rctx->ws->cs_destroy(rctx->gfx.cs);
+ if (rctx->dma.cs)
+ rctx->ws->cs_destroy(rctx->dma.cs);
if (rctx->ctx)
rctx->ws->ctx_destroy(rctx->ctx);
struct pipe_fence_handle **fence);
};
-struct r600_rings {
- struct r600_ring gfx;
- struct r600_ring dma;
-};
-
struct r600_common_context {
struct pipe_context b; /* base class */
struct radeon_winsys_ctx *ctx;
enum radeon_family family;
enum chip_class chip_class;
- struct r600_rings rings;
+ struct r600_ring gfx;
+ struct r600_ring dma;
struct pipe_fence_handle *last_sdma_fence;
unsigned initial_gfx_cs_size;
unsigned gpu_reset_counter;
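This hunk is the heart of the patch: the one-level r600_rings wrapper is removed and gfx/dma become direct members of r600_common_context, which is what shortens every ctx->rings.gfx.cs access in the rest of the diff to ctx->gfx.cs. A minimal before/after model of the change (toy types, for illustration only):

struct toy_cs { unsigned cdw, max_dw; };

struct toy_ring {
    struct toy_cs *cs;
    void (*flush)(void *ctx, unsigned flags);
};

/* Before:
 *   struct toy_rings { struct toy_ring gfx, dma; };
 *   struct toy_context { struct toy_rings rings; };   // ctx->rings.gfx.cs
 */
struct toy_context {
    struct toy_ring gfx;   /* now ctx->gfx.cs */
    struct toy_ring dma;   /* now ctx->dma.cs */
};

static int toy_dma_pending(const struct toy_context *ctx)
{
    return ctx->dma.cs && ctx->dma.cs->cdw != 0;   /* shorter access path */
}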
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
uint64_t va;
r600_update_occlusion_query_state(ctx, query->type, 1);
default:
assert(0);
}
- r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
if (r600_is_timer_query(query->type))
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
uint64_t va;
/* Queries that need a begin have already called this from begin_query. */
default:
assert(0);
}
- r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
query->buffer.results_end += query->result_size;
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
int operation, bool flag_wait)
{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
uint32_t op = PRED_OP(operation);
/* if true then invert, see GL_ARB_conditional_render_inverted */
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
radeon_emit(cs, va + results_base);
radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
- r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
+ r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
results_base += query->result_size;
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
struct r600_resource *buffer;
uint32_t *results;
unsigned num_backends = ctx->screen->info.r600_num_backends;
radeon_emit(cs, buffer->gpu_address);
radeon_emit(cs, buffer->gpu_address >> 32);
- r600_emit_reloc(ctx, &ctx->rings.gfx, buffer,
+ r600_emit_reloc(ctx, &ctx->gfx, buffer,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
/* analyze results */
static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
struct r600_so_target **t = rctx->streamout.targets;
unsigned *stride_in_dw = rctx->streamout.stride_in_dw;
unsigned i, update_flags = 0;
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
radeon_emit(cs, va >> 8); /* BUFFER_BASE */
- r600_emit_reloc(rctx, &rctx->rings.gfx, r600_resource(t[i]->b.buffer),
+ r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
RADEON_USAGE_WRITE, RADEON_PRIO_RINGS_STREAMOUT);
/* R7xx requires this packet after updating BUFFER_BASE.
radeon_emit(cs, i);
radeon_emit(cs, va >> 8);
- r600_emit_reloc(rctx, &rctx->rings.gfx, r600_resource(t[i]->b.buffer),
+ r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
RADEON_USAGE_WRITE, RADEON_PRIO_RINGS_STREAMOUT);
}
}
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- r600_emit_reloc(rctx, &rctx->rings.gfx, t[i]->buf_filled_size,
+ r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
} else {
/* Start from the beginning. */
void r600_emit_streamout_end(struct r600_common_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
struct r600_so_target **t = rctx->streamout.targets;
unsigned i;
uint64_t va;
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- r600_emit_reloc(rctx, &rctx->rings.gfx, t[i]->buf_filled_size,
+ r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
/* Zero the buffer size. The counters (primitives generated,
S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx));
}
- radeon_set_context_reg(rctx->rings.gfx.cs, strmout_buffer_reg, strmout_buffer_val);
- radeon_set_context_reg(rctx->rings.gfx.cs, strmout_config_reg, strmout_config_val);
+ radeon_set_context_reg(rctx->gfx.cs, strmout_buffer_reg, strmout_buffer_val);
+ radeon_set_context_reg(rctx->gfx.cs, strmout_config_reg, strmout_config_val);
}
static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma.cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
ncopy = (size + CIK_SDMA_COPY_MAX_SIZE - 1) / CIK_SDMA_COPY_MAX_SIZE;
r600_need_dma_space(&ctx->b, ncopy * 7);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rsrc, RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rdst, RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
for (i = 0; i < ncopy; i++) {
unsigned pitch,
unsigned bpe)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma.cs;
struct si_screen *sscreen = ctx->screen;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
ncopy = (copy_height + cheight - 1) / cheight;
r600_need_dma_space(&ctx->b, ncopy * 12);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rsrc->resource,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rsrc->resource,
RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rdst->resource,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rdst->resource,
RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
copy_height = size * 4 / pitch;
unsigned copy_height, y_align;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (sctx->b.rings.dma.cs == NULL) {
+ if (sctx->b.dma.cs == NULL) {
goto fallback;
}
uint32_t pc, const void *input)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
struct r600_resource *input_buffer = program->input_buffer;
kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;
kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
- sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
+ sctx->b.gfx.cs, PIPE_TRANSFER_WRITE);
for (i = 0; i < 3; i++) {
kernel_args[i] = grid_layout[i];
kernel_args[i + 3] = grid_layout[i] * block_layout[i];
shader->scratch_bytes_per_wave *
num_waves_for_scratch);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
shader->scratch_bo,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
kernel_args_va = input_buffer->gpu_address;
kernel_args_va += kernel_args_offset;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, input_buffer,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, buffer,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}
#if HAVE_LLVM >= 0x0306
shader_va += pc;
#endif
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, shader->bo,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);
uint64_t dst_va, uint64_t src_va,
unsigned size, unsigned flags)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
uint64_t dst_va, unsigned size,
uint32_t clear_value, unsigned flags)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
si_need_cs_space(sctx);
/* This must be done after need_cs_space. */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)dst,
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)src,
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
/* Fallback for unaligned clears. */
if (offset % 4 != 0 || size % 4 != 0) {
uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
- sctx->b.rings.gfx.cs,
+ sctx->b.gfx.cs,
PIPE_TRANSFER_WRITE);
map += offset;
for (unsigned i = 0; i < size; i++) {
util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
desc->list_dirty = false;
if (!rview->resource)
continue;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rview->resource, RADEON_USAGE_READ,
r600_get_sampler_view_priority(rview->resource));
}
if (!views->desc.buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
}
(struct si_sampler_view*)view;
if (rview->resource)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rview->resource, RADEON_USAGE_READ,
r600_get_sampler_view_priority(rview->resource));
if (rview->dcc_buffer && rview->dcc_buffer != rview->resource)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rview->dcc_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DCC);
{
if (!states->desc.buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, states->desc.buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
}
while (mask) {
int i = u_bit_scan64(&mask);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffers->buffers[i],
buffers->shader_usage, buffers->priority);
}
if (!buffers->desc.buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
buffers->desc.buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_DESCRIPTORS);
}
if (!sctx->vertex_buffer[vb].buffer)
continue;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
if (!desc->buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
desc->buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
}
if (!desc->buffer)
return false;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
desc->buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
desc[3] = sctx->vertex_elements->rsrc_word3[i];
if (!bound[ve->vertex_buffer_index]) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)vb->buffer,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
bound[ve->vertex_buffer_index] = true;
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
buffers->buffers[slot] = buffer;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << slot;
S_008F0C_ADD_TID_ENABLE(add_tid);
pipe_resource_reference(&buffers->buffers[slot], buffer);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << slot;
/* Set the resource. */
pipe_resource_reference(&buffers->buffers[bufidx],
buffer);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << bufidx;
old_va, buf);
buffers->desc.list_dirty = true;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, buffers->shader_usage,
buffers->priority);
old_va, buf);
buffers->desc.list_dirty = true;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, buffers->shader_usage,
buffers->priority);
}
old_va, buf);
views->desc.list_dirty = true;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, RADEON_USAGE_READ,
RADEON_PRIO_SAMPLER_BUFFER);
}
struct si_descriptors *desc,
unsigned sh_base, bool keep_dirty)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint64_t va;
if (!desc->pointer_dirty || !desc->buffer)
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma.cs;
unsigned i, ncopy, csize, max_csize, sub_cmd, shift;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
r600_need_dma_space(&ctx->b, ncopy * 5);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rsrc, RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, rdst, RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
for (i = 0; i < ncopy; i++) {
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma.cs;
struct si_screen *sscreen = ctx->screen;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
ncopy = (size / SI_DMA_COPY_MAX_SIZE_DW) + !!(size % SI_DMA_COPY_MAX_SIZE_DW);
r600_need_dma_space(&ctx->b, ncopy * 9);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rsrc->resource,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rsrc->resource,
RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.dma, &rdst->resource,
+ radeon_add_to_buffer_list(&ctx->b, &ctx->b.dma, &rdst->resource,
RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
for (i = 0; i < ncopy; i++) {
unsigned src_x, src_y;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (sctx->b.rings.dma.cs == NULL) {
+ if (sctx->b.dma.cs == NULL) {
goto fallback;
}
/* initialize */
void si_need_cs_space(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- struct radeon_winsys_cs *dma = ctx->b.rings.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *dma = ctx->b.dma.cs;
/* Flush the DMA IB if it's not empty. */
if (dma && dma->cdw)
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
/* There are two memory usage counters in the winsys for all buffers
* that have been added (cs_add_buffer) and two counters in the pipe
* driver for those that haven't been added yet.
*/
- if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs,
+ if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs,
ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
ctx->b.gtt = 0;
* and just flush if there is not enough space left.
*/
if (unlikely(cs->cdw > cs->max_dw - 2048))
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
void si_context_gfx_flush(void *context, unsigned flags,
struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
struct radeon_winsys *ws = ctx->b.ws;
if (ctx->gfx_flush_in_progress)
r600_postflush_resume_features(&ctx->b);
- ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
sctx->b.b.create_video_buffer = vl_video_buffer_create;
}
- sctx->b.rings.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX, si_context_gfx_flush,
- sctx, sscreen->b.trace_bo ?
- sscreen->b.trace_bo->cs_buf : NULL);
- sctx->b.rings.gfx.flush = si_context_gfx_flush;
+ sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX, si_context_gfx_flush,
+ sctx, sscreen->b.trace_bo ?
+ sscreen->b.trace_bo->cs_buf : NULL);
+ sctx->b.gfx.flush = si_context_gfx_flush;
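For readers tracking the rename: the gfx and dma rings now hang directly off r600_common_context instead of living behind a rings struct. A minimal sketch of the shape involved, assuming the pre-existing r600_ring definition:

/* Sketch only, not part of this patch. cs and flush are the two
 * members every hunk in this series touches. */
struct r600_ring {
	struct radeon_winsys_cs *cs;
	void (*flush)(void *ctx, unsigned flags,
		      struct pipe_fence_handle **fence);
};

struct r600_common_context {
	/* ... */
	struct r600_ring gfx;	/* was rings.gfx */
	struct r600_ring dma;	/* was rings.dma */
	/* ... */
};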
/* Border colors. */
sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
				  sizeof(*sctx->border_color_table));
void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
for (int i = 0; i < state->nbo; ++i) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, state->bo[i],
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, state->bo[i],
state->bo_usage[i], state->bo_priority[i]);
}
} else {
struct r600_resource *ib = state->indirect_buffer;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, ib,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, ib,
RADEON_USAGE_READ,
RADEON_PRIO_IB2);
static void si_emit_cb_target_mask(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_state_blend *blend = sctx->queued.named.blend;
uint32_t mask = 0, i;
static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct tgsi_shader_info *info = si_get_vs_info(sctx);
unsigned window_space =
info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
static void si_emit_scissors(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct pipe_scissor_state *states = sctx->scissors.states;
unsigned mask = sctx->scissors.dirty_mask;
static void si_emit_viewports(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct pipe_viewport_state *states = sctx->viewports.states;
unsigned mask = sctx->viewports.dirty_mask;
static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned db_shader_control;
static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
struct r600_texture *tex = NULL;
}
tex = (struct r600_texture *)cb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
&tex->resource, RADEON_USAGE_READWRITE,
tex->surface.nsamples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
}
if (tex->dcc_buffer && tex->dcc_buffer != &tex->resource) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
tex->dcc_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_DCC);
}
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
&rtex->resource, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
if (zb->db_htile_data_base) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rtex->htile_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_HTILE);
}
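All of the framebuffer hunks above follow one rule: a buffer object is added to the gfx ring's buffer list, with a usage flag and a priority, before any register write points at its GPU address, so the kernel can validate and pin it at submit time. A hedged sketch of the pattern (bo, the register, and the priority are placeholders, not taken from this patch):

/* Illustrative pattern only. */
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, bo,
			  RADEON_USAGE_READWRITE,
			  RADEON_PRIO_COLOR_BUFFER);
radeon_set_context_reg(cs, R_028C60_CB_COLOR0_BASE,
		       bo->gpu_address >> 8);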
static void si_emit_msaa_sample_locs(struct si_context *sctx,
struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned nr_samples = sctx->framebuffer.nr_samples;
cayman_emit_msaa_sample_locs(cs, nr_samples > 1 ? nr_samples :
				 SI_NUM_SMOOTH_AA_SAMPLES);
static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
cayman_emit_msaa_config(cs, sctx->framebuffer.nr_samples,
			sctx->ps_iter_samples,
			sctx->smoothing_enabled ?
			SI_NUM_SMOOTH_AA_SAMPLES : 0);
static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned mask = sctx->sample_mask.sample_mask;
radeon_set_context_reg_seq(cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_shader_ctx_state *ls = &sctx->vs_shader;
/* The TES pointer will only be used for sctx->last_tcs.
* It would be wrong to think that TCS = TES. */
static void si_emit_scratch_reloc(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
if (!sctx->emit_scratch_reloc)
return;
radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
		       sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
if (info->count_from_stream_output) {
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
} else {
si_invalidate_draw_sh_constants(sctx);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
}
unsigned index_max_size = (ib->buffer->width0 - ib->offset) /
			  ib->index_size;
uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)ib->buffer,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
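For context, the index_va computed above normally feeds a DRAW_INDEX_2 packet right after the buffer-list registration. A hedged sketch of that step (render_cond_bit and the exact packet layout are assumptions recalled from the surrounding driver, not shown in this excerpt):

/* Sketch only: emit the indexed draw using the index buffer VA. */
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size);
radeon_emit(cs, index_va);
radeon_emit(cs, (index_va >> 32UL) & 0xFF);
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);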
void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
struct r600_common_context *sctx = &si_ctx->b;
- struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->gfx.cs;
uint32_t cp_coher_cntl = 0;
uint32_t compute =
PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
sctx->trace_id++;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, sctx->trace_buf,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
	    S_370_WR_CONFIRM(1) |
	    S_370_ENGINE_SEL(V_370_ME));
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_shader *ps = sctx->ps_shader.current;
struct si_shader *vs = si_get_vs_state(sctx);
struct tgsi_shader_info *psinfo;
static void si_emit_spi_ps_input(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_shader *ps = sctx->ps_shader.current;
unsigned input_ena;