#include "r600_shader.h"
#include "r600d.h"
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"
+#include "nir.h"
+#include "nir/nir_to_tgsi_info.h"
+#include "tgsi/tgsi_from_mesa.h"
+
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
assert(!cb->buf);
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
unsigned alpha_ref = a->sx_alpha_ref;
radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
-static void r600_texture_barrier(struct pipe_context *ctx)
+static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
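+ /* Update-only barriers need no GPU cache maintenance here. */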
+ if (!(flags & ~PIPE_BARRIER_UPDATE))
+ return;
+
+ if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
+ rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
+
+ if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
+ PIPE_BARRIER_SHADER_BUFFER |
+ PIPE_BARRIER_TEXTURE |
+ PIPE_BARRIER_IMAGE |
+ PIPE_BARRIER_STREAMOUT_BUFFER |
+ PIPE_BARRIER_GLOBAL_BUFFER)) {
+ rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE|
+ R600_CONTEXT_INV_TEX_CACHE;
+ }
+
+ if (flags & (PIPE_BARRIER_FRAMEBUFFER|
+ PIPE_BARRIER_IMAGE))
+ rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV;
+
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
+}
+
+static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_WAIT_3D_IDLE;
+ rctx->framebuffer.do_update_surf_dirtiness = true;
}
static unsigned r600_conv_pipe_prim(unsigned prim)
[PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
};
- assert(prim < Elements(prim_conv));
+ assert(prim < ARRAY_SIZE(prim_conv));
return prim_conv[prim];
}
[PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
};
- assert(mode < Elements(prim_conv));
+ assert(mode < ARRAY_SIZE(prim_conv));
return prim_conv[mode];
}
if (update_cb) {
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
+ if (rctx->framebuffer.dual_src_blend != blend->dual_src_blend) {
+ rctx->framebuffer.dual_src_blend = blend->dual_src_blend;
+ r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
+ }
}
static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct pipe_blend_color *state = &rctx->blend_color.state;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
if (rs->offset_enable &&
(rs->offset_units != rctx->poly_offset_state.offset_units ||
- rs->offset_scale != rctx->poly_offset_state.offset_scale)) {
+ rs->offset_scale != rctx->poly_offset_state.offset_scale ||
+ rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
rctx->poly_offset_state.offset_units = rs->offset_units;
rctx->poly_offset_state.offset_scale = rs->offset_scale;
+ rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
}
r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
}
- r600_set_scissor_enable(&rctx->b, rs->scissor_enable);
+ r600_viewport_set_rast_deps(&rctx->b, rs->scissor_enable, rs->clip_halfz);
/* Re-emit PA_SC_LINE_STIPPLE. */
rctx->last_primitive_type = -1;
if (view->tex_resource->gpu_address &&
view->tex_resource->b.b.target == PIPE_BUFFER)
- LIST_DELINIT(&view->list);
+ list_delinit(&view->list);
pipe_resource_reference(&state->texture, NULL);
FREE(view);
}
static void r600_bind_sampler_states(struct pipe_context *pipe,
- unsigned shader,
+ enum pipe_shader_type shader,
unsigned start,
unsigned count, void **states)
{
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
- pipe_resource_reference((struct pipe_resource**)&shader->buffer, NULL);
+ if (shader)
+ r600_resource_reference(&shader->buffer, NULL);
FREE(shader);
}
-static void r600_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
-
- if (ib) {
- pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
- memcpy(&rctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
- } else {
- pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
- }
-}
-
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
if (rctx->vertex_buffer_state.dirty_mask) {
- rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
util_bitcount(rctx->vertex_buffer_state.dirty_mask);
r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
/* Set vertex buffers. */
if (input) {
for (i = 0; i < count; i++) {
- if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
- if (input[i].buffer) {
+ if ((input[i].buffer.resource != vb[i].buffer.resource) ||
+ (vb[i].stride != input[i].stride) ||
+ (vb[i].buffer_offset != input[i].buffer_offset) ||
+ (vb[i].is_user_buffer != input[i].is_user_buffer)) {
+ if (input[i].buffer.resource) {
vb[i].stride = input[i].stride;
vb[i].buffer_offset = input[i].buffer_offset;
- pipe_resource_reference(&vb[i].buffer, input[i].buffer);
+ pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
new_buffer_mask |= 1 << i;
- r600_context_add_resource_size(ctx, input[i].buffer);
+ r600_context_add_resource_size(ctx, input[i].buffer.resource);
} else {
- pipe_resource_reference(&vb[i].buffer, NULL);
+ pipe_resource_reference(&vb[i].buffer.resource, NULL);
disable_mask |= 1 << i;
}
}
}
} else {
for (i = 0; i < count; i++) {
- pipe_resource_reference(&vb[i].buffer, NULL);
+ pipe_resource_reference(&vb[i].buffer.resource, NULL);
}
disable_mask = ((1ull << count) - 1);
}
struct r600_samplerview_state *state)
{
if (state->dirty_mask) {
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
util_bitcount(state->dirty_mask);
r600_mark_atom_dirty(rctx, &state->atom);
}
}
-static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
+static void r600_set_sampler_views(struct pipe_context *pipe,
+ enum pipe_shader_type shader,
unsigned start, unsigned count,
struct pipe_sampler_view **views)
{
(struct r600_texture*)rviews[i]->base.texture;
bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;
- if (!is_buffer && rtex->is_depth && !rtex->is_flushing_texture) {
+ if (!is_buffer && rtex->db_compatible) {
dst->views.compressed_depthtex_mask |= 1 << i;
} else {
dst->views.compressed_depthtex_mask &= ~(1 << i);
}
}
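+/* HW atomic counters live in a single global range. Each stage's shader
+ * key gets the index of the first counter it may use: the sum of the
+ * counters declared by the stages placed before it, in PS, VS, GS, TES,
+ * TCS order (PS and CS start at 0).
+ */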
+static int r600_get_hw_atomic_count(const struct pipe_context *ctx,
+ enum pipe_shader_type shader)
+{
+ const struct r600_context *rctx = (struct r600_context *)ctx;
+ int value = 0;
+ switch (shader) {
+ case PIPE_SHADER_FRAGMENT:
+ case PIPE_SHADER_COMPUTE:
+ default:
+ break;
+ case PIPE_SHADER_VERTEX:
+ value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+ break;
+ case PIPE_SHADER_GEOMETRY:
+ value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+ rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+ break;
+ case PIPE_SHADER_TESS_EVAL:
+ value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+ rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+ (rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0);
+ break;
+ case PIPE_SHADER_TESS_CTRL:
+ value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+ rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
+ (rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0) +
+ rctx->tes_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
+ break;
+ }
+ return value;
+}
+
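+/* Track shader images whose backing texture has a CMASK (fast-cleared
+ * color); these need a color decompress pass before image access returns
+ * valid data.
+ */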
+static void r600_update_compressed_colortex_mask_images(struct r600_image_state *images)
+{
+ uint32_t mask = images->enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ struct pipe_resource *res = images->views[i].base.resource;
+
+ if (res && res->target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+
+ if (rtex->cmask.size) {
+ images->compressed_colortex_mask |= 1 << i;
+ } else {
+ images->compressed_colortex_mask &= ~(1 << i);
+ }
+ }
+ }
+}
+
/* Compute the key for the hw shader variant */
-static inline union r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
- struct r600_pipe_shader_selector * sel)
+static inline void r600_shader_selector_key(const struct pipe_context *ctx,
+ const struct r600_pipe_shader_selector *sel,
+ union r600_shader_key *key)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
- union r600_shader_key key;
- memset(&key, 0, sizeof(key));
+ const struct r600_context *rctx = (struct r600_context *)ctx;
+ memset(key, 0, sizeof(*key));
switch (sel->type) {
case PIPE_SHADER_VERTEX: {
- key.vs.as_ls = (rctx->tes_shader != NULL);
- if (!key.vs.as_ls)
- key.vs.as_es = (rctx->gs_shader != NULL);
+ key->vs.as_ls = (rctx->tes_shader != NULL);
+ if (!key->vs.as_ls)
+ key->vs.as_es = (rctx->gs_shader != NULL);
if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
- key.vs.as_gs_a = true;
- key.vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
+ key->vs.as_gs_a = true;
+ key->vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
}
+ key->vs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_VERTEX);
break;
}
case PIPE_SHADER_GEOMETRY:
+ key->gs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_GEOMETRY);
+ key->gs.tri_strip_adj_fix = rctx->gs_tri_strip_adj_fix;
break;
case PIPE_SHADER_FRAGMENT: {
- key.ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
- key.ps.alpha_to_one = rctx->alpha_to_one &&
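+ /* Image size/layer constants are laid out after the sampler-view
+ * slots (see eg_setup_buffer_constants()), hence this offset. */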
+ if (rctx->ps_shader->info.images_declared)
+ key->ps.image_size_const_offset = util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask);
+ key->ps.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_FRAGMENT);
+ key->ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
+ key->ps.alpha_to_one = rctx->alpha_to_one &&
rctx->rasterizer && rctx->rasterizer->multisample_enable &&
!rctx->framebuffer.cb0_is_integer;
- key.ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
+ key->ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
/* Dual-source blending only makes sense with nr_cbufs == 1. */
- if (key.ps.nr_cbufs == 1 && rctx->dual_src_blend)
- key.ps.nr_cbufs = 2;
+ if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend)
+ key->ps.nr_cbufs = 2;
break;
}
case PIPE_SHADER_TESS_EVAL:
- key.tes.as_es = (rctx->gs_shader != NULL);
+ key->tes.as_es = (rctx->gs_shader != NULL);
+ key->tes.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_EVAL);
break;
case PIPE_SHADER_TESS_CTRL:
- key.tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ key->tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ key->tcs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_CTRL);
+ break;
+ case PIPE_SHADER_COMPUTE:
break;
default:
assert(0);
}
-
- return key;
}
/* Select the hw shader variant depending on the current state.
* (*dirty) is set to 1 if current variant was changed */
-static int r600_shader_select(struct pipe_context *ctx,
+int r600_shader_select(struct pipe_context *ctx,
struct r600_pipe_shader_selector* sel,
bool *dirty)
{
struct r600_pipe_shader * shader = NULL;
int r;
- memset(&key, 0, sizeof(key));
- key = r600_shader_selector_key(ctx, sel);
+ r600_shader_selector_key(ctx, sel, &key);
/* Check if we don't need to change anything.
* This path is also used for most shaders that don't need multiple
if (sel->type == PIPE_SHADER_FRAGMENT &&
sel->num_shaders == 0) {
sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
- key = r600_shader_selector_key(ctx, sel);
+ r600_shader_selector_key(ctx, sel, &key);
}
memcpy(&shader->key, &key, sizeof(key));
return 0;
}
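+/* Shared constructor for TGSI and NIR selectors. The selector keeps its
+ * own copy of the IR (tgsi_dup_tokens / nir_shader_clone), so the caller
+ * may release its program; exported presumably so paths outside the CSO
+ * hooks (e.g. compute) can build selectors directly.
+ */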
+struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
+ const void *prog, enum pipe_shader_ir ir,
+ unsigned pipe_shader_type)
+{
+ struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
+
+ sel->type = pipe_shader_type;
+ if (ir == PIPE_SHADER_IR_TGSI) {
+ sel->tokens = tgsi_dup_tokens((const struct tgsi_token *)prog);
+ tgsi_scan_shader(sel->tokens, &sel->info);
+ } else if (ir == PIPE_SHADER_IR_NIR) {
+ sel->nir = nir_shader_clone(NULL, (const nir_shader *)prog);
+ nir_tgsi_scan_shader(sel->nir, &sel->info, true);
+ }
+ return sel;
+}
+
static void *r600_create_shader_state(struct pipe_context *ctx,
const struct pipe_shader_state *state,
unsigned pipe_shader_type)
{
- struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
int i;
-
- sel->type = pipe_shader_type;
- sel->tokens = tgsi_dup_tokens(state->tokens);
+ struct r600_pipe_shader_selector *sel;
+
+ if (state->type == PIPE_SHADER_IR_TGSI)
+ sel = r600_create_shader_state_tokens(ctx, state->tokens, state->type, pipe_shader_type);
+ else if (state->type == PIPE_SHADER_IR_NIR) {
+ sel = r600_create_shader_state_tokens(ctx, state->ir.nir, state->type, pipe_shader_type);
+ } else
+ assert(0 && "Unknown shader type\n");
+
+ sel->ir_type = state->type;
sel->so = state->stream_output;
- tgsi_scan_shader(state->tokens, &sel->info);
switch (pipe_shader_type) {
case PIPE_SHADER_GEOMETRY:
case TGSI_SEMANTIC_TESSOUTER:
case TGSI_SEMANTIC_PATCH:
sel->lds_patch_outputs_written_mask |=
- 1llu << r600_get_lds_unique_index(name, index);
+ 1ull << r600_get_lds_unique_index(name, index);
break;
default:
sel->lds_outputs_written_mask |=
- 1llu << r600_get_lds_unique_index(name, index);
+ 1ull << r600_get_lds_unique_index(name, index);
}
}
break;
{
struct r600_context *rctx = (struct r600_context *)ctx;
- if (!state)
+ if (!state || rctx->vs_shader == state)
return;
rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
- rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
+
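+ /* Presumably guarded so that binding a VS without streamout outputs
+ * doesn't clobber a stride set by another stage. */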
+ if (rctx->vs_shader->so.num_outputs)
+ rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}
static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
+ if (state == rctx->gs_shader)
+ return;
+
rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
if (!state)
return;
- rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
+
+ if (rctx->gs_shader->so.num_outputs)
+ rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}
static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
+ if (state == rctx->tes_shader)
+ return;
+
rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
if (!state)
return;
- rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
+
+ if (rctx->tes_shader->so.num_outputs)
+ rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}
-static void r600_delete_shader_selector(struct pipe_context *ctx,
- struct r600_pipe_shader_selector *sel)
+void r600_delete_shader_selector(struct pipe_context *ctx,
+ struct r600_pipe_shader_selector *sel)
{
struct r600_pipe_shader *p = sel->current, *c;
while (p) {
p = c;
}
- free(sel->tokens);
+ if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
+ free(sel->tokens);
+ /* We might have converted the TGSI shader to a NIR shader */
+ if (sel->nir)
+ ralloc_free(sel->nir);
+ } else if (sel->ir_type == PIPE_SHADER_IR_NIR)
+ ralloc_free(sel->nir);
free(sel);
}
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
if (state->dirty_mask) {
- rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
: util_bitcount(state->dirty_mask)*19;
r600_mark_atom_dirty(rctx, &state->atom);
}
}
-static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
- struct pipe_constant_buffer *input)
+static void r600_set_constant_buffer(struct pipe_context *ctx,
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
}
- u_upload_data(rctx->b.uploader, 0, size, 256, tmpPtr, &cb->buffer_offset, &cb->buffer);
+ u_upload_data(ctx->stream_uploader, 0, size, 256,
+ tmpPtr, &cb->buffer_offset, &cb->buffer);
free(tmpPtr);
} else {
- u_upload_data(rctx->b.uploader, 0, input->buffer_size, 256, ptr, &cb->buffer_offset, &cb->buffer);
+ u_upload_data(ctx->stream_uploader, 0,
+ input->buffer_size, 256, ptr,
+ &cb->buffer_offset, &cb->buffer);
}
/* account it in gtt */
rctx->b.gtt += input->buffer_size;
r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}
-static void r600_update_driver_const_buffers(struct r600_context *rctx)
+void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only)
{
int sh, size;
void *ptr;
struct pipe_constant_buffer cb;
- for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
+ int start, end;
+
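+ /* compute_only updates just the COMPUTE slot (used from the compute
+ * dispatch path); otherwise walk only the graphics stages. */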
+ start = compute_only ? PIPE_SHADER_COMPUTE : 0;
+ end = compute_only ? PIPE_SHADER_TYPES : PIPE_SHADER_COMPUTE;
+
+ for (sh = start; sh < end; sh++) {
struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
if (!info->vs_ucp_dirty &&
!info->texture_const_dirty &&
- !info->ps_sample_pos_dirty)
+ !info->ps_sample_pos_dirty &&
+ !info->tcs_default_levels_dirty &&
+ !info->cs_block_grid_size_dirty)
continue;
ptr = info->constants;
info->vs_ucp_dirty = false;
}
- if (info->ps_sample_pos_dirty) {
+ else if (info->ps_sample_pos_dirty) {
assert(sh == PIPE_SHADER_FRAGMENT);
if (!size) {
ptr = rctx->sample_positions;
info->ps_sample_pos_dirty = false;
}
+ else if (info->cs_block_grid_size_dirty) {
+ assert(sh == PIPE_SHADER_COMPUTE);
+ if (!size) {
+ ptr = rctx->cs_block_grid_sizes;
+ size = R600_CS_BLOCK_GRID_SIZE;
+ } else {
+ memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
+ }
+ info->cs_block_grid_size_dirty = false;
+ }
+ else if (info->tcs_default_levels_dirty) {
+ /*
+ * We'd only really need this for the default tcs shader.
+ */
+ assert(sh == PIPE_SHADER_TESS_CTRL);
+ if (!size) {
+ ptr = rctx->tess_state;
+ size = R600_TCS_DEFAULT_LEVELS_SIZE;
+ } else {
+ memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
+ }
+ info->tcs_default_levels_dirty = false;
+ }
+
if (info->texture_const_dirty) {
assert (ptr);
assert (size);
memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
if (sh == PIPE_SHADER_FRAGMENT)
memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
+ if (sh == PIPE_SHADER_COMPUTE)
+ memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
+ if (sh == PIPE_SHADER_TESS_CTRL)
+ memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
}
info->texture_const_dirty = false;
}
static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
- int array_size, uint32_t *base_offset)
+ unsigned array_size, uint32_t *base_offset)
{
struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
if (array_size + R600_UCP_SIZE > info->alloc_size) {
samplers->views.dirty_buffer_constants = FALSE;
bits = util_last_bit(samplers->views.enabled_mask);
- array_size = bits * 8 * sizeof(uint32_t) * 4;
+ array_size = bits * 8 * sizeof(uint32_t);
constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);
} else
constants[offset + 4] = 0;
- constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
+ constants[offset + 5] = samplers->views.views[i]->base.u.buf.size /
+ util_format_get_blocksize(samplers->views.views[i]->base.format);
constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
}
}
}
-/* On evergreen we store two values
- * 1. buffer size for TXQ
- * 2. number of cube layers in a cube map array.
+/* On evergreen we store one value:
+ * the number of cube layers in a cube map array.
*/
-static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
+void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
struct r600_textures_info *samplers = &rctx->samplers[shader_type];
- int bits;
+ struct r600_image_state *images = NULL;
+ int bits, sview_bits, img_bits;
uint32_t array_size;
int i;
uint32_t *constants;
uint32_t base_offset;
- if (!samplers->views.dirty_buffer_constants)
+
+ if (shader_type == PIPE_SHADER_FRAGMENT) {
+ images = &rctx->fragment_images;
+ } else if (shader_type == PIPE_SHADER_COMPUTE) {
+ images = &rctx->compute_images;
+ }
+
+ if (!samplers->views.dirty_buffer_constants &&
+ !(images && images->dirty_buffer_constants))
return;
+ if (images)
+ images->dirty_buffer_constants = FALSE;
samplers->views.dirty_buffer_constants = FALSE;
- bits = util_last_bit(samplers->views.enabled_mask);
- array_size = bits * 2 * sizeof(uint32_t) * 4;
+ bits = sview_bits = util_last_bit(samplers->views.enabled_mask);
+ if (images)
+ bits += util_last_bit(images->enabled_mask);
+ img_bits = bits;
+
+ array_size = bits * sizeof(uint32_t);
constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
&base_offset);
- for (i = 0; i < bits; i++) {
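+ /* Layout: one dword per sampler-view slot (cube-array layer count),
+ * followed by one dword per image slot. */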
+ for (i = 0; i < sview_bits; i++) {
if (samplers->views.enabled_mask & (1 << i)) {
- uint32_t offset = (base_offset / 4) + i * 2;
- constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
- constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
+ uint32_t offset = (base_offset / 4) + i;
+ constants[offset] = samplers->views.views[i]->base.texture->array_size / 6;
+ }
+ }
+ if (images) {
+ for (i = sview_bits; i < img_bits; i++) {
+ int idx = i - sview_bits;
+ if (images->enabled_mask & (1 << idx)) {
+ uint32_t offset = (base_offset / 4) + i;
+ constants[offset] = images->views[idx].base.resource->array_size / 6;
+ }
}
}
}
/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
- int i;
struct pipe_context *ctx = &rctx->b.b;
assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
- assert(rctx->framebuffer.nr_samples <= Elements(rctx->sample_positions)/4);
+ assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);
memset(rctx->sample_positions, 0, 4 * 4 * 16);
- for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
+ for (unsigned i = 0; i < rctx->framebuffer.nr_samples; i++) {
ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
/* Also fill in center-zeroed positions used for interpolateAtSample */
rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
if (enable && !rctx->gs_rings.esgs_ring.buffer) {
unsigned size = 0x1C000;
rctx->gs_rings.esgs_ring.buffer =
- pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
+ pipe_buffer_create(rctx->b.b.screen, 0,
PIPE_USAGE_DEFAULT, size);
rctx->gs_rings.esgs_ring.buffer_size = size;
size = 0x4000000;
rctx->gs_rings.gsvs_ring.buffer =
- pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
+ pipe_buffer_create(rctx->b.b.screen, 0,
PIPE_USAGE_DEFAULT, size);
rctx->gs_rings.gsvs_ring.buffer_size = size;
}
{
if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
+ current->shader.cull_dist_write != rctx->clip_misc_state.cull_dist_write ||
current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
+ rctx->clip_misc_state.cull_dist_write = current->shader.cull_dist_write;
rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
assert(!rctx->fixed_func_tcs_shader);
- ureg_DECL_constant2D(ureg, 0, 3, R600_LDS_INFO_CONST_BUFFER);
- const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 2),
- R600_LDS_INFO_CONST_BUFFER);
- const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 3),
- R600_LDS_INFO_CONST_BUFFER);
+ ureg_DECL_constant2D(ureg, 0, 1, R600_BUFFER_INFO_CONST_BUFFER);
+ const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
+ R600_BUFFER_INFO_CONST_BUFFER);
+ const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
+ R600_BUFFER_INFO_CONST_BUFFER);
tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}
+void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only)
+{
+ unsigned i;
+ unsigned counter;
+
+ counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
+ if (counter != rctx->b.last_compressed_colortex_counter) {
+ rctx->b.last_compressed_colortex_counter = counter;
+
+ if (compute_only) {
+ r600_update_compressed_colortex_mask(&rctx->samplers[PIPE_SHADER_COMPUTE].views);
+ } else {
+ for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
+ r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
+ }
+ }
+ if (!compute_only)
+ r600_update_compressed_colortex_mask_images(&rctx->fragment_images);
+ r600_update_compressed_colortex_mask_images(&rctx->compute_images);
+ }
+
+ /* Decompress textures if needed. */
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ struct r600_samplerview_state *views = &rctx->samplers[i].views;
+
+ if (compute_only && i != PIPE_SHADER_COMPUTE)
+ continue;
+ if (views->compressed_depthtex_mask) {
+ r600_decompress_depth_textures(rctx, views);
+ }
+ if (views->compressed_colortex_mask) {
+ r600_decompress_color_textures(rctx, views);
+ }
+ }
+
+ {
+ struct r600_image_state *istate;
+
+ if (!compute_only) {
+ istate = &rctx->fragment_images;
+ if (istate->compressed_depthtex_mask)
+ r600_decompress_depth_images(rctx, istate);
+ if (istate->compressed_colortex_mask)
+ r600_decompress_color_images(rctx, istate);
+ }
+
+ istate = &rctx->compute_images;
+ if (istate->compressed_depthtex_mask)
+ r600_decompress_depth_images(rctx, istate);
+ if (istate->compressed_colortex_mask)
+ r600_decompress_color_images(rctx, istate);
+ }
+}
+
+/* update MEM_SCRATCH buffers if needed */
+void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
+ struct r600_pipe_shader *shader, struct r600_scratch_buffer *scratch,
+ unsigned ring_base_reg, unsigned item_size_reg, unsigned ring_size_reg)
+{
+ unsigned num_ses = rctx->screen->b.info.max_se;
+ unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
+ unsigned nthreads = 128;
+
+ unsigned itemsize = shader->scratch_space_needed * 4;
+ unsigned size = align(itemsize * nthreads * num_pipes * num_ses * 4, 256);
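+ /* Sizing assumption: item_size dwords (hence *4 bytes) per thread,
+ * 128 threads per quad pipe per SE; the trailing *4 is taken to be
+ * headroom for waves in flight (not verified). */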
+
+ if (scratch->dirty ||
+ unlikely(shader->scratch_space_needed != scratch->item_size ||
+ size > scratch->size)) {
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+
+ scratch->dirty = false;
+
+ if (size > scratch->size) {
+ // Release prior one if any
+ if (scratch->buffer) {
+ pipe_resource_reference((struct pipe_resource**)&scratch->buffer, NULL);
+ }
+
+ scratch->buffer = (struct r600_resource *)pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT, size);
+ if (scratch->buffer) {
+ scratch->size = size;
+ }
+ }
+
+ scratch->item_size = shader->scratch_space_needed;
+
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+
+ // multi-SE chips need programming per SE
+ for (unsigned se = 0; se < num_ses; se++) {
+ struct r600_resource *rbuffer = scratch->buffer;
+ unsigned size_per_se = size / num_ses;
+
+ // Direct to particular SE
+ if (num_ses > 1) {
+ radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
+ S_0802C_INSTANCE_INDEX(0) |
+ S_0802C_SE_INDEX(se) |
+ S_0802C_INSTANCE_BROADCAST_WRITES(1) |
+ S_0802C_SE_BROADCAST_WRITES(0));
+ }
+
+ radeon_set_config_reg(cs, ring_base_reg, (rbuffer->gpu_address + size_per_se * se) >> 8);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SCRATCH_BUFFER));
+ radeon_set_context_reg(cs, item_size_reg, itemsize);
+ radeon_set_config_reg(cs, ring_size_reg, size_per_se >> 8);
+ }
+
+ // Restore broadcast mode
+ if (num_ses > 1) {
+ radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
+ S_0802C_INSTANCE_INDEX(0) |
+ S_0802C_SE_INDEX(0) |
+ S_0802C_INSTANCE_BROADCAST_WRITES(1) |
+ S_0802C_SE_BROADCAST_WRITES(1));
+ }
+
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+ }
+}
+
+void r600_setup_scratch_buffers(struct r600_context *rctx)
+{
+ static const struct {
+ unsigned ring_base;
+ unsigned item_size;
+ unsigned ring_size;
+ } regs[R600_NUM_HW_STAGES] = {
+ [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
+ [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
+ [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
+ [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE }
+ };
+
+ for (unsigned i = 0; i < R600_NUM_HW_STAGES; i++) {
+ struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
+
+ if (stage && unlikely(stage->scratch_space_needed)) {
+ r600_setup_scratch_area_for_shader(rctx, stage,
+ &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
+ }
+ }
+}
+
#define SELECT_SHADER_OR_FAIL(x) do { \
r600_shader_select(ctx, rctx->x##_shader, &x##_dirty); \
if (unlikely(!rctx->x##_shader->current)) \
bool need_buf_const;
struct r600_pipe_shader *clip_so_current = NULL;
- if (!rctx->blitter->running) {
- unsigned i;
- unsigned counter;
-
- counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
- if (counter != rctx->b.last_compressed_colortex_counter) {
- rctx->b.last_compressed_colortex_counter = counter;
-
- for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
- r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
- }
- }
-
- /* Decompress textures if needed. */
- for (i = 0; i < PIPE_SHADER_TYPES; i++) {
- struct r600_samplerview_state *views = &rctx->samplers[i].views;
- if (views->compressed_depthtex_mask) {
- r600_decompress_depth_textures(rctx, views);
- }
- if (views->compressed_colortex_mask) {
- r600_decompress_color_textures(rctx, views);
- }
- }
- }
+ if (!rctx->blitter->running)
+ r600_update_compressed_resource_state(rctx, false);
SELECT_SHADER_OR_FAIL(ps);
}
}
+ /*
+ * XXX: I believe there's some fatal flaw in the dirty state logic when
+ * enabling/disabling tes.
+ * VS/ES share all buffer/resource/sampler slots. If TES is enabled,
+ * it will therefore overwrite the VS slots. If it now gets disabled,
+ * the VS needs to rebind all buffer/resource/sampler slots - not only
+ * has TES overwritten the corresponding slots, but when the VS was
+ * operating as LS the things with corresponding dirty bits got bound
+ * to LS slots and won't reflect what is dirty for the VS stage even if the
+ * TES didn't overwrite it. The story for re-enabled TES is similar.
+ * In any case, we're not allowed to submit any TES state when
+ * TES is disabled (the state tracker may not do this but this looks
+ * like an optimization to me, not something which can be relied on).
+ */
+
/* Update clip misc state. */
if (clip_so_current) {
r600_update_clip_state(rctx, clip_so_current);
rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {
- if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
+ if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs ||
+ rctx->cb_misc_state.ps_color_export_mask != rctx->ps_shader->current->ps_color_export_mask) {
rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
+ rctx->cb_misc_state.ps_color_export_mask = rctx->ps_shader->current->ps_color_export_mask;
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
r600_update_db_shader_control(rctx);
}
+ /* For each shader stage that needs to spill, set up buffer for MEM_SCRATCH */
+ if (rctx->b.chip_class >= EVERGREEN) {
+ evergreen_setup_scratch_buffers(rctx);
+ } else {
+ r600_setup_scratch_buffers(rctx);
+ }
+
/* on R600 we stuff masks + txq info into one constant buffer */
/* on evergreen we only need a txq info one */
if (rctx->ps_shader) {
}
}
- r600_update_driver_const_buffers(rctx);
+ if (rctx->tes_shader) {
+ assert(rctx->b.chip_class >= EVERGREEN);
+ need_buf_const = rctx->tes_shader->current->shader.uses_tex_buffers ||
+ rctx->tes_shader->current->shader.has_txq_cube_array_z_comp;
+ if (need_buf_const) {
+ eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_EVAL);
+ }
+ if (rctx->tcs_shader) {
+ need_buf_const = rctx->tcs_shader->current->shader.uses_tex_buffers ||
+ rctx->tcs_shader->current->shader.has_txq_cube_array_z_comp;
+ if (need_buf_const) {
+ eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_CTRL);
+ }
+ }
+ }
+
+ r600_update_driver_const_buffers(rctx, false);
if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
if (!r600_adjust_gprs(rctx)) {
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_clip_misc_state *state = &rctx->clip_misc_state;
radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
S_028810_CLIP_DISABLE(state->clip_disable));
radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
state->pa_cl_vs_out_cntl |
- (state->clip_plane_enable & state->clip_dist_write));
+ (state->clip_plane_enable & state->clip_dist_write) |
+ (state->cull_dist_write << 8));
/* reuse needs to be set off if we write oViewport */
if (rctx->b.chip_class >= EVERGREEN)
radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
S_028AB4_REUSE_OFF(state->vs_out_viewport));
}
-static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
+/* rast_prim is the primitive type after GS. */
+static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
+{
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ enum pipe_prim_type rast_prim = rctx->current_rast_prim;
+
+ /* Skip this if not rendering lines. */
+ if (rast_prim != PIPE_PRIM_LINES &&
+ rast_prim != PIPE_PRIM_LINE_LOOP &&
+ rast_prim != PIPE_PRIM_LINE_STRIP &&
+ rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
+ rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
+ return;
+
+ if (rast_prim == rctx->last_rast_prim)
+ return;
+
+ /* For lines, reset the stipple pattern at each primitive. Otherwise,
+ * reset the stipple pattern at each packet (line strips, line loops).
+ */
+ radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) |
+ (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
+ rctx->last_rast_prim = rast_prim;
+}
+
+static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_draw_info info = *dinfo;
- struct pipe_index_buffer ib = {};
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
+ bool has_user_indices = info->has_user_indices;
uint64_t mask;
- unsigned num_patches, dirty_fb_counter;
+ unsigned num_patches, dirty_tex_counter, index_offset = 0;
+ unsigned index_size = info->index_size;
+ int index_bias;
+ struct r600_shader_atomic combined_atomics[8];
+ uint8_t atomic_used_mask;
- if (!info.indirect && !info.count && (info.indexed || !info.count_from_stream_output)) {
+ if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
return;
}
- if (!rctx->vs_shader || !rctx->ps_shader) {
+ if (unlikely(!rctx->vs_shader)) {
+ assert(0);
+ return;
+ }
+ if (unlikely(!rctx->ps_shader &&
+ (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
assert(0);
return;
}
/* make sure that the gfx ring is only one active */
- if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
- rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ if (radeon_emitted(rctx->b.dma.cs, 0)) {
+ rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ }
+
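+ /* Compute shares the gfx CS on r600; if compute work was recorded
+ * last, submit it before emitting draw state. */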
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
}
/* Re-emit the framebuffer state if needed. */
- dirty_fb_counter = p_atomic_read(&rctx->b.screen->dirty_fb_counter);
- if (dirty_fb_counter != rctx->b.last_dirty_fb_counter) {
- rctx->b.last_dirty_fb_counter = dirty_fb_counter;
+ dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter);
+ if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
+ rctx->b.last_dirty_tex_counter = dirty_tex_counter;
r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
+ rctx->framebuffer.do_update_surf_dirtiness = true;
}
+ if (rctx->gs_shader) {
+ /* Determine whether the GS triangle strip adjacency fix should
+ * be applied. Rotate every other triangle if
+ * - triangle strips with adjacency are fed to the GS and
+ * - primitive restart is disabled (the rotation doesn't help
+ * when the restart occurs after an odd number of triangles).
+ */
+ bool gs_tri_strip_adj_fix =
+ !rctx->tes_shader &&
+ info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
+ !info->primitive_restart;
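+ /* The flag feeds key->gs.tri_strip_adj_fix, so flipping it makes
+ * r600_shader_select() pick (or compile) a different GS variant. */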
+ if (gs_tri_strip_adj_fix != rctx->gs_tri_strip_adj_fix)
+ rctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
+ }
if (!r600_update_derived_state(rctx)) {
/* useless to render because current rendering command
* can't be achieved
return;
}
- if (info.indexed) {
- /* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset;
- if (!info.indirect) {
- ib.offset += info.start * ib.index_size;
- }
+ rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim
+ : (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
+ : info->mode;
+
+ if (rctx->b.chip_class >= EVERGREEN) {
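+ /* atomic_used_mask later sizes the CS-space reservation passed to
+ * r600_need_cs_space() ahead of the atomic setup packets. */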
+ evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask);
+ }
+
+ if (index_size) {
+ index_offset += info->start * index_size;
/* Translate 8-bit indices to 16-bit. */
- if (unlikely(ib.index_size == 1)) {
+ if (unlikely(index_size == 1)) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset;
void *ptr;
unsigned start, count;
- if (likely(!info.indirect)) {
+ if (likely(!info->indirect)) {
start = 0;
- count = info.count;
+ count = info->count;
}
else {
/* Have to get start/count from indirect buffer, slow path ahead... */
- struct r600_resource *indirect_resource = (struct r600_resource *)info.indirect;
+ struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
PIPE_TRANSFER_READ);
if (data) {
- data += info.indirect_offset / sizeof(unsigned);
- start = data[2] * ib.index_size;
+ data += info->indirect->offset / sizeof(unsigned);
+ start = data[2] * index_size;
count = data[0];
}
else {
}
}
- u_upload_alloc(rctx->b.uploader, start, count * 2, 256,
- &out_offset, &out_buffer, &ptr);
+ u_upload_alloc(ctx->stream_uploader, start, count * 2,
+ 256, &out_offset, &out_buffer, &ptr);
+ if (unlikely(!ptr))
+ return;
util_shorten_ubyte_elts_to_userptr(
- &rctx->b.b, &ib, 0, ib.offset + start, count, ptr);
+ &rctx->b.b, info, 0, 0, index_offset, count, ptr);
- pipe_resource_reference(&ib.buffer, NULL);
- ib.user_buffer = NULL;
- ib.buffer = out_buffer;
- ib.offset = out_offset;
- ib.index_size = 2;
+ indexbuf = out_buffer;
+ index_offset = out_offset;
+ index_size = 2;
+ has_user_indices = false;
}
/* Upload the index buffer.
* The upload is skipped for small index counts on little-endian machines
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (ib.user_buffer && (R600_BIG_ENDIAN || info.indirect ||
- info.instance_count > 1 ||
- info.count*ib.index_size > 20)) {
- u_upload_data(rctx->b.uploader, 0, info.count * ib.index_size, 256,
- ib.user_buffer, &ib.offset, &ib.buffer);
- ib.user_buffer = NULL;
- }
+ if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
+ info->instance_count > 1 ||
+ info->count*index_size > 20)) {
+ indexbuf = NULL;
+ u_upload_data(ctx->stream_uploader, 0,
+ info->count * index_size, 256,
+ info->index.user, &index_offset, &indexbuf);
+ has_user_indices = false;
+ }
+ index_bias = info->index_bias;
} else {
- info.index_bias = info.start;
+ index_bias = info->start;
}
/* Set the index offset and primitive restart. */
- if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
- rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
- rctx->vgt_state.vgt_indx_offset != info.index_bias ||
- (rctx->vgt_state.last_draw_was_indirect && !info.indirect)) {
- rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
- rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
- rctx->vgt_state.vgt_indx_offset = info.index_bias;
+ if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
+ rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index ||
+ rctx->vgt_state.vgt_indx_offset != index_bias ||
+ (rctx->vgt_state.last_draw_was_indirect && !info->indirect)) {
+ rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart;
+ rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index;
+ rctx->vgt_state.vgt_indx_offset = index_bias;
r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
}
}
if (rctx->b.chip_class >= EVERGREEN)
- evergreen_setup_tess_constants(rctx, &info, &num_patches);
+ evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */
- r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+ r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask));
r600_flush_emit(rctx);
mask = rctx->dirty_atoms;
r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
}
+ if (rctx->b.chip_class >= EVERGREEN) {
+ evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask);
+ }
+
if (rctx->b.chip_class == CAYMAN) {
/* Copied from radeonsi. */
unsigned primgroup_size = 128; /* recommended without a GS */
}
if (rctx->b.chip_class >= EVERGREEN) {
- uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, &info,
+ uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, info,
num_patches);
evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
* even though it should have no effect on those. */
if (rctx->b.chip_class == R600 && rctx->rasterizer) {
unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
- unsigned prim = info.mode;
+ unsigned prim = info->mode;
if (rctx->gs_shader) {
prim = rctx->gs_shader->gs_output_prim;
if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
- info.mode == R600_PRIM_RECTANGLE_LIST) {
+ info->mode == R600_PRIM_RECTANGLE_LIST) {
su_sc_mode_cntl &= C_028814_CULL_FRONT;
}
radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
}
/* Update start instance. */
- if (!info.indirect && rctx->last_start_instance != info.start_instance) {
- radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
- rctx->last_start_instance = info.start_instance;
+ if (!info->indirect && rctx->last_start_instance != info->start_instance) {
+ radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
+ rctx->last_start_instance = info->start_instance;
}
/* Update the primitive type. */
- if (rctx->last_primitive_type != info.mode) {
- unsigned ls_mask = 0;
-
- if (info.mode == PIPE_PRIM_LINES)
- ls_mask = 1;
- else if (info.mode == PIPE_PRIM_LINE_STRIP ||
- info.mode == PIPE_PRIM_LINE_LOOP)
- ls_mask = 2;
-
- radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
- S_028A0C_AUTO_RESET_CNTL(ls_mask) |
- (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
+ if (rctx->last_primitive_type != info->mode) {
+ r600_emit_rasterizer_prim_state(rctx);
radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
- r600_conv_pipe_prim(info.mode));
+ r600_conv_pipe_prim(info->mode));
- rctx->last_primitive_type = info.mode;
+ rctx->last_primitive_type = info->mode;
}
/* Draw packets. */
- if (!info.indirect) {
- cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, 0);
- cs->buf[cs->cdw++] = info.instance_count;
- }
-
- if (unlikely(info.indirect)) {
- uint64_t va = r600_resource(info.indirect)->gpu_address;
+ if (likely(!info->indirect)) {
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
+ radeon_emit(cs, info->instance_count);
+ } else {
+ uint64_t va = r600_resource(info->indirect->buffer)->gpu_address;
assert(rctx->b.chip_class >= EVERGREEN);
// Invalidate so non-indirect draw calls reset this state
rctx->vgt_state.last_draw_was_indirect = true;
rctx->last_start_instance = -1;
- cs->buf[cs->cdw++] = PKT3(EG_PKT3_SET_BASE, 2, 0);
- cs->buf[cs->cdw++] = EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE;
- cs->buf[cs->cdw++] = va;
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
+ radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
+ radeon_emit(cs, va);
+ radeon_emit(cs, (va >> 32UL) & 0xFF);
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)info.indirect,
- RADEON_USAGE_READ,
- RADEON_PRIO_DRAW_INDIRECT);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ (struct r600_resource*)info->indirect->buffer,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_DRAW_INDIRECT));
}
- if (info.indexed) {
- cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, 0);
- cs->buf[cs->cdw++] = ib.index_size == 4 ?
- (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
- (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
+ if (index_size) {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ radeon_emit(cs, index_size == 4 ?
+ (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
+ (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
- if (ib.user_buffer) {
- unsigned size_bytes = info.count*ib.index_size;
+ if (has_user_indices) {
+ unsigned size_bytes = info->count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
- cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit);
- cs->buf[cs->cdw++] = info.count;
- cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_IMMEDIATE;
- memcpy(cs->buf+cs->cdw, ib.user_buffer, size_bytes);
- cs->cdw += size_dw;
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
+ radeon_emit_array(cs, info->index.user, size_dw);
} else {
- uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
-
- if (likely(!info.indirect)) {
- cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit);
- cs->buf[cs->cdw++] = va;
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
- cs->buf[cs->cdw++] = info.count;
- cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
- RADEON_USAGE_READ,
- RADEON_PRIO_INDEX_BUFFER);
+ uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
+
+ if (likely(!info->indirect)) {
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
+ radeon_emit(cs, va);
+ radeon_emit(cs, (va >> 32UL) & 0xFF);
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ (struct r600_resource*)indexbuf,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_INDEX_BUFFER));
}
else {
- uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;
+ uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
- cs->buf[cs->cdw++] = PKT3(EG_PKT3_INDEX_BASE, 1, 0);
- cs->buf[cs->cdw++] = va;
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
+ radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, (va >> 32UL) & 0xFF);
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
- RADEON_USAGE_READ,
- RADEON_PRIO_INDEX_BUFFER);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ (struct r600_resource*)indexbuf,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_INDEX_BUFFER));
- cs->buf[cs->cdw++] = PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0);
- cs->buf[cs->cdw++] = max_size;
+ radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
+ radeon_emit(cs, max_size);
- cs->buf[cs->cdw++] = PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit);
- cs->buf[cs->cdw++] = info.indirect_offset;
- cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
+ radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
+ radeon_emit(cs, info->indirect->offset);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
}
}
} else {
- if (unlikely(info.count_from_stream_output)) {
- struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
+ if (unlikely(info->count_from_stream_output)) {
+ struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;
radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
- cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
- cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
- cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
- cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
- cs->buf[cs->cdw++] = 0; /* unused */
+ radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
+ radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
+ radeon_emit(cs, va & 0xFFFFFFFFUL); /* src address lo */
+ radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
+ radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
+ radeon_emit(cs, 0); /* unused */
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- t->buf_filled_size, RADEON_USAGE_READ,
- RADEON_PRIO_SO_FILLED_SIZE);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ t->buf_filled_size, RADEON_USAGE_READ,
+ RADEON_PRIO_SO_FILLED_SIZE));
}
- if (likely(!info.indirect)) {
- cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit);
- cs->buf[cs->cdw++] = info.count;
+ if (likely(!info->indirect)) {
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
+ radeon_emit(cs, info->count);
}
else {
- cs->buf[cs->cdw++] = PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit);
- cs->buf[cs->cdw++] = info.indirect_offset;
+ radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
+ radeon_emit(cs, info->indirect->offset);
}
- cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- (info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
+ (info->count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
}
/* SMX returns CONTEXT_DONE too early workaround */
/* ES ring rolling over at EOP - workaround */
if (rctx->b.chip_class == R600) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
}
- /* Set the depth buffer as dirty. */
- if (rctx->framebuffer.state.zsbuf) {
- struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
- struct r600_texture *rtex = (struct r600_texture *)surf->texture;
- rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+ if (rctx->b.chip_class >= EVERGREEN)
+ evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics, &atomic_used_mask);
- if (rtex->surface.flags & RADEON_SURF_SBUFFER)
- rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
- }
- if (rctx->framebuffer.compressed_cb_mask) {
- struct pipe_surface *surf;
- struct r600_texture *rtex;
- unsigned mask = rctx->framebuffer.compressed_cb_mask;
+ if (rctx->trace_buf)
+ eg_trace_emit(rctx);
- do {
- unsigned i = u_bit_scan(&mask);
- surf = rctx->framebuffer.state.cbufs[i];
- rtex = (struct r600_texture*)surf->texture;
+ if (rctx->framebuffer.do_update_surf_dirtiness) {
+ /* Set the depth buffer as dirty. */
+ if (rctx->framebuffer.state.zsbuf) {
+ struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
+ struct r600_texture *rtex = (struct r600_texture *)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
- } while (mask);
+ if (rtex->surface.has_stencil)
+ rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
+ }
+ if (rctx->framebuffer.compressed_cb_mask) {
+ struct pipe_surface *surf;
+ struct r600_texture *rtex;
+ unsigned mask = rctx->framebuffer.compressed_cb_mask;
+
+ do {
+ unsigned i = u_bit_scan(&mask);
+ surf = rctx->framebuffer.state.cbufs[i];
+ rtex = (struct r600_texture*)surf->texture;
+
+ rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+
+ } while (mask);
+ }
+ rctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib.buffer, NULL);
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
rctx->b.num_draw_calls++;
}
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;
if (!shader)
r600_emit_command_buffer(cs, &shader->command_buffer);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
- RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER));
+ RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY));
}
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
format = PIPE_FORMAT_A4R4_UNORM;
desc = util_format_description(format);
+ if (!desc)
+ goto out_unknown;
/* Depth and stencil swizzling is handled separately. */
if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
}
if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
- if (!util_format_s3tc_enabled) {
- goto out_unknown;
- }
-
switch (format) {
case PIPE_FORMAT_DXT1_RGB:
case PIPE_FORMAT_DXT1_RGBA:
switch (desc->nr_channels) {
case 1:
result = FMT_8;
+ is_srgb_valid = TRUE;
goto out_word4;
case 2:
result = FMT_8_8;
const struct util_format_description *desc = util_format_description(format);
int channel = util_format_get_first_non_void_channel(format);
bool is_float;
+ if (!desc)
+ return ~0U;
#define HAS_SIZE(x,y,z,w) \
(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
{
struct r600_context *rctx = (struct r600_context*)ctx;
struct r600_resource *rbuffer = r600_resource(buf);
- unsigned i, shader, mask, alignment = rbuffer->buf->alignment;
+ unsigned i, shader, mask;
struct r600_pipe_sampler_view *view;
/* Reallocate the buffer in the same pipe_resource. */
- r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0,
- alignment, TRUE);
+ r600_alloc_resource(&rctx->screen->b, rbuffer);
/* We changed the buffer, now we need to bind it where the old one was bound. */
/* Vertex buffers. */
mask = rctx->vertex_buffer_state.enabled_mask;
while (mask) {
i = u_bit_scan(&mask);
- if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
+ if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
rctx->vertex_buffer_state.dirty_mask |= 1 << i;
r600_vertex_buffers_dirty(rctx);
}
}
/* Streamout buffers. */
for (i = 0; i < rctx->b.streamout.num_targets; i++) {
- if (rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
+ if (rctx->b.streamout.targets[i] &&
+ rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
if (rctx->b.streamout.begin_emitted) {
r600_emit_streamout_end(&rctx->b);
}
}
/* Texture buffer objects - update the virtual addresses in descriptors. */
- LIST_FOR_EACH_ENTRY(view, &rctx->b.texture_buffers, list) {
+ LIST_FOR_EACH_ENTRY(view, &rctx->texture_buffers, list) {
if (view->base.texture == &rbuffer->b.b) {
- unsigned stride = util_format_get_blocksize(view->base.format);
- uint64_t offset = (uint64_t)view->base.u.buf.first_element * stride;
+ uint64_t offset = view->base.u.buf.offset;
uint64_t va = rbuffer->gpu_address + offset;
view->tex_resource_words[0] = va;
r600_sampler_views_dirty(rctx, state);
}
}
+
+ /* SSBOs */
+ struct r600_image_state *istate = &rctx->fragment_buffers;
+ {
+ uint32_t mask = istate->enabled_mask;
+ bool found = false;
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (istate->views[i].base.resource == &rbuffer->b.b) {
+ found = true;
+ istate->dirty_mask |= 1 << i;
+ }
+ }
+ if (found) {
+ r600_mark_atom_dirty(rctx, &istate->atom);
+ }
+ }
+
}
-static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable)
+static void r600_set_active_query_state(struct pipe_context *ctx, bool enable)
{
struct r600_context *rctx = (struct r600_context*)ctx;
}
}
-static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
-{
- struct r600_context *rctx = (struct r600_context*)ctx;
-
- r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
-}
-
static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
bool include_draw_vbo)
{
- r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo);
+ r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0);
}
/* keep this at the end of this file, please */
rctx->b.b.set_sample_mask = r600_set_sample_mask;
rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
- rctx->b.b.set_index_buffer = r600_set_index_buffer;
rctx->b.b.set_sampler_views = r600_set_sampler_views;
rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
+ rctx->b.b.memory_barrier = r600_memory_barrier;
rctx->b.b.texture_barrier = r600_texture_barrier;
rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
rctx->b.b.set_active_query_state = r600_set_active_query_state;
+
rctx->b.b.draw_vbo = r600_draw_vbo;
rctx->b.invalidate_buffer = r600_invalidate_buffer;
- rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}