#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_blitter.h"
+#include "util/u_index_modify.h"
+#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_parse.h"
-#include "radeonsi_pipe.h"
-#include "radeonsi_shader.h"
+#include "si_pipe.h"
+#include "si_shader.h"
#include "si_state.h"
+#include "../radeon/r600_cs.h"
#include "sid.h"
/*
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
unsigned num_sgprs, num_user_sgprs;
unsigned nparams, i, vgpr_comp_cnt;
uint64_t va;
- si_pm4_delete_state(rctx, vs, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
+ si_pm4_delete_state(sctx, vs, shader->pm4);
+ pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
- si_pm4_inval_shader_cache(pm4);
-
/* Certain attributes (position, psize, etc.) don't count as params.
* VS is required to export at least one param and r600_shader_from_tgsi()
* takes care of adding a dummy export.
*/
for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
switch (shader->shader.output[i].name) {
+ case TGSI_SEMANTIC_CLIPVERTEX:
case TGSI_SEMANTIC_POSITION:
case TGSI_SEMANTIC_PSIZE:
break;
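+ /* Declare the format of each of the up to four position exports (POS0..POS3); slots past nr_pos_exports are disabled. */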
si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->shader.vs_out_misc_write ?
+ S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT((shader->shader.clip_dist_write & 0x0F) ?
+ S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT((shader->shader.clip_dist_write & 0xF0) ?
+ S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
- if (num_user_sgprs > num_sgprs)
- num_sgprs = num_user_sgprs;
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs += 2;
+ if (num_user_sgprs > num_sgprs) {
+ /* Last 2 reserved SGPRs are used for VCC */
+ num_sgprs = num_user_sgprs + 2;
+ }
assert(num_sgprs <= 104);
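+ /* VGPR_COMP_CNT=3 also pre-loads InstanceID into an input VGPR; 0 loads VertexID only. */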
vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
S_00B128_SGPRS((num_sgprs - 1) / 8) |
S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
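+ /* Besides the user SGPR count, RSRC2 enables the streamout unit and a base register for each buffer with a non-zero stride. */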
si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
- S_00B12C_USER_SGPR(num_user_sgprs));
-
- if (rctx->chip_class >= CIK) {
- si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
- S_00B118_CU_EN(0xffff));
- si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
- S_00B11C_LIMIT(0));
- }
-
- si_pm4_bind_state(rctx, vs, shader->pm4);
+ S_00B12C_USER_SGPR(num_user_sgprs) |
+ S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
+ S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
+ S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
+ S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
+ S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
+
+ si_pm4_bind_state(sctx, vs, shader->pm4);
+ sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
- unsigned i, exports_ps, num_cout, spi_ps_in_control, db_shader_control;
+ unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
unsigned num_sgprs, num_user_sgprs;
- boolean have_linear = FALSE, have_centroid = FALSE, have_perspective = FALSE;
- unsigned fragcoord_interp_mode = 0;
- unsigned spi_baryc_cntl, spi_ps_input_ena, spi_shader_z_format;
+ unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
uint64_t va;
- si_pm4_delete_state(rctx, ps, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
+ si_pm4_delete_state(sctx, ps, shader->pm4);
+ pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
- si_pm4_inval_shader_cache(pm4);
- db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
+ db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
+ S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);
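+ /* ALPHA_TO_MASK is disabled while CB0 has an integer format, since alpha-to-coverage is undefined for integer alpha. */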
for (i = 0; i < shader->shader.ninput; i++) {
switch (shader->shader.input[i].name) {
case TGSI_SEMANTIC_POSITION:
if (shader->shader.input[i].centroid) {
- /* fragcoord_interp_mode will be written to
- * SPI_BARYC_CNTL.POS_FLOAT_LOCATION
+ /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
* Possible values:
* 0 -> Position = pixel center (default)
* 1 -> Position = pixel centroid
* 2 -> Position = iterated sample number XXX:
* What does this mean?
*/
- fragcoord_interp_mode = 1;
+ spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
}
/* Fall through */
case TGSI_SEMANTIC_FACE:
continue;
}
-
- if (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
- have_linear = TRUE;
- if (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
- have_perspective = TRUE;
- if (shader->shader.input[i].centroid)
- have_centroid = TRUE;
}
for (i = 0; i < shader->shader.noutput; i++) {
db_shader_control |= S_02880C_KILL_ENABLE(1);
exports_ps = 0;
- num_cout = 0;
for (i = 0; i < shader->shader.noutput; i++) {
if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
exports_ps |= 1;
- else if (shader->shader.output[i].name == TGSI_SEMANTIC_COLOR) {
- if (shader->shader.fs_write_all)
- num_cout = shader->shader.nr_cbufs;
- else
- num_cout++;
- }
}
if (!exports_ps) {
/* always export at least 1 component per pixel */
exports_ps = 2;
}
- spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp);
-
- spi_baryc_cntl = 0;
- if (have_perspective)
- spi_baryc_cntl |= have_centroid ?
- S_0286E0_PERSP_CENTROID_CNTL(1) : S_0286E0_PERSP_CENTER_CNTL(1);
- if (have_linear)
- spi_baryc_cntl |= have_centroid ?
- S_0286E0_LINEAR_CENTROID_CNTL(1) : S_0286E0_LINEAR_CENTER_CNTL(1);
- spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(fragcoord_interp_mode);
+ spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
+ S_0286D8_BC_OPTIMIZE_DISABLE(1);
si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
spi_ps_input_ena = shader->spi_ps_input_ena;
num_user_sgprs = SI_PS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
- if (num_user_sgprs > num_sgprs)
- num_sgprs = num_user_sgprs;
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs += 2;
+ /* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
+ if ((num_user_sgprs + 1) > num_sgprs) {
+ /* Last 2 reserved SGPRs are used for VCC */
+ num_sgprs = num_user_sgprs + 1 + 2;
+ }
assert(num_sgprs <= 104);
si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
S_00B028_SGPRS((num_sgprs - 1) / 8));
si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
+ S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
S_00B02C_USER_SGPR(num_user_sgprs));
- if (rctx->chip_class >= CIK) {
- si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
- S_00B01C_CU_EN(0xffff));
- }
si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
- shader->sprite_coord_enable = rctx->sprite_coord_enable;
- si_pm4_bind_state(rctx, ps, shader->pm4);
+ shader->cb0_is_integer = sctx->fb_cb0_is_integer;
+ shader->sprite_coord_enable = sctx->sprite_coord_enable;
+ si_pm4_bind_state(sctx, ps, shader->pm4);
+ sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
/*
return result;
}
-static bool si_update_draw_info_state(struct r600_context *rctx,
- const struct pipe_draw_info *info)
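+/* Map a gallium primitive type to the matching VGT_GS_OUT_PRIM_TYPE value. */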
+static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
- struct si_shader *vs = &rctx->vs_shader->current->shader;
+ static const int prim_conv[] = {
+ [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
+ [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
+ };
+ assert(mode < Elements(prim_conv));
+
+ return prim_conv[mode];
+}
+
+static bool si_update_draw_info_state(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ const struct pipe_index_buffer *ib)
+{
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
+ struct si_shader *vs = &sctx->vs_shader->current->shader;
unsigned prim = si_conv_pipe_prim(info->mode);
+ unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
unsigned ls_mask = 0;
if (pm4 == NULL)
return false;
}
- if (rctx->chip_class >= CIK)
+ if (sctx->b.chip_class >= CIK) {
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+ bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
+ prim == V_008958_DI_PT_LINELOOP ||
+ prim == V_008958_DI_PT_TRIFAN ||
+ prim == V_008958_DI_PT_TRISTRIP_ADJ ||
+ info->primitive_restart ||
+ (rs ? rs->line_stipple_enable : false);
+ /* If the WD switch is false, the IA switch must be false too. */
+ bool ia_switch_on_eop = wd_switch_on_eop;
+
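+ /* PRIMGROUP_SIZE uses minus-one encoding, so 63 requests groups of 64 primitives. */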
+ si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
+ S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+ S_028AA8_PARTIAL_VS_WAVE_ON(1) |
+ S_028AA8_PRIMGROUP_SIZE(63) |
+ S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
+ si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
+ ib->index_size == 4 ? 0xFC000000 : 0xFC00);
+
si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
- else
+ } else {
si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
- si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
- si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
+ }
+
+ si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
info->indexed ? info->index_bias : info->start);
si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
ls_mask = 2;
si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
S_028A0C_AUTO_RESET_CNTL(ls_mask) |
- rctx->pa_sc_line_stipple);
+ sctx->pa_sc_line_stipple);
if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
- S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
+ S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
} else {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
+ si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
}
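+ /* Tell the clipper which VS output vectors are live: the misc vector (point size, edge flag, layer) and the two clip/cull distance vectors. */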
si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
+ S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
- (rctx->queued.named.rasterizer->clip_plane_enable &
+ (sctx->queued.named.rasterizer->clip_plane_enable &
vs->clip_dist_write));
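+ /* When the VS exports clip distances, the user clip planes are handled in the shader; otherwise enable the fixed-function planes here. */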
si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
- rctx->queued.named.rasterizer->pa_cl_clip_cntl |
+ sctx->queued.named.rasterizer->pa_cl_clip_cntl |
(vs->clip_dist_write ? 0 :
- rctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
+ sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
- si_pm4_set_state(rctx, draw_info, pm4);
+ si_pm4_set_state(sctx, draw_info, pm4);
return true;
}
-static void si_update_spi_map(struct r600_context *rctx)
+static void si_update_spi_map(struct si_context *sctx)
{
- struct si_shader *ps = &rctx->ps_shader->current->shader;
- struct si_shader *vs = &rctx->vs_shader->current->shader;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct si_shader *ps = &sctx->ps_shader->current->shader;
+ struct si_shader *vs = &sctx->vs_shader->current->shader;
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
unsigned i, j, tmp;
for (i = 0; i < ps->ninput; i++) {
if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
(ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
- rctx->ps_shader->current->key.ps.flatshade)) {
+ sctx->ps_shader->current->key.ps.flatshade)) {
tmp |= S_028644_FLAT_SHADE(1);
}
if (name == TGSI_SEMANTIC_GENERIC &&
- rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
+ sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
tmp |= S_028644_PT_SPRITE_TEX(1);
}
tmp);
if (name == TGSI_SEMANTIC_COLOR &&
- rctx->ps_shader->current->key.ps.color_two_side) {
+ sctx->ps_shader->current->key.ps.color_two_side) {
name = TGSI_SEMANTIC_BCOLOR;
param_offset++;
goto bcolor;
}
}
- si_pm4_set_state(rctx, spi, pm4);
+ si_pm4_set_state(sctx, spi, pm4);
}
-static void si_update_derived_state(struct r600_context *rctx)
+static void si_update_derived_state(struct si_context *sctx)
{
- struct pipe_context * ctx = (struct pipe_context*)rctx;
+ struct pipe_context * ctx = (struct pipe_context*)sctx;
unsigned vs_dirty = 0, ps_dirty = 0;
- if (!rctx->blitter->running) {
+ if (!sctx->blitter->running) {
/* Flush depth textures which need to be flushed. */
- if (rctx->vs_samplers.depth_texture_mask) {
- si_flush_depth_textures(rctx, &rctx->vs_samplers);
- }
- if (rctx->ps_samplers.depth_texture_mask) {
- si_flush_depth_textures(rctx, &rctx->ps_samplers);
+ for (int i = 0; i < SI_NUM_SHADERS; i++) {
+ if (sctx->samplers[i].depth_texture_mask) {
+ si_flush_depth_textures(sctx, &sctx->samplers[i]);
+ }
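+ /* Likewise decompress any bound color textures that still contain compressed (CMASK/FMASK) data. */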
+ if (sctx->samplers[i].compressed_colortex_mask) {
+ si_decompress_color_textures(sctx, &sctx->samplers[i]);
+ }
}
}
- si_shader_select(ctx, rctx->vs_shader, &vs_dirty);
+ si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
- if (!rctx->vs_shader->current->pm4) {
- si_pipe_shader_vs(ctx, rctx->vs_shader->current);
+ if (!sctx->vs_shader->current->pm4) {
+ si_pipe_shader_vs(ctx, sctx->vs_shader->current);
vs_dirty = 0;
}
if (vs_dirty) {
- si_pm4_bind_state(rctx, vs, rctx->vs_shader->current->pm4);
+ si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
}
- si_shader_select(ctx, rctx->ps_shader, &ps_dirty);
+ si_shader_select(ctx, sctx->ps_shader, &ps_dirty);
- if (!rctx->ps_shader->current->pm4) {
- si_pipe_shader_ps(ctx, rctx->ps_shader->current);
+ if (!sctx->ps_shader->current->pm4) {
+ si_pipe_shader_ps(ctx, sctx->ps_shader->current);
ps_dirty = 0;
}
- if (!rctx->ps_shader->current->bo) {
- if (!rctx->dummy_pixel_shader->pm4)
- si_pipe_shader_ps(ctx, rctx->dummy_pixel_shader);
- else
- si_pm4_bind_state(rctx, vs, rctx->dummy_pixel_shader->pm4);
-
+ if (sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer) {
+ si_pipe_shader_ps(ctx, sctx->ps_shader->current);
ps_dirty = 0;
}
if (ps_dirty) {
- si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
+ si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
}
- if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
+ if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
/* XXX: Emitting the PS state even when only the VS changed
* fixes random failures with piglit glsl-max-varyings.
* Not sure why...
*/
- rctx->emitted.named.ps = NULL;
- si_update_spi_map(rctx);
+ sctx->emitted.named.ps = NULL;
+ si_update_spi_map(sctx);
}
}
-static void si_constant_buffer_update(struct r600_context *rctx)
+static void si_vertex_buffer_update(struct si_context *sctx)
{
- struct pipe_context *ctx = &rctx->context;
- struct si_pm4_state *pm4;
- unsigned shader, i;
- uint64_t va;
-
- if (!rctx->constbuf_state[PIPE_SHADER_VERTEX].dirty_mask &&
- !rctx->constbuf_state[PIPE_SHADER_FRAGMENT].dirty_mask)
- return;
-
- for (shader = PIPE_SHADER_VERTEX ; shader <= PIPE_SHADER_FRAGMENT; shader++) {
- struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
-
- pm4 = CALLOC_STRUCT(si_pm4_state);
- if (!pm4)
- continue;
-
- si_pm4_inval_shader_cache(pm4);
- si_pm4_sh_data_begin(pm4);
-
- for (i = 0; i < 2; i++) {
- if (state->enabled_mask & (1 << i)) {
- struct pipe_constant_buffer *cb = &state->cb[i];
- struct si_resource *rbuffer = si_resource(cb->buffer);
-
- va = r600_resource_va(ctx->screen, (void*)rbuffer);
- va += cb->buffer_offset;
-
- si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
-
- /* Fill in a T# buffer resource description */
- si_pm4_sh_data_add(pm4, va);
- si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
- S_008F04_STRIDE(0)));
- si_pm4_sh_data_add(pm4, cb->buffer_size);
- si_pm4_sh_data_add(pm4, S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32));
- } else {
- /* Fill in an empty T# buffer resource description */
- si_pm4_sh_data_add(pm4, 0);
- si_pm4_sh_data_add(pm4, 0);
- si_pm4_sh_data_add(pm4, 0);
- si_pm4_sh_data_add(pm4, 0);
- }
- }
-
- switch (shader) {
- case PIPE_SHADER_VERTEX:
- si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_CONST);
- si_pm4_set_state(rctx, vs_const, pm4);
- break;
-
- case PIPE_SHADER_FRAGMENT:
- si_pm4_sh_data_end(pm4, R_00B030_SPI_SHADER_USER_DATA_PS_0, SI_SGPR_CONST);
- si_pm4_set_state(rctx, ps_const, pm4);
- break;
-
- default:
- R600_ERR("unsupported %d\n", shader);
- FREE(pm4);
- return;
- }
-
- state->dirty_mask = 0;
- }
-}
-
-static void si_vertex_buffer_update(struct r600_context *rctx)
-{
- struct pipe_context *ctx = &rctx->context;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct pipe_context *ctx = &sctx->b.b;
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
bool bound[PIPE_MAX_ATTRIBS] = {};
unsigned i, count;
uint64_t va;
- si_pm4_inval_texture_cache(pm4);
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
- /* bind vertex buffer once */
- count = rctx->vertex_elements->count;
+ count = sctx->vertex_elements->count;
assert(count <= 256 / 4);
si_pm4_sh_data_begin(pm4);
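+ /* Emit one 4-dword buffer descriptor per vertex element; the list is referenced through the SI_SGPR_VERTEX_BUFFER user-data SGPR. */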
for (i = 0 ; i < count; i++) {
- struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
+ struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
struct pipe_vertex_buffer *vb;
- struct si_resource *rbuffer;
+ struct r600_resource *rbuffer;
unsigned offset;
- if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
+ if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
continue;
- vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
- rbuffer = (struct si_resource*)vb->buffer;
+ vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
+ rbuffer = (struct r600_resource*)vb->buffer;
if (rbuffer == NULL)
continue;
vb->stride + 1);
else
si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
- si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);
+ si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);
if (!bound[ve->vertex_buffer_index]) {
si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
}
}
si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
- si_pm4_set_state(rctx, vertex_buffers, pm4);
+ si_pm4_set_state(sctx, vertex_buffers, pm4);
}
-static void si_state_draw(struct r600_context *rctx,
+static void si_state_draw(struct si_context *sctx,
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib)
{
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
/* queries need some special values
* (this is non-zero if any occlusion query is active) */
- if (rctx->num_cs_dw_queries_suspend) {
- struct si_state_dsa *dsa = rctx->queued.named.dsa;
+ if (sctx->b.num_occlusion_queries > 0) {
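+ /* PERFECT_ZPASS_COUNTS makes occlusion counts exact; CIK additionally exposes ZPASS_ENABLE and per-slice enable bits. */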
+ if (sctx->b.chip_class >= CIK) {
+ si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
+ S_028004_PERFECT_ZPASS_COUNTS(1) |
+ S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
+ S_028004_ZPASS_ENABLE(1) |
+ S_028004_SLICE_EVEN_ENABLE(1) |
+ S_028004_SLICE_ODD_ENABLE(1));
+ } else {
+ si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
+ S_028004_PERFECT_ZPASS_COUNTS(1) |
+ S_028004_SAMPLE_RATE(sctx->fb_log_samples));
+ }
+ }
- si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
- S_028004_PERFECT_ZPASS_COUNTS(1));
- si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE,
- dsa->db_render_override |
- S_02800C_NOOP_CULL_DISABLE(1));
+ if (info->count_from_stream_output) {
+ struct r600_so_target *t =
+ (struct r600_so_target*)info->count_from_stream_output;
+ uint64_t va = r600_resource_va(&sctx->screen->b.b,
+ &t->buf_filled_size->b.b);
+ va += t->buf_filled_size_offset;
+
+ si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ t->stride_in_dw);
+
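+ /* Copy the streamout buffer-filled size from memory into the VGT register so DRAW_OPAQUE can derive the vertex count. */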
+ si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
+ si_pm4_cmd_add(pm4,
+ COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_WR_CONFIRM);
+ si_pm4_cmd_add(pm4, va); /* src address lo */
+ si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
+ si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
+ si_pm4_cmd_add(pm4, 0); /* unused */
+ si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
+ si_pm4_cmd_end(pm4, true);
}
/* draw packet */
si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
if (ib->index_size == 4) {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (R600_BIG_ENDIAN ?
+ si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
} else {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (R600_BIG_ENDIAN ?
+ si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
}
- si_pm4_cmd_end(pm4, rctx->predicate_drawing);
+ si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
si_pm4_cmd_add(pm4, info->instance_count);
- si_pm4_cmd_end(pm4, rctx->predicate_drawing);
+ si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
if (info->indexed) {
uint32_t max_size = (ib->buffer->width0 - ib->offset) /
- rctx->index_buffer.index_size;
+ sctx->index_buffer.index_size;
uint64_t va;
- va = r600_resource_va(&rctx->screen->screen, ib->buffer);
+ va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
va += ib->offset;
- si_pm4_add_bo(pm4, (struct si_resource *)ib->buffer, RADEON_USAGE_READ);
+ si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
si_cmd_draw_index_2(pm4, max_size, va, info->count,
V_0287F0_DI_SRC_SEL_DMA,
- rctx->predicate_drawing);
+ sctx->b.predicate_drawing);
} else {
uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
- si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
+ si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->b.predicate_drawing);
}
- si_pm4_set_state(rctx, draw, pm4);
+
+ si_pm4_set_state(sctx, draw, pm4);
}
+void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
+ uint32_t cp_coher_cntl = 0;
+
+ /* XXX SI flushes both ICACHE and KCACHE if either flag is set.
+ * XXX CIK shouldn't have this issue. Test CIK before separating the flags
+ * XXX to ensure there is no regression. Also find out if there is another
+ * XXX way to flush either ICACHE or KCACHE but not both for SI. */
+ if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
+ R600_CONTEXT_INV_CONST_CACHE)) {
+ cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
+ S_0085F0_SH_KCACHE_ACTION_ENA(1);
+ }
+ if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
+ R600_CONTEXT_STREAMOUT_FLUSH)) {
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
+ S_0085F0_TCL1_ACTION_ENA(1);
+ }
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
+ cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
+ S_0085F0_CB0_DEST_BASE_ENA(1) |
+ S_0085F0_CB1_DEST_BASE_ENA(1) |
+ S_0085F0_CB2_DEST_BASE_ENA(1) |
+ S_0085F0_CB3_DEST_BASE_ENA(1) |
+ S_0085F0_CB4_DEST_BASE_ENA(1) |
+ S_0085F0_CB5_DEST_BASE_ENA(1) |
+ S_0085F0_CB6_DEST_BASE_ENA(1) |
+ S_0085F0_CB7_DEST_BASE_ENA(1);
+ }
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
+ cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
+ S_0085F0_DB_DEST_BASE_ENA(1);
+ }
+
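+ /* CIK replaces SURFACE_SYNC with ACQUIRE_MEM, which extends the coherency range with high size/base dwords. */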
+ if (cp_coher_cntl) {
+ if (sctx->chip_class >= CIK) {
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ } else {
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ }
+ }
+
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
+ }
+
+ if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
+ R600_CONTEXT_PS_PARTIAL_FLUSH)) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ } else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
+ /* Needed if streamout buffers are going to be used as a source. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
+ sctx->flags = 0;
+}
+
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
+
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
struct pipe_index_buffer ib = {};
- uint32_t cp_coher_cntl;
+ uint32_t i;
if (!info->count && (info->indexed || !info->count_from_stream_output))
return;
- if (!rctx->ps_shader || !rctx->vs_shader)
+ if (!sctx->ps_shader || !sctx->vs_shader)
return;
- si_update_derived_state(rctx);
- si_constant_buffer_update(rctx);
- si_vertex_buffer_update(rctx);
+ si_update_derived_state(sctx);
+ si_vertex_buffer_update(sctx);
if (info->indexed) {
/* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;
+ pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
+ ib.user_buffer = sctx->index_buffer.user_buffer;
+ ib.index_size = sctx->index_buffer.index_size;
+ ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;
/* Translate or upload, if needed. */
- r600_translate_index_buffer(rctx, &ib, info->count);
+ if (ib.index_size == 1) {
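+ /* The VGT has no 8-bit index type, so widen ubyte indices to ushort in an upload buffer. */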
+ struct pipe_resource *out_buffer = NULL;
+ unsigned out_offset;
+ void *ptr;
+
+ u_upload_alloc(sctx->b.uploader, 0, info->count * 2,
+ &out_offset, &out_buffer, &ptr);
+
+ util_shorten_ubyte_elts_to_userptr(
+ &sctx->b.b, &ib, 0, ib.offset, info->count, ptr);
+
+ pipe_resource_reference(&ib.buffer, NULL);
+ ib.user_buffer = NULL;
+ ib.buffer = out_buffer;
+ ib.offset = out_offset;
+ ib.index_size = 2;
+ }
if (ib.user_buffer && !ib.buffer) {
- r600_upload_index_buffer(rctx, &ib, info->count);
+ u_upload_data(sctx->b.uploader, 0, info->count * ib.index_size,
+ ib.user_buffer, &ib.offset, &ib.buffer);
}
-
- } else if (info->count_from_stream_output) {
- r600_context_draw_opaque_count(rctx, (struct r600_so_target*)info->count_from_stream_output);
}
- rctx->vs_shader_so_strides = rctx->vs_shader->current->so_strides;
-
- if (!si_update_draw_info_state(rctx, info))
+ if (!si_update_draw_info_state(sctx, info, &ib))
return;
- si_state_draw(rctx, info, &ib);
+ si_state_draw(sctx, info, &ib);
- cp_coher_cntl = si_pm4_sync_flags(rctx);
- if (cp_coher_cntl) {
- struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
+ sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);
- if (pm4 == NULL)
- return;
+ /* Check flush flags. */
+ if (sctx->b.flags)
+ sctx->atoms.cache_flush->dirty = true;
- si_cmd_surface_sync(pm4, cp_coher_cntl);
- si_pm4_set_state(rctx, sync, pm4);
- }
+ si_need_cs_space(sctx, 0, TRUE);
/* Emit states. */
- rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);
-
- si_need_cs_space(rctx, 0, TRUE);
-
- si_pm4_emit_dirty(rctx);
- rctx->pm4_dirty_cdwords = 0;
-
-#if R600_TRACE_CS
- if (rctx->screen->trace_bo) {
- r600_trace_emit(rctx);
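+ /* Emit all dirty state atoms (e.g. the cache flush) before the PM4 states. */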
+ for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
+ if (sctx->atoms.array[i]->dirty) {
+ sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
+ sctx->atoms.array[i]->dirty = false;
+ }
}
-#endif
-#if 0
- /* Enable stream out if needed. */
- if (rctx->streamout_start) {
- r600_context_streamout_begin(rctx);
- rctx->streamout_start = FALSE;
+ si_pm4_emit_dirty(sctx);
+ sctx->pm4_dirty_cdwords = 0;
+
+#if SI_TRACE_CS
+ if (sctx->screen->b.trace_bo) {
+ si_trace_emit(sctx);
}
#endif
- rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY;
-
/* Set the depth buffer as dirty. */
- if (rctx->framebuffer.zsbuf) {
- struct pipe_surface *surf = rctx->framebuffer.zsbuf;
- struct r600_resource_texture *rtex = (struct r600_resource_texture *)surf->texture;
+ if (sctx->framebuffer.zsbuf) {
+ struct pipe_surface *surf = sctx->framebuffer.zsbuf;
+ struct r600_texture *rtex = (struct r600_texture *)surf->texture;
- rtex->dirty_db_mask |= 1 << surf->u.tex.level;
+ rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+ }
+ if (sctx->fb_compressed_cb_mask) {
+ struct pipe_surface *surf;
+ struct r600_texture *rtex;
+ unsigned mask = sctx->fb_compressed_cb_mask;
+
+ do {
+ unsigned i = u_bit_scan(&mask);
+ surf = sctx->framebuffer.cbufs[i];
+ rtex = (struct r600_texture*)surf->texture;
+
+ rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+ } while (mask);
}
pipe_resource_reference(&ib.buffer, NULL);
+ sctx->b.num_draw_calls++;
}