* Christian König <christian.koenig@amd.com>
*/
-#include "util/u_memory.h"
-#include "util/u_framebuffer.h"
-#include "util/u_blitter.h"
-#include "util/u_index_modify.h"
-#include "util/u_upload_mgr.h"
-#include "tgsi/tgsi_parse.h"
#include "si_pipe.h"
#include "si_shader.h"
-#include "si_state.h"
-#include "../radeon/r600_cs.h"
+#include "radeon/r600_cs.h"
#include "sid.h"
-/*
- * Shaders
- */
-
-static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_pm4_state *pm4;
- unsigned num_sgprs, num_user_sgprs;
- unsigned nparams, i, vgpr_comp_cnt;
- uint64_t va;
-
- si_pm4_delete_state(sctx, vs, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- /* Certain attributes (position, psize, etc.) don't count as params.
- * VS is required to export at least one param and r600_shader_from_tgsi()
- * takes care of adding a dummy export.
- */
- for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
- switch (shader->shader.output[i].name) {
- case TGSI_SEMANTIC_CLIPVERTEX:
- case TGSI_SEMANTIC_POSITION:
- case TGSI_SEMANTIC_PSIZE:
- break;
- default:
- nparams++;
- }
- }
- if (nparams < 1)
- nparams = 1;
-
- si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(nparams - 1));
-
- si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
- S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE));
-
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
- si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
- si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
-
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- if (num_user_sgprs > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 2;
- }
- assert(num_sgprs <= 104);
-
- vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
-
- si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
- S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B128_SGPRS((num_sgprs - 1) / 8) |
- S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
- si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
- S_00B12C_USER_SGPR(num_user_sgprs) |
- S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
- S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
- S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
- S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
- S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
-
- si_pm4_bind_state(sctx, vs, shader->pm4);
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
-}
+#include "util/u_index_modify.h"
+#include "util/u_upload_mgr.h"
-static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
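+/* Decompress depth and color textures that are bound for sampling.
+ * Skipped while the blitter is running, because the decompress passes
+ * themselves use the blitter. */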
+static void si_decompress_textures(struct si_context *sctx)
{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_pm4_state *pm4;
- unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
- unsigned num_sgprs, num_user_sgprs;
- unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
- uint64_t va;
-
- si_pm4_delete_state(sctx, ps, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
- S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);
-
- for (i = 0; i < shader->shader.ninput; i++) {
- switch (shader->shader.input[i].name) {
- case TGSI_SEMANTIC_POSITION:
- if (shader->shader.input[i].centroid) {
- /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
- * Possible values:
- * 0 -> Position = pixel center (default)
- * 1 -> Position = pixel centroid
- * 2 -> Position = iterated sample number XXX:
- * What does this mean?
- */
- spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
+ if (!sctx->blitter->running) {
+ /* Flush depth textures that need decompression. */
+ for (int i = 0; i < SI_NUM_SHADERS; i++) {
+ if (sctx->samplers[i].depth_texture_mask) {
+ si_flush_depth_textures(sctx, &sctx->samplers[i]);
+ }
+ if (sctx->samplers[i].compressed_colortex_mask) {
+ si_decompress_color_textures(sctx, &sctx->samplers[i]);
}
- /* Fall through */
- case TGSI_SEMANTIC_FACE:
- continue;
}
}
-
- for (i = 0; i < shader->shader.noutput; i++) {
- if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
- db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
- if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
- db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
- }
- if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
- db_shader_control |= S_02880C_KILL_ENABLE(1);
-
- exports_ps = 0;
- for (i = 0; i < shader->shader.noutput; i++) {
- if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
- shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
- exports_ps |= 1;
- }
- if (!exports_ps) {
- /* always at least export 1 component per pixel */
- exports_ps = 2;
- }
-
- spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
- S_0286D8_BC_OPTIMIZE_DISABLE(1);
-
- si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
- spi_ps_input_ena = shader->spi_ps_input_ena;
- /* we need to enable at least one of them, otherwise we hang the GPU */
- assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
- G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));
-
- si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
- si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
- si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
-
- if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
- spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
- else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
- spi_shader_z_format = V_028710_SPI_SHADER_32_R;
- else
- spi_shader_z_format = 0;
- si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
- si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
- shader->spi_shader_col_format);
- si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);
-
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
- si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
- si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
-
- num_user_sgprs = SI_PS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- /* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
- if ((num_user_sgprs + 1) > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 1 + 2;
- }
- assert(num_sgprs <= 104);
-
- si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
- S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B028_SGPRS((num_sgprs - 1) / 8));
- si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
- S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
- S_00B02C_USER_SGPR(num_user_sgprs));
-
- si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
-
- shader->cb0_is_integer = sctx->fb_cb0_is_integer;
- shader->sprite_coord_enable = sctx->sprite_coord_enable;
- si_pm4_bind_state(sctx, ps, shader->pm4);
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
-/*
- * Drawing
- */
-
-static unsigned si_conv_pipe_prim(unsigned pprim)
+static unsigned si_conv_pipe_prim(unsigned mode)
{
static const unsigned prim_conv[] = {
[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
- [PIPE_PRIM_LINES_ADJACENCY] = ~0,
- [PIPE_PRIM_LINE_STRIP_ADJACENCY] = ~0,
- [PIPE_PRIM_TRIANGLES_ADJACENCY] = ~0,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = ~0
+ [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
+ [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
+ [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
+ [R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
};
- unsigned result = prim_conv[pprim];
- if (result == ~0) {
- R600_ERR("unsupported primitive type %d\n", pprim);
- }
- return result;
+ assert(mode < Elements(prim_conv));
+ return prim_conv[mode];
}
static unsigned si_conv_prim_to_gs_out(unsigned mode)
[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
};
assert(mode < Elements(prim_conv));
return prim_conv[mode];
}
-static bool si_update_draw_info_state(struct si_context *sctx,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
+ const struct pipe_draw_info *info)
{
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- struct si_shader *vs = &sctx->vs_shader->current->shader;
- unsigned prim = si_conv_pipe_prim(info->mode);
- unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
- unsigned ls_mask = 0;
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+ unsigned prim = info->mode;
+ unsigned primgroup_size = 128; /* recommended without a GS */
+
+ /* SWITCH_ON_EOP(0) is always preferable. */
+ bool wd_switch_on_eop = false;
+ bool ia_switch_on_eop = false;
+ bool partial_vs_wave = false;
- if (pm4 == NULL)
- return false;
+ if (sctx->gs_shader)
+ primgroup_size = 64; /* recommended with a GS */
- if (prim == ~0) {
- FREE(pm4);
- return false;
+ /* This is a hardware requirement. */
+ if ((rs && rs->line_stipple_enable) ||
+ (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
+ ia_switch_on_eop = true;
+ wd_switch_on_eop = true;
}
- if (sctx->b.chip_class >= CIK) {
- struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
- prim == V_008958_DI_PT_LINELOOP ||
- prim == V_008958_DI_PT_TRIFAN ||
- prim == V_008958_DI_PT_TRISTRIP_ADJ ||
- info->primitive_restart ||
- (rs ? rs->line_stipple_enable : false);
- /* If the WD switch is false, the IA switch must be false too. */
- bool ia_switch_on_eop = wd_switch_on_eop;
+ if (sctx->b.streamout.streamout_enabled ||
+ sctx->b.streamout.prims_gen_query_enabled)
+ partial_vs_wave = true;
- si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
- S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
- S_028AA8_PARTIAL_VS_WAVE_ON(1) |
- S_028AA8_PRIMGROUP_SIZE(63) |
- S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
- si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
- ib->index_size == 4 ? 0xFC000000 : 0xFC00);
+ if (sctx->b.chip_class >= CIK) {
+ /* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
+ * 4 shader engines. Set it to 1 to pass the assertion below.
+ * The other cases are hardware requirements. */
+ if (sctx->b.screen->info.max_se < 4 ||
+ prim == PIPE_PRIM_POLYGON ||
+ prim == PIPE_PRIM_LINE_LOOP ||
+ prim == PIPE_PRIM_TRIANGLE_FAN ||
+ prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
+ info->primitive_restart)
+ wd_switch_on_eop = true;
+
+ /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
+ * We can't know the instance count for indirect draws, so treat
+ * them as always problematic. */
+ if (sctx->b.family == CHIP_HAWAII &&
+ (info->indirect || info->instance_count > 1))
+ wd_switch_on_eop = true;
- si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
- } else {
- si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ /* If the WD switch is false, the IA switch must be false too. */
+ assert(wd_switch_on_eop || !ia_switch_on_eop);
}
- si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
- si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
- info->indexed ? info->index_bias : info->start);
- si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
- si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
- si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
- info->start_instance);
-
- if (prim == V_008958_DI_PT_LINELIST)
- ls_mask = 1;
- else if (prim == V_008958_DI_PT_LINESTRIP)
- ls_mask = 2;
- si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
- S_028A0C_AUTO_RESET_CNTL(ls_mask) |
- sctx->pa_sc_line_stipple);
-
- if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
- S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
- } else {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
- }
- si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
- S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
- S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
- S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
- S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
- S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
- S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
- (sctx->queued.named.rasterizer->clip_plane_enable &
- vs->clip_dist_write));
- si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
- sctx->queued.named.rasterizer->pa_cl_clip_cntl |
- (vs->clip_dist_write ? 0 :
- sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
-
- si_pm4_set_state(sctx, draw_info, pm4);
- return true;
+ return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+ S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
+ S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
+ S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
}
-static void si_update_spi_map(struct si_context *sctx)
+static void si_emit_rasterizer_prim_state(struct si_context *sctx, unsigned mode)
{
- struct si_shader *ps = &sctx->ps_shader->current->shader;
- struct si_shader *vs = &sctx->vs_shader->current->shader;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- unsigned i, j, tmp;
-
- for (i = 0; i < ps->ninput; i++) {
- unsigned name = ps->input[i].name;
- unsigned param_offset = ps->input[i].param_offset;
-
- if (name == TGSI_SEMANTIC_POSITION)
- /* Read from preloaded VGPRs, not parameters */
- continue;
-
-bcolor:
- tmp = 0;
-
- if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
- (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
- sctx->ps_shader->current->key.ps.flatshade)) {
- tmp |= S_028644_FLAT_SHADE(1);
- }
-
- if (name == TGSI_SEMANTIC_GENERIC &&
- sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
- tmp |= S_028644_PT_SPRITE_TEX(1);
- }
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- for (j = 0; j < vs->noutput; j++) {
- if (name == vs->output[j].name &&
- ps->input[i].sid == vs->output[j].sid) {
- tmp |= S_028644_OFFSET(vs->output[j].param_offset);
- break;
- }
- }
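+ /* With a GS, the rasterizer sees the GS output primitive type
+ * rather than the draw mode. */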
+ if (sctx->gs_shader)
+ mode = sctx->gs_shader->gs_output_prim;
- if (j == vs->noutput) {
- /* No corresponding output found, load defaults into input */
- tmp |= S_028644_OFFSET(0x20);
- }
+ if (mode == sctx->last_rast_prim)
+ return;
- si_pm4_set_reg(pm4,
- R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
- tmp);
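+ /* AUTO_RESET_CNTL controls when the stipple pattern restarts:
+ * 1 appears to reset it per line, 2 per line strip, 0 never
+ * (assumed register semantics). */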
+ r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ sctx->pa_sc_line_stipple |
+ S_028A0C_AUTO_RESET_CNTL(mode == PIPE_PRIM_LINES ? 1 :
+ mode == PIPE_PRIM_LINE_STRIP ? 2 : 0));
- if (name == TGSI_SEMANTIC_COLOR &&
- sctx->ps_shader->current->key.ps.color_two_side) {
- name = TGSI_SEMANTIC_BCOLOR;
- param_offset++;
- goto bcolor;
- }
- }
+ r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL,
+ sctx->pa_su_sc_mode_cntl |
+ S_028814_PROVOKING_VTX_LAST(mode == PIPE_PRIM_QUADS ||
+ mode == PIPE_PRIM_QUAD_STRIP ||
+ mode == PIPE_PRIM_POLYGON));
- si_pm4_set_state(sctx, spi, pm4);
+ sctx->last_rast_prim = mode;
}
-static void si_update_derived_state(struct si_context *sctx)
+static void si_emit_draw_registers(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ const struct pipe_index_buffer *ib)
{
- struct pipe_context * ctx = (struct pipe_context*)sctx;
- unsigned vs_dirty = 0, ps_dirty = 0;
-
- if (!sctx->blitter->running) {
- /* Flush depth textures which need to be flushed. */
- for (int i = 0; i < SI_NUM_SHADERS; i++) {
- if (sctx->samplers[i].depth_texture_mask) {
- si_flush_depth_textures(sctx, &sctx->samplers[i]);
- }
- if (sctx->samplers[i].compressed_colortex_mask) {
- si_decompress_color_textures(sctx, &sctx->samplers[i]);
- }
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned prim = si_conv_pipe_prim(info->mode);
+ unsigned gs_out_prim =
+ si_conv_prim_to_gs_out(sctx->gs_shader ?
+ sctx->gs_shader->gs_output_prim :
+ info->mode);
+ unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);
+
+ /* Draw state. */
+ if (prim != sctx->last_prim ||
+ ia_multi_vgt_param != sctx->last_multi_vgt_param) {
+ if (sctx->b.chip_class >= CIK) {
+ radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
+ radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
+ radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
+ radeon_emit(cs, 0); /* VGT_LS_HS_CONFIG */
+ } else {
+ r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
}
+ sctx->last_prim = prim;
+ sctx->last_multi_vgt_param = ia_multi_vgt_param;
}
- si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
-
- if (!sctx->vs_shader->current->pm4) {
- si_pipe_shader_vs(ctx, sctx->vs_shader->current);
- vs_dirty = 0;
+ if (gs_out_prim != sctx->last_gs_out_prim) {
+ r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ sctx->last_gs_out_prim = gs_out_prim;
}
- if (vs_dirty) {
- si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
+ /* Primitive restart. */
+ if (info->primitive_restart != sctx->last_primitive_restart_en) {
+ r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
+ sctx->last_primitive_restart_en = info->primitive_restart;
+
+ if (info->primitive_restart &&
+ (info->restart_index != sctx->last_restart_index ||
+ sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
+ r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
+ info->restart_index);
+ sctx->last_restart_index = info->restart_index;
+ }
}
+}
+static void si_emit_draw_packets(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ const struct pipe_index_buffer *ib)
+{
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
+ R_00B130_SPI_SHADER_USER_DATA_VS_0);
- si_shader_select(ctx, sctx->ps_shader, &ps_dirty);
+ if (info->count_from_stream_output) {
+ struct r600_so_target *t =
+ (struct r600_so_target*)info->count_from_stream_output;
+ uint64_t va = t->buf_filled_size->gpu_address +
+ t->buf_filled_size_offset;
- if (!sctx->ps_shader->current->pm4) {
- si_pipe_shader_ps(ctx, sctx->ps_shader->current);
- ps_dirty = 0;
- }
- if (sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer) {
- si_pipe_shader_ps(ctx, sctx->ps_shader->current);
- ps_dirty = 0;
- }
+ r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ t->stride_in_dw);
- if (ps_dirty) {
- si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
- }
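+ /* Copy the streamout "buffer filled size" from memory into
+ * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE, which the CP uses to
+ * derive the vertex count for draw-auto. */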
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, va); /* src address lo */
+ radeon_emit(cs, va >> 32); /* src address hi */
+ radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
+ radeon_emit(cs, 0); /* unused */
- if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
- /* XXX: Emitting the PS state even when only the VS changed
- * fixes random failures with piglit glsl-max-varyings.
- * Not sure why...
- */
- sctx->emitted.named.ps = NULL;
- si_update_spi_map(sctx);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ t->buf_filled_size, RADEON_USAGE_READ,
+ RADEON_PRIO_MIN);
}
-}
-static void si_vertex_buffer_update(struct si_context *sctx)
-{
- struct pipe_context *ctx = &sctx->b.b;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- bool bound[PIPE_MAX_ATTRIBS] = {};
- unsigned i, count;
- uint64_t va;
+ /* draw packet */
+ if (info->indexed) {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
-
- count = sctx->vertex_elements->count;
- assert(count <= 256 / 4);
-
- si_pm4_sh_data_begin(pm4);
- for (i = 0 ; i < count; i++) {
- struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
- struct pipe_vertex_buffer *vb;
- struct r600_resource *rbuffer;
- unsigned offset;
-
- if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
- continue;
-
- vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
- rbuffer = (struct r600_resource*)vb->buffer;
- if (rbuffer == NULL)
- continue;
-
- offset = 0;
- offset += vb->buffer_offset;
- offset += ve->src_offset;
-
- va = r600_resource_va(ctx->screen, (void*)rbuffer);
- va += offset;
-
- /* Fill in T# buffer resource description */
- si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
- si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
- S_008F04_STRIDE(vb->stride)));
- if (vb->stride)
- /* Round up by rounding down and adding 1 */
- si_pm4_sh_data_add(pm4,
- (vb->buffer->width0 - offset -
- util_format_get_blocksize(ve->src_format)) /
- vb->stride + 1);
- else
- si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
- si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);
-
- if (!bound[ve->vertex_buffer_index]) {
- si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
- bound[ve->vertex_buffer_index] = true;
+ if (ib->index_size == 4) {
+ radeon_emit(cs, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
+ V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ } else {
+ radeon_emit(cs, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
+ V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
}
}
- si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
- si_pm4_set_state(sctx, vertex_buffers, pm4);
-}
-static void si_state_draw(struct si_context *sctx,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
-{
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
+ if (!info->indirect) {
+ int base_vertex;
- if (pm4 == NULL)
- return;
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
+ radeon_emit(cs, info->instance_count);
- /* queries need some special values
- * (this is non-zero if any query is active) */
- if (sctx->b.num_occlusion_queries > 0) {
- if (sctx->b.chip_class >= CIK) {
- si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
- S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
- S_028004_ZPASS_ENABLE(1) |
- S_028004_SLICE_EVEN_ENABLE(1) |
- S_028004_SLICE_ODD_ENABLE(1));
- } else {
- si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
- S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->fb_log_samples));
- }
- }
+ /* Base vertex and start instance. */
+ base_vertex = info->indexed ? info->index_bias : info->start;
- if (info->count_from_stream_output) {
- struct r600_so_target *t =
- (struct r600_so_target*)info->count_from_stream_output;
- uint64_t va = r600_resource_va(&sctx->screen->b.b,
- &t->buf_filled_size->b.b);
- va += t->buf_filled_size_offset;
-
- si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
- t->stride_in_dw);
-
- si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
- si_pm4_cmd_add(pm4,
- COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_WR_CONFIRM);
- si_pm4_cmd_add(pm4, va); /* src address lo */
- si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
- si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
- si_pm4_cmd_add(pm4, 0); /* unused */
- si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
- si_pm4_cmd_end(pm4, true);
- }
+ if (base_vertex != sctx->last_base_vertex ||
+ sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
+ info->start_instance != sctx->last_start_instance ||
+ sh_base_reg != sctx->last_sh_base_reg) {
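+ /* SI_SGPR_BASE_VERTEX and SI_SGPR_START_INSTANCE are consecutive
+ * user SGPRs, so both fit in one SET_SH_REG sequence. */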
+ si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+ radeon_emit(cs, base_vertex);
+ radeon_emit(cs, info->start_instance);
- /* draw packet */
- si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
- if (ib->index_size == 4) {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ sctx->last_base_vertex = base_vertex;
+ sctx->last_start_instance = info->start_instance;
+ sctx->last_sh_base_reg = sh_base_reg;
+ }
} else {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
- }
- si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
+ si_invalidate_draw_sh_constants(sctx);
- si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
- si_pm4_cmd_add(pm4, info->instance_count);
- si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_MIN);
+ }
if (info->indexed) {
- uint32_t max_size = (ib->buffer->width0 - ib->offset) /
- sctx->index_buffer.index_size;
- uint64_t va;
- va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
- va += ib->offset;
-
- si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
- si_cmd_draw_index_2(pm4, max_size, va, info->count,
- V_0287F0_DI_SRC_SEL_DMA,
- sctx->b.predicate_drawing);
+ uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
+ ib->index_size;
+ uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ (struct r600_resource *)ib->buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_MIN);
+
+ if (info->indirect) {
+ uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
+
+ assert(indirect_va % 8 == 0);
+ assert(index_va % 2 == 0);
+ assert(info->indirect_offset % 4 == 0);
+
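+ /* SET_BASE with base index 1 sets the base address for
+ * indirect draws (assumed packet semantics). */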
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, indirect_va);
+ radeon_emit(cs, indirect_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
+ radeon_emit(cs, index_va);
+ radeon_emit(cs, index_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
+ radeon_emit(cs, index_max_size);
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->indirect_offset);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
+ } else {
+ index_va += info->start * ib->index_size;
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
+ radeon_emit(cs, index_max_size);
+ radeon_emit(cs, index_va);
+ radeon_emit(cs, (index_va >> 32UL) & 0xFF);
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
+ }
} else {
- uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
- si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->b.predicate_drawing);
+ if (info->indirect) {
+ uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
+
+ assert(indirect_va % 8 == 0);
+ assert(info->indirect_offset % 4 == 0);
+
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, indirect_va);
+ radeon_emit(cs, indirect_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->indirect_offset);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
+ S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
+ }
}
-
- si_pm4_set_state(sctx, draw, pm4);
}
+#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)
+
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
uint32_t cp_coher_cntl = 0;
-
- /* XXX SI flushes both ICACHE and KCACHE if either flag is set.
- * XXX CIK shouldn't have this issue. Test CIK before separating the flags
- * XXX to ensure there is no regression. Also find out if there is another
- * XXX way to flush either ICACHE or KCACHE but not both for SI. */
- if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
- R600_CONTEXT_INV_CONST_CACHE)) {
- cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
- S_0085F0_SH_KCACHE_ACTION_ENA(1);
- }
- if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
- R600_CONTEXT_STREAMOUT_FLUSH)) {
- cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
- S_0085F0_TCL1_ACTION_ENA(1);
+ uint32_t sqc_caches = 0;
+ uint32_t compute =
+ PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
+
+ /* SI has a bug where it always flushes both ICACHE and KCACHE if
+ * either bit is set. Writing SQC_CACHES instead avoids this. */
+ if (sctx->chip_class == SI &&
+ sctx->flags & BOTH_ICACHE_KCACHE &&
+ (sctx->flags & BOTH_ICACHE_KCACHE) != BOTH_ICACHE_KCACHE) {
+ sqc_caches =
+ S_008C08_INST_INVALIDATE(!!(sctx->flags & SI_CONTEXT_INV_ICACHE)) |
+ S_008C08_DATA_INVALIDATE(!!(sctx->flags & SI_CONTEXT_INV_KCACHE));
+ } else {
+ if (sctx->flags & SI_CONTEXT_INV_ICACHE)
+ cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_KCACHE)
+ cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
+
+ if (sctx->flags & SI_CONTEXT_INV_TC_L1)
+ cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_TC_L2)
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
+
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB1_DEST_BASE_ENA(1) |
S_0085F0_CB6_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
S_0085F0_DB_DEST_BASE_ENA(1);
}
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
+ EVENT_WRITE_INV_L2);
+ }
+
+ /* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
+ * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
+ *
+ * I think the reason is that FLUSH_AND_INV is only added to a queue
+ * and it is PS_PARTIAL_FLUSH that waits for it to complete.
+ */
+ if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ } else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
+
+ /* SURFACE_SYNC must be emitted after partial flushes.
+ * It looks like SURFACE_SYNC flushes caches immediately and doesn't
+ * wait for any engines. This should be last.
+ */
+ if (sqc_caches) {
+ r600_write_config_reg(cs, R_008C08_SQC_CACHES, sqc_caches);
+ cs->buf[cs->cdw-3] |= compute; /* set the compute bit in the header */
+ }
if (cp_coher_cntl) {
if (sctx->chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
} else {
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0); /* CP_COHER_BASE */
}
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
- }
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
- }
-
- if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
- R600_CONTEXT_PS_PARTIAL_FLUSH)) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- } else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
- /* Needed if streamout buffers are going to be used as a source. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- }
-
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 24 }; /* number of CS dwords */
+
+static void si_get_draw_start_count(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned *start, unsigned *count)
+{
+ if (info->indirect) {
+ struct r600_resource *indirect =
+ (struct r600_resource*)info->indirect;
+ int *data = r600_buffer_map_sync_with_rings(&sctx->b,
+ indirect, PIPE_TRANSFER_READ);
+ data += info->indirect_offset/sizeof(int);
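+ /* Assumed indirect argument layout: [0] = count,
+ * [1] = instance_count, [2] = start. */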
+ *start = data[2];
+ *count = data[0];
+ } else {
+ *start = info->start;
+ *count = info->count;
+ }
+}
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct pipe_index_buffer ib = {};
uint32_t i;
- if (!info->count && (info->indexed || !info->count_from_stream_output))
+ if (!info->count && !info->indirect &&
+ (info->indexed || !info->count_from_stream_output))
return;
if (!sctx->ps_shader || !sctx->vs_shader)
return;
- si_update_derived_state(sctx);
- si_vertex_buffer_update(sctx);
+ si_decompress_textures(sctx);
+ si_update_shaders(sctx);
+
+ if (sctx->vertex_buffers_dirty) {
+ si_update_vertex_buffers(sctx);
+ sctx->vertex_buffers_dirty = false;
+ }
if (info->indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
ib.user_buffer = sctx->index_buffer.user_buffer;
ib.index_size = sctx->index_buffer.index_size;
- ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;
+ ib.offset = sctx->index_buffer.offset;
/* Translate or upload, if needed. */
if (ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
- unsigned out_offset;
+ unsigned out_offset, start, count, start_offset;
void *ptr;
- u_upload_alloc(sctx->b.uploader, 0, info->count * 2,
+ si_get_draw_start_count(sctx, info, &start, &count);
+ start_offset = start * ib.index_size;
+
+ u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
&out_offset, &out_buffer, &ptr);
- util_shorten_ubyte_elts_to_userptr(
- &sctx->b.b, &ib, 0, ib.offset, info->count, ptr);
+ util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
+ ib.offset + start_offset,
+ count, ptr);
pipe_resource_reference(&ib.buffer, NULL);
ib.user_buffer = NULL;
ib.buffer = out_buffer;
- ib.offset = out_offset;
+ /* info->start will be added by the drawing code */
+ ib.offset = out_offset - start_offset;
ib.index_size = 2;
- }
+ } else if (ib.user_buffer && !ib.buffer) {
+ unsigned start, count, start_offset;
+
+ si_get_draw_start_count(sctx, info, &start, &count);
+ start_offset = start * ib.index_size;
- if (ib.user_buffer && !ib.buffer) {
- u_upload_data(sctx->b.uploader, 0, info->count * ib.index_size,
- ib.user_buffer, &ib.offset, &ib.buffer);
+ u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
+ (char*)ib.user_buffer + start_offset,
+ &ib.offset, &ib.buffer);
+ /* info->start will be added by the drawing code */
+ ib.offset -= start_offset;
}
}
- if (!si_update_draw_info_state(sctx, info, &ib))
- return;
-
- si_state_draw(sctx, info, &ib);
-
- sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);
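+ /* If the index buffer was last written through TC L2 (e.g. by the
+ * CP or a shader), flush L2 first, since the index fetch likely
+ * bypasses it. */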
+ if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
+ r600_resource(ib.buffer)->TC_L2_dirty = false;
+ }
/* Check flush flags. */
if (sctx->b.flags)
- sctx->atoms.cache_flush->dirty = true;
+ sctx->atoms.s.cache_flush->dirty = true;
+
+ if (sctx->emit_scratch_reloc) {
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ sctx->spi_tmpring_size);
+
+ if (sctx->scratch_buffer) {
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ sctx->scratch_buffer, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SHADER_RESOURCE_RW);
+
+ }
+ sctx->emit_scratch_reloc = false;
+ }
si_need_cs_space(sctx, 0, TRUE);
}
si_pm4_emit_dirty(sctx);
- sctx->pm4_dirty_cdwords = 0;
+ si_emit_rasterizer_prim_state(sctx, info->mode);
+ si_emit_draw_registers(sctx, info, &ib);
+ si_emit_draw_packets(sctx, info, &ib);
#if SI_TRACE_CS
if (sctx->screen->b.trace_bo) {
}
#endif
+ /* Workaround for a VGT hang when streamout is enabled.
+ * It must be done after drawing. */
+ if (sctx->b.family == CHIP_HAWAII &&
+ (sctx->b.streamout.streamout_enabled ||
+ sctx->b.streamout.prims_gen_query_enabled)) {
+ sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
+ }
+
/* Set the depth buffer as dirty. */
- if (sctx->framebuffer.zsbuf) {
- struct pipe_surface *surf = sctx->framebuffer.zsbuf;
+ if (sctx->framebuffer.state.zsbuf) {
+ struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
struct r600_texture *rtex = (struct r600_texture *)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
}
- if (sctx->fb_compressed_cb_mask) {
+ if (sctx->framebuffer.compressed_cb_mask) {
struct pipe_surface *surf;
struct r600_texture *rtex;
- unsigned mask = sctx->fb_compressed_cb_mask;
+ unsigned mask = sctx->framebuffer.compressed_cb_mask;
do {
unsigned i = u_bit_scan(&mask);
- surf = sctx->framebuffer.cbufs[i];
+ surf = sctx->framebuffer.state.cbufs[i];
rtex = (struct r600_texture*)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
pipe_resource_reference(&ib.buffer, NULL);
sctx->b.num_draw_calls++;
}
+
+#if SI_TRACE_CS
+void si_trace_emit(struct si_context *sctx)
+{
+ struct si_screen *sscreen = sctx->screen;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ uint64_t va;
+
+ va = sscreen->b.trace_bo->gpu_address;
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
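+ /* Write the current CS dword count and a per-CS counter into the
+ * trace buffer; after a hang this shows how far the CP got. */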
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
+ PKT3_WRITE_DATA_WR_CONFIRM |
+ PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
+ radeon_emit(cs, va & 0xFFFFFFFFUL);
+ radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
+ radeon_emit(cs, cs->cdw);
+ radeon_emit(cs, sscreen->b.cs_count);
+}
+#endif