#include "radeon/r600_cs.h"
#include "sid.h"
-#include "util/u_format.h"
#include "util/u_index_modify.h"
-#include "util/u_memory.h"
-#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
+#include "util/u_prim.h"
-/*
- * Shaders
- */
-
-static void si_pipe_shader_es(struct pipe_context *ctx, struct si_pipe_shader *shader)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_pm4_state *pm4;
- unsigned num_sgprs, num_user_sgprs;
- unsigned vgpr_comp_cnt;
- uint64_t va;
-
- si_pm4_delete_state(sctx, es, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- va = shader->bo->gpu_address;
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
-
- vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
-
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- /* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
- if ((num_user_sgprs + 1) > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 1 + 2;
- }
- assert(num_sgprs <= 104);
-
- si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
- si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
- si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
- S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B328_SGPRS((num_sgprs - 1) / 8) |
- S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
- si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
- S_00B32C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
-}
-
-static void si_pipe_shader_gs(struct pipe_context *ctx, struct si_pipe_shader *shader)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- unsigned gs_vert_itemsize = shader->shader.noutput * (16 >> 2);
- unsigned gs_max_vert_out = shader->shader.gs_max_out_vertices;
- unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
- unsigned cut_mode;
- struct si_pm4_state *pm4;
- unsigned num_sgprs, num_user_sgprs;
- uint64_t va;
-
- /* The GSVS_RING_ITEMSIZE register takes 15 bits */
- assert(gsvs_itemsize < (1 << 15));
-
- si_pm4_delete_state(sctx, gs, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- if (gs_max_vert_out <= 128) {
- cut_mode = V_028A40_GS_CUT_128;
- } else if (gs_max_vert_out <= 256) {
- cut_mode = V_028A40_GS_CUT_256;
- } else if (gs_max_vert_out <= 512) {
- cut_mode = V_028A40_GS_CUT_512;
- } else {
- assert(gs_max_vert_out <= 1024);
- cut_mode = V_028A40_GS_CUT_1024;
- }
-
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
- S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
- S_028A40_CUT_MODE(cut_mode)|
- S_028A40_ES_WRITE_OPTIMIZE(1) |
- S_028A40_GS_WRITE_OPTIMIZE(1));
-
- si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
- si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
- si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);
-
- si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
- shader->shader.nparam * (16 >> 2));
- si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);
-
- si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);
-
- si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);
-
- va = shader->bo->gpu_address;
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
- si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
- si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
-
- num_user_sgprs = SI_GS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- /* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
- if ((num_user_sgprs + 2) > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 2 + 2;
- }
- assert(num_sgprs <= 104);
-
- si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
- S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B228_SGPRS((num_sgprs - 1) / 8));
- si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
- S_00B22C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
-}
-
-static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_pm4_state *pm4;
- unsigned num_sgprs, num_user_sgprs;
- unsigned nparams, i, vgpr_comp_cnt;
- uint64_t va;
-
- si_pm4_delete_state(sctx, vs, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- va = shader->bo->gpu_address;
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
-
- vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
-
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- if (num_user_sgprs > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 2;
- }
- assert(num_sgprs <= 104);
-
- /* Certain attributes (position, psize, etc.) don't count as params.
- * VS is required to export at least one param and r600_shader_from_tgsi()
- * takes care of adding a dummy export.
- */
- for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
- switch (shader->shader.output[i].name) {
- case TGSI_SEMANTIC_CLIPVERTEX:
- case TGSI_SEMANTIC_POSITION:
- case TGSI_SEMANTIC_PSIZE:
- break;
- default:
- nparams++;
- }
- }
- if (nparams < 1)
- nparams = 1;
-
- si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(nparams - 1));
-
- si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
- S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE));
-
- si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
- si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
- si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
- S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B128_SGPRS((num_sgprs - 1) / 8) |
- S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
- si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
- S_00B12C_USER_SGPR(num_user_sgprs) |
- S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
- S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
- S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
- S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
- S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
-}
-
-static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
+static void si_decompress_textures(struct si_context *sctx)
{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_pm4_state *pm4;
- unsigned i, spi_ps_in_control, db_shader_control;
- unsigned num_sgprs, num_user_sgprs;
- unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
- uint64_t va;
-
- si_pm4_delete_state(sctx, ps, shader->pm4);
- pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
-
- db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
- S_02880C_ALPHA_TO_MASK_DISABLE(sctx->framebuffer.cb0_is_integer);
-
- for (i = 0; i < shader->shader.ninput; i++) {
- switch (shader->shader.input[i].name) {
- case TGSI_SEMANTIC_POSITION:
- if (shader->shader.input[i].centroid) {
- /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
- * Possible vaules:
- * 0 -> Position = pixel center (default)
- * 1 -> Position = pixel centroid
- * 2 -> Position = iterated sample number XXX:
- * What does this mean?
- */
- spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
+ if (!sctx->blitter->running) {
+ /* Flush depth textures which need to be flushed. */
+ for (int i = 0; i < SI_NUM_SHADERS; i++) {
+ if (sctx->samplers[i].depth_texture_mask) {
+ si_flush_depth_textures(sctx, &sctx->samplers[i]);
+ }
+ if (sctx->samplers[i].compressed_colortex_mask) {
+ si_decompress_color_textures(sctx, &sctx->samplers[i]);
}
- /* Fall through */
- case TGSI_SEMANTIC_FACE:
- continue;
}
}
-
- db_shader_control |= shader->db_shader_control;
-
- if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
- db_shader_control |= S_02880C_KILL_ENABLE(1);
-
- spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.nparam) |
- S_0286D8_BC_OPTIMIZE_DISABLE(1);
-
- si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
- spi_ps_input_ena = shader->spi_ps_input_ena;
- /* we need to enable at least one of them, otherwise we hang the GPU */
- assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
- G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
- G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
- G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));
-
- si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
- si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
- si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
-
- si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
- si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
- shader->spi_shader_col_format);
- si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);
-
- va = shader->bo->gpu_address;
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
- si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
- si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
-
- num_user_sgprs = SI_PS_NUM_USER_SGPR;
- num_sgprs = shader->num_sgprs;
- /* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
- if ((num_user_sgprs + 1) > num_sgprs) {
- /* Last 2 reserved SGPRs are used for VCC */
- num_sgprs = num_user_sgprs + 1 + 2;
- }
- assert(num_sgprs <= 104);
-
- si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
- S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
- S_00B028_SGPRS((num_sgprs - 1) / 8));
- si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
- S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
- S_00B02C_USER_SGPR(num_user_sgprs));
-
- si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
-
- shader->cb0_is_integer = sctx->framebuffer.cb0_is_integer;
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
-/*
- * Drawing
- */
-
-static unsigned si_conv_pipe_prim(unsigned pprim)
+static unsigned si_conv_pipe_prim(unsigned mode)
{
static const unsigned prim_conv[] = {
[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
+ [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
};
- unsigned result = prim_conv[pprim];
- if (result == ~0) {
- R600_ERR("unsupported primitive type %d\n", pprim);
- }
- return result;
+ assert(mode < Elements(prim_conv));
+ return prim_conv[mode];
}
static unsigned si_conv_prim_to_gs_out(unsigned mode)
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
};
assert(mode < Elements(prim_conv));
return prim_conv[mode];
}
+/**
+ * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
+ * LS.LDS_SIZE is shared by all 3 shader stages.
+ *
+ * The information about LDS and other non-compile-time parameters is then
+ * written to userdata SGPRs.
+ */
+static void si_emit_derived_tess_state(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned *num_patches)
+{
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct si_shader_selector *ls = sctx->vs_shader;
+ /* The TES pointer will only be used for sctx->last_tcs.
+ * It would be wrong to think that TCS = TES. */
+ struct si_shader_selector *tcs =
+ sctx->tcs_shader ? sctx->tcs_shader : sctx->tes_shader;
+ unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
+ unsigned num_tcs_input_cp = info->vertices_per_patch;
+ unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
+ unsigned num_tcs_patch_outputs;
+ unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
+ unsigned input_patch_size, output_patch_size, output_patch0_offset;
+ unsigned perpatch_output_offset, lds_size, ls_rsrc2;
+ unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
+
+ *num_patches = 1; /* TODO: calculate this */
+
+ if (sctx->last_ls == ls->current &&
+ sctx->last_tcs == tcs &&
+ sctx->last_tes_sh_base == tes_sh_base &&
+ sctx->last_num_tcs_input_cp == num_tcs_input_cp)
+ return;
+
+ sctx->last_ls = ls->current;
+ sctx->last_tcs = tcs;
+ sctx->last_tes_sh_base = tes_sh_base;
+ sctx->last_num_tcs_input_cp = num_tcs_input_cp;
+
+ /* This calculates how shader inputs and outputs among VS, TCS, and TES
+ * are laid out in LDS. */
+ num_tcs_inputs = util_last_bit64(ls->outputs_written);
+
+ if (sctx->tcs_shader) {
+ num_tcs_outputs = util_last_bit64(tcs->outputs_written);
+ num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
+ num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
+ } else {
+ /* No TCS. Route varyings from LS to TES. */
+ num_tcs_outputs = num_tcs_inputs;
+ num_tcs_output_cp = num_tcs_input_cp;
+ num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
+ }
+
+ input_vertex_size = num_tcs_inputs * 16;
+ output_vertex_size = num_tcs_outputs * 16;
+
+ input_patch_size = num_tcs_input_cp * input_vertex_size;
+
+ pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
+ output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
+
+ output_patch0_offset = sctx->tcs_shader ? input_patch_size * *num_patches : 0;
+ perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
+
+ lds_size = output_patch0_offset + output_patch_size * *num_patches;
+ ls_rsrc2 = ls->current->ls_rsrc2;
+
+ if (sctx->b.chip_class >= CIK) {
+ assert(lds_size <= 65536);
+ ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
+ } else {
+ assert(lds_size <= 32768);
+ ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
+ }
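+ /* Illustrative sizing (hypothetical patch, not from the source): with
+ * 3 input CPs carrying 2 LS outputs each, 4 output CPs with 2 TCS
+ * outputs each and 2 patch outputs, input_patch_size = 3*2*16 = 96 and
+ * output_patch_size = 4*2*16 + 2*16 = 160, i.e. 256 bytes of LDS per
+ * patch. As the code above shows, LDS_SIZE is programmed in 512-byte
+ * granules on CIK+ and in 256-byte granules on SI. */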
+
+ /* Due to a hw bug, RSRC2_LS must be written twice with another
+ * LS register written in between. */
+ if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
+ si_write_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
+ si_write_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
+ radeon_emit(cs, ls->current->ls_rsrc1);
+ radeon_emit(cs, ls_rsrc2);
+
+ /* Compute userdata SGPRs. */
+ assert(((input_vertex_size / 4) & ~0xff) == 0);
+ assert(((output_vertex_size / 4) & ~0xff) == 0);
+ assert(((input_patch_size / 4) & ~0x1fff) == 0);
+ assert(((output_patch_size / 4) & ~0x1fff) == 0);
+ assert(((output_patch0_offset / 16) & ~0xffff) == 0);
+ assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
+ assert(num_tcs_input_cp <= 32);
+ assert(num_tcs_output_cp <= 32);
+
+ tcs_in_layout = (input_patch_size / 4) |
+ ((input_vertex_size / 4) << 13);
+ tcs_out_layout = (output_patch_size / 4) |
+ ((output_vertex_size / 4) << 13);
+ tcs_out_offsets = (output_patch0_offset / 16) |
+ ((perpatch_output_offset / 16) << 16);
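+ /* Resulting SGPR layouts, as implied by the asserts above:
+ * tcs_in_layout: [12:0] = input_patch_size in dwords,
+ * [20:13] = input_vertex_size in dwords
+ * tcs_out_layout: [12:0] = output_patch_size in dwords,
+ * [20:13] = output_vertex_size in dwords
+ * tcs_out_offsets: [15:0] = output_patch0_offset / 16,
+ * [31:16] = perpatch_output_offset / 16 */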
+
+ /* Set them for LS. */
+ si_write_sh_reg(cs,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
+ tcs_in_layout);
+
+ /* Set them for TCS. */
+ si_write_sh_reg_seq(cs,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
+ radeon_emit(cs, tcs_out_offsets);
+ radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
+ radeon_emit(cs, tcs_in_layout);
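+ /* Bits [31:26] of the layout SGPR carry the vertices-per-patch count:
+ * the input CP count for TCS (which indexes LS outputs) and the output
+ * CP count for TES (which indexes TCS outputs), as emitted below. */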
+
+ /* Set them for TES. */
+ si_write_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
+ radeon_emit(cs, tcs_out_offsets);
+ radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
+}
+
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned num_patches)
{
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned prim = info->mode;
/* SWITCH_ON_EOP(0) is always preferable. */
bool wd_switch_on_eop = false;
bool ia_switch_on_eop = false;
+ bool ia_switch_on_eoi = false;
bool partial_vs_wave = false;
+ bool partial_es_wave = false;
if (sctx->gs_shader)
primgroup_size = 64; /* recommended with a GS */
+ if (sctx->tes_shader) {
+ unsigned num_cp_out =
+ sctx->tcs_shader ?
+ sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ info->vertices_per_patch;
+ unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);
+
+ primgroup_size = MIN2(primgroup_size, max_size);
+
+ /* primgroup_size must be set to a multiple of NUM_PATCHES */
+ primgroup_size = (primgroup_size / num_patches) * num_patches;
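+ /* e.g. primgroup_size = 64 with num_patches = 6 gives 60 */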
+
+ /* SWITCH_ON_EOI must be set if PrimID is used.
+ * If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
+ if ((sctx->tcs_shader && sctx->tcs_shader->info.uses_primid) ||
+ sctx->tes_shader->info.uses_primid) {
+ ia_switch_on_eoi = true;
+ partial_es_wave = true;
+ }
+
+ /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
+ if ((sctx->b.family == CHIP_TAHITI ||
+ sctx->b.family == CHIP_PITCAIRN ||
+ sctx->b.family == CHIP_BONAIRE) &&
+ sctx->gs_shader)
+ partial_vs_wave = true;
+ }
+
/* This is a hardware requirement. */
if ((rs && rs->line_stipple_enable) ||
(sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
(info->indirect || info->instance_count > 1))
wd_switch_on_eop = true;
+ /* USE_OPAQUE doesn't work when WD_SWITCH_ON_EOP is 0. */
+ if (info->count_from_stream_output)
+ wd_switch_on_eop = true;
+
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
+ /* Hw bug with single-primitive instances and SWITCH_ON_EOI
+ * on multi-SE chips. */
+ if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
+ (info->indirect ||
+ (info->instance_count > 1 &&
+ u_prims_for_vertices(info->mode, info->count) <= 1)))
+ sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
+
+ /* Instancing bug on 2 SE chips. */
+ if (sctx->b.screen->info.max_se == 2 && ia_switch_on_eoi &&
+ (info->indirect || info->instance_count > 1))
+ partial_vs_wave = true;
+
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+ S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
+ S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
- S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
+ S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ? 2 : 0);
}
-static bool si_update_draw_info_state(struct si_context *sctx,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+static unsigned si_get_ls_hs_config(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned num_patches)
{
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- struct si_shader *vs = si_get_vs_state(sctx);
- unsigned prim = si_conv_pipe_prim(info->mode);
- unsigned gs_out_prim =
- si_conv_prim_to_gs_out(sctx->gs_shader ?
- sctx->gs_shader->current->shader.gs_output_prim :
- info->mode);
- unsigned ls_mask = 0;
- unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);
-
- if (pm4 == NULL)
- return false;
-
- if (prim == ~0) {
- FREE(pm4);
- return false;
- }
+ unsigned num_output_cp;
- if (sctx->b.chip_class >= CIK) {
- si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
- ib->index_size == 4 ? 0xFC000000 : 0xFC00);
-
- si_pm4_cmd_begin(pm4, PKT3_DRAW_PREAMBLE);
- si_pm4_cmd_add(pm4, prim); /* VGT_PRIMITIVE_TYPE */
- si_pm4_cmd_add(pm4, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
- si_pm4_cmd_add(pm4, 0); /* VGT_LS_HS_CONFIG */
- si_pm4_cmd_end(pm4, false);
- } else {
- si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
- si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
- }
+ if (!sctx->tes_shader)
+ return 0;
- si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
- si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
- si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
-
- if (prim == V_008958_DI_PT_LINELIST)
- ls_mask = 1;
- else if (prim == V_008958_DI_PT_LINESTRIP)
- ls_mask = 2;
- si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
- S_028A0C_AUTO_RESET_CNTL(ls_mask) |
- sctx->pa_sc_line_stipple);
-
- if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
- S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
- } else {
- si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
- }
- si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
- S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
- S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
- S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
- S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
- S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
- S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
- (sctx->queued.named.rasterizer->clip_plane_enable &
- vs->clip_dist_write));
- si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
- sctx->queued.named.rasterizer->pa_cl_clip_cntl |
- (vs->clip_dist_write ? 0 :
- sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
-
- si_pm4_set_state(sctx, draw_info, pm4);
- return true;
+ num_output_cp = sctx->tcs_shader ?
+ sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ info->vertices_per_patch;
+
+ return S_028B58_NUM_PATCHES(num_patches) |
+ S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
+ S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}
-static void si_update_spi_map(struct si_context *sctx)
+static void si_emit_scratch_reloc(struct si_context *sctx)
{
- struct si_shader *ps = &sctx->ps_shader->current->shader;
- struct si_shader *vs = si_get_vs_state(sctx);
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- unsigned i, j, tmp;
-
- for (i = 0; i < ps->ninput; i++) {
- unsigned name = ps->input[i].name;
- unsigned param_offset = ps->input[i].param_offset;
-
- if (name == TGSI_SEMANTIC_POSITION)
- /* Read from preloaded VGPRs, not parameters */
- continue;
-
-bcolor:
- tmp = 0;
-
- if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
- (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
- sctx->ps_shader->current->key.ps.flatshade)) {
- tmp |= S_028644_FLAT_SHADE(1);
- }
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- if (name == TGSI_SEMANTIC_GENERIC &&
- sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
- tmp |= S_028644_PT_SPRITE_TEX(1);
- }
+ if (!sctx->emit_scratch_reloc)
+ return;
- for (j = 0; j < vs->noutput; j++) {
- if (name == vs->output[j].name &&
- ps->input[i].sid == vs->output[j].sid) {
- tmp |= S_028644_OFFSET(vs->output[j].param_offset);
- break;
- }
- }
+ r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ sctx->spi_tmpring_size);
- if (j == vs->noutput) {
- /* No corresponding output found, load defaults into input */
- tmp |= S_028644_OFFSET(0x20);
- }
+ if (sctx->scratch_buffer) {
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ sctx->scratch_buffer, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SHADER_RESOURCE_RW);
- si_pm4_set_reg(pm4,
- R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
- tmp);
-
- if (name == TGSI_SEMANTIC_COLOR &&
- sctx->ps_shader->current->key.ps.color_two_side) {
- name = TGSI_SEMANTIC_BCOLOR;
- param_offset++;
- goto bcolor;
- }
}
-
- si_pm4_set_state(sctx, spi, pm4);
+ sctx->emit_scratch_reloc = false;
}
-/* Initialize state related to ESGS / GSVS ring buffers */
-static void si_init_gs_rings(struct si_context *sctx)
+/* rast_prim is the primitive type after GS and tessellation. */
+static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- unsigned size = 128 * 1024;
-
- assert(!sctx->gs_rings);
- sctx->gs_rings = si_pm4_alloc_state(sctx);
-
- sctx->esgs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->esgs_ring.buffer_size = size;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned rast_prim = sctx->current_rast_prim;
+ struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
+
+ /* Skip this if not rendering lines. */
+ if (rast_prim != PIPE_PRIM_LINES &&
+ rast_prim != PIPE_PRIM_LINE_LOOP &&
+ rast_prim != PIPE_PRIM_LINE_STRIP &&
+ rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
+ rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
+ return;
- size = 64 * 1024 * 1024;
- sctx->gsvs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->gsvs_ring.buffer_size = size;
+ if (rast_prim == sctx->last_rast_prim &&
+ rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
+ return;
- if (sctx->b.chip_class >= CIK) {
- si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
- si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
- } else {
- si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
- si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
- }
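+ /* AUTO_RESET_CNTL: 1 is assumed to reset the stipple pattern per line
+ * (line lists) and 2 per packet (line strips), which is the same
+ * mapping the old ls_mask logic used. */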
+ r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ rs->pa_sc_line_stipple |
+ S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
+ rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));
- si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
- true, true, 4, 64);
- si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
- false, false, 0, 0);
- si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
- &sctx->gsvs_ring, 0, sctx->gsvs_ring.buffer_size,
- false, false, 0, 0);
+ sctx->last_rast_prim = rast_prim;
+ sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
-static void si_update_derived_state(struct si_context *sctx)
+static void si_emit_draw_registers(struct si_context *sctx,
+ const struct pipe_draw_info *info)
{
- struct pipe_context * ctx = (struct pipe_context*)sctx;
-
- if (!sctx->blitter->running) {
- /* Flush depth textures which need to be flushed. */
- for (int i = 0; i < SI_NUM_SHADERS; i++) {
- if (sctx->samplers[i].depth_texture_mask) {
- si_flush_depth_textures(sctx, &sctx->samplers[i]);
- }
- if (sctx->samplers[i].compressed_colortex_mask) {
- si_decompress_color_textures(sctx, &sctx->samplers[i]);
- }
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned prim = si_conv_pipe_prim(info->mode);
+ unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
+ unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;
+
+ if (sctx->tes_shader)
+ si_emit_derived_tess_state(sctx, info, &num_patches);
+
+ ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
+ ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);
+
+ /* Draw state. */
+ if (prim != sctx->last_prim ||
+ ia_multi_vgt_param != sctx->last_multi_vgt_param ||
+ ls_hs_config != sctx->last_ls_hs_config) {
+ if (sctx->b.chip_class >= CIK) {
+ radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
+ radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
+ radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
+ radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
+ } else {
+ r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
+ r600_write_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
+ sctx->last_prim = prim;
+ sctx->last_multi_vgt_param = ia_multi_vgt_param;
+ sctx->last_ls_hs_config = ls_hs_config;
}
- if (sctx->gs_shader) {
- si_shader_select(ctx, sctx->gs_shader);
-
- if (!sctx->gs_shader->current->pm4) {
- si_pipe_shader_gs(ctx, sctx->gs_shader->current);
- si_pipe_shader_vs(ctx,
- sctx->gs_shader->current->gs_copy_shader);
- }
-
- si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
- si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);
-
- sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;
-
- si_shader_select(ctx, sctx->vs_shader);
-
- if (!sctx->vs_shader->current->pm4)
- si_pipe_shader_es(ctx, sctx->vs_shader->current);
-
- si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
-
- if (!sctx->gs_rings)
- si_init_gs_rings(sctx);
- if (sctx->emitted.named.gs_rings != sctx->gs_rings)
- sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
- si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);
-
- si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
- &sctx->gsvs_ring,
- sctx->gs_shader->current->shader.gs_max_out_vertices *
- sctx->gs_shader->current->shader.noutput * 16,
- 64, true, true, 4, 16);
-
- if (!sctx->gs_on) {
- sctx->gs_on = si_pm4_alloc_state(sctx);
-
- si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
- S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
- S_028B54_GS_EN(1) |
- S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
- }
- si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
- } else {
- si_shader_select(ctx, sctx->vs_shader);
-
- if (!sctx->vs_shader->current->pm4)
- si_pipe_shader_vs(ctx, sctx->vs_shader->current);
-
- si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
-
- sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;
-
- if (!sctx->gs_off) {
- sctx->gs_off = si_pm4_alloc_state(sctx);
-
- si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
- si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
- }
- si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
- si_pm4_bind_state(sctx, gs_rings, NULL);
- si_pm4_bind_state(sctx, gs, NULL);
- si_pm4_bind_state(sctx, es, NULL);
+ if (gs_out_prim != sctx->last_gs_out_prim) {
+ r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ sctx->last_gs_out_prim = gs_out_prim;
}
- si_shader_select(ctx, sctx->ps_shader);
-
- if (!sctx->ps_shader->current->pm4 ||
- sctx->ps_shader->current->cb0_is_integer != sctx->framebuffer.cb0_is_integer)
- si_pipe_shader_ps(ctx, sctx->ps_shader->current);
-
- si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
+ /* Primitive restart. */
+ if (info->primitive_restart != sctx->last_primitive_restart_en) {
+ r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
+ sctx->last_primitive_restart_en = info->primitive_restart;
- if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
- /* XXX: Emitting the PS state even when only the VS changed
- * fixes random failures with piglit glsl-max-varyings.
- * Not sure why...
- */
- sctx->emitted.named.ps = NULL;
- si_update_spi_map(sctx);
+ if (info->primitive_restart &&
+ (info->restart_index != sctx->last_restart_index ||
+ sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
+ r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
+ info->restart_index);
+ sctx->last_restart_index = info->restart_index;
+ }
}
}
-static void si_state_draw(struct si_context *sctx,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+static void si_emit_draw_packets(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ const struct pipe_index_buffer *ib)
{
- unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
- R_00B130_SPI_SHADER_USER_DATA_VS_0);
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
-
- if (pm4 == NULL)
- return;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
if (info->count_from_stream_output) {
struct r600_so_target *t =
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
- si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
- t->stride_in_dw);
-
- si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
- si_pm4_cmd_add(pm4,
- COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_WR_CONFIRM);
- si_pm4_cmd_add(pm4, va); /* src address lo */
- si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
- si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
- si_pm4_cmd_add(pm4, 0); /* unused */
- si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ,
- RADEON_PRIO_MIN);
- si_pm4_cmd_end(pm4, true);
+ r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ t->stride_in_dw);
+
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, va); /* src address lo */
+ radeon_emit(cs, va >> 32); /* src address hi */
+ radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
+ radeon_emit(cs, 0); /* unused */
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ t->buf_filled_size, RADEON_USAGE_READ,
+ RADEON_PRIO_MIN);
}
/* draw packet */
- si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
- if (ib->index_size == 4) {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
- } else {
- si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ if (info->indexed) {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+
+ /* index type */
+ switch (ib->index_size) {
+ case 1:
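+ /* Only reachable on VI; si_draw_vbo translates 8-bit indices
+ * on older chips. */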
+ radeon_emit(cs, V_028A7C_VGT_INDEX_8);
+ break;
+ case 2:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ break;
+ case 4:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ break;
+ default:
+ assert(!"unreachable");
+ return;
+ }
}
- si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
if (!info->indirect) {
- si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
- si_pm4_cmd_add(pm4, info->instance_count);
- si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
-
- si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
- info->indexed ? info->index_bias : info->start);
- si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_START_INSTANCE * 4,
- info->start_instance);
+ int base_vertex;
+
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
+ radeon_emit(cs, info->instance_count);
+
+ /* Base vertex and start instance. */
+ base_vertex = info->indexed ? info->index_bias : info->start;
+
+ if (base_vertex != sctx->last_base_vertex ||
+ sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
+ info->start_instance != sctx->last_start_instance ||
+ sh_base_reg != sctx->last_sh_base_reg) {
+ si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+ radeon_emit(cs, base_vertex);
+ radeon_emit(cs, info->start_instance);
+
+ sctx->last_base_vertex = base_vertex;
+ sctx->last_start_instance = info->start_instance;
+ sctx->last_sh_base_reg = sh_base_reg;
+ }
} else {
- si_pm4_add_bo(pm4, (struct r600_resource *)info->indirect,
- RADEON_USAGE_READ, RADEON_PRIO_MIN);
+ si_invalidate_draw_sh_constants(sctx);
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_MIN);
}
if (info->indexed) {
- uint32_t max_size = (ib->buffer->width0 - ib->offset) /
- sctx->index_buffer.index_size;
- uint64_t va = r600_resource(ib->buffer)->gpu_address + ib->offset;
+ uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
+ ib->index_size;
+ uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
- si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ,
- RADEON_PRIO_MIN);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ (struct r600_resource *)ib->buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_MIN);
if (info->indirect) {
uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
- si_cmd_draw_index_indirect(pm4, indirect_va, va, max_size,
- info->indirect_offset,
- sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
- sh_base_reg + SI_SGPR_START_INSTANCE * 4,
- sctx->b.predicate_drawing);
+
+ assert(indirect_va % 8 == 0);
+ assert(index_va % 2 == 0);
+ assert(info->indirect_offset % 4 == 0);
+
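+ /* SET_BASE(base index 1) points the CP at the indirect buffer;
+ * DRAW_INDEX_INDIRECT then reads the draw arguments at
+ * indirect_offset and is expected to copy base_vertex and
+ * start_instance into the two SH registers given below. */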
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, indirect_va);
+ radeon_emit(cs, indirect_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
+ radeon_emit(cs, index_va);
+ radeon_emit(cs, index_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
+ radeon_emit(cs, index_max_size);
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->indirect_offset);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
} else {
- va += info->start * ib->index_size;
- si_cmd_draw_index_2(pm4, max_size, va, info->count,
- V_0287F0_DI_SRC_SEL_DMA,
- sctx->b.predicate_drawing);
+ index_va += info->start * ib->index_size;
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
+ radeon_emit(cs, index_max_size);
+ radeon_emit(cs, index_va);
+ radeon_emit(cs, (index_va >> 32UL) & 0xFF);
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
}
} else {
if (info->indirect) {
uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
- si_cmd_draw_indirect(pm4, indirect_va, info->indirect_offset,
- sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
- sh_base_reg + SI_SGPR_START_INSTANCE * 4,
- sctx->b.predicate_drawing);
+
+ assert(indirect_va % 8 == 0);
+ assert(info->indirect_offset % 4 == 0);
+
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, indirect_va);
+ radeon_emit(cs, indirect_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->indirect_offset);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
} else {
- si_cmd_draw_index_auto(pm4, info->count,
- V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- S_0287F0_USE_OPAQUE(!!info->count_from_stream_output),
- sctx->b.predicate_drawing);
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
+ radeon_emit(cs, info->count);
+ radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
+ S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
}
}
-
- si_pm4_set_state(sctx, draw, pm4);
}
+#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)
+
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
uint32_t cp_coher_cntl = 0;
+ uint32_t compute =
+ PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
+
+ /* SI has a bug that it always flushes ICACHE and KCACHE if either
+ * bit is set. An alternative way is to write SQC_CACHES, but that
+ * doesn't seem to work reliably. Since the bug doesn't affect
+ * correctness (it only does more work than necessary) and
+ * the performance impact is likely negligible, there is no plan
+ * to fix it.
+ */
- /* XXX SI flushes both ICACHE and KCACHE if either flag is set.
- * XXX CIK shouldn't have this issue. Test CIK before separating the flags
- * XXX to ensure there is no regression. Also find out if there is another
- * XXX way to flush either ICACHE or KCACHE but not both for SI. */
- if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
- R600_CONTEXT_INV_CONST_CACHE)) {
- cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
- S_0085F0_SH_KCACHE_ACTION_ENA(1);
- }
- if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
- R600_CONTEXT_STREAMOUT_FLUSH)) {
- cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
- S_0085F0_TCL1_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_ICACHE)
+ cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_KCACHE)
+ cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
+
+ if (sctx->flags & SI_CONTEXT_INV_TC_L1)
+ cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_TC_L2) {
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
+
+ /* TODO: this might not be needed. */
+ if (sctx->chip_class >= VI)
+ cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
+
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB1_DEST_BASE_ENA(1) |
S_0085F0_CB6_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
S_0085F0_DB_DEST_BASE_ENA(1);
}
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
+ EVENT_WRITE_INV_L2);
+ }
+
+ /* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
+ * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
+ *
+ * I think the reason is that FLUSH_AND_INV is only added to a queue
+ * and it is PS_PARTIAL_FLUSH that waits for it to complete.
+ */
+ if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ } else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+ if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
+
+ /* SURFACE_SYNC must be emitted after partial flushes.
+ * It looks like SURFACE_SYNC flushes caches immediately and doesn't
+ * wait for any engines. This should be last.
+ */
if (cp_coher_cntl) {
if (sctx->chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
} else {
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0); /* CP_COHER_BASE */
}
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
- }
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
- }
-
- if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
- R600_CONTEXT_PS_PARTIAL_FLUSH)) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- } else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
- /* Needed if streamout buffers are going to be used as a source. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- }
-
- if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
- }
- if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
- }
-
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 17 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 24 }; /* number of CS dwords */
static void si_get_draw_start_count(struct si_context *sctx,
const struct pipe_draw_info *info,
{
struct si_context *sctx = (struct si_context *)ctx;
struct pipe_index_buffer ib = {};
- uint32_t i;
+ unsigned i;
if (!info->count && !info->indirect &&
(info->indexed || !info->count_from_stream_output))
return;
- if (!sctx->ps_shader || !sctx->vs_shader)
+ if (!sctx->ps_shader || !sctx->vs_shader) {
+ assert(0);
+ return;
+ }
+ if (!!sctx->tes_shader != (info->mode == PIPE_PRIM_PATCHES)) {
+ assert(0);
return;
+ }
- si_update_derived_state(sctx);
+ si_decompress_textures(sctx);
- if (sctx->vertex_buffers_dirty) {
- si_update_vertex_buffers(sctx);
- sctx->vertex_buffers_dirty = false;
- }
+ /* Set the rasterization primitive type.
+ *
+ * This must be done after si_decompress_textures, which can call
+ * draw_vbo recursively, and before si_update_shaders, which uses
+ * current_rast_prim for this draw_vbo call. */
+ if (sctx->gs_shader)
+ sctx->current_rast_prim = sctx->gs_shader->gs_output_prim;
+ else if (sctx->tes_shader)
+ sctx->current_rast_prim =
+ sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ else
+ sctx->current_rast_prim = info->mode;
+
+ si_update_shaders(sctx);
+ if (!si_upload_shader_descriptors(sctx))
+ return;
if (info->indexed) {
/* Initialize the index buffer struct. */
ib.offset = sctx->index_buffer.offset;
/* Translate or upload, if needed. */
- if (ib.index_size == 1) {
+ /* 8-bit indices are natively supported only on VI; translate them on older chips. */
+ if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset, start, count, start_offset;
void *ptr;
}
}
- if (!si_update_draw_info_state(sctx, info, &ib))
- return;
-
- si_state_draw(sctx, info, &ib);
-
- sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);
+ /* TODO: VI should read index buffers through TC, so this shouldn't be
+ * needed on VI. */
+ if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
+ r600_resource(ib.buffer)->TC_L2_dirty = false;
+ }
/* Check flush flags. */
if (sctx->b.flags)
- sctx->atoms.s.cache_flush->dirty = true;
+ si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);
si_need_cs_space(sctx, 0, TRUE);
}
si_pm4_emit_dirty(sctx);
- sctx->pm4_dirty_cdwords = 0;
+ si_emit_scratch_reloc(sctx);
+ si_emit_rasterizer_prim_state(sctx);
+ si_emit_draw_registers(sctx, info);
+ si_emit_draw_packets(sctx, info, &ib);
-#if SI_TRACE_CS
- if (sctx->screen->b.trace_bo) {
+ if (sctx->trace_buf)
si_trace_emit(sctx);
- }
-#endif
/* Workaround for a VGT hang when streamout is enabled.
* It must be done after drawing. */
- if (sctx->b.family == CHIP_HAWAII &&
+ if ((sctx->b.family == CHIP_HAWAII || sctx->b.family == CHIP_TONGA) &&
(sctx->b.streamout.streamout_enabled ||
sctx->b.streamout.prims_gen_query_enabled)) {
- sctx->b.flags |= R600_CONTEXT_VGT_STREAMOUT_SYNC;
+ sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}
/* Set the depth buffer as dirty. */
pipe_resource_reference(&ib.buffer, NULL);
sctx->b.num_draw_calls++;
}
+
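+/* Write an incrementing trace ID to memory and insert a matching NOP
+ * trace point into the CS, so that after a hang the last completed
+ * packet can be found by comparing the buffer contents with the trace
+ * points in the CS dump. */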
+void si_trace_emit(struct si_context *sctx)
+{
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+
+ sctx->trace_id++;
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sctx->trace_buf,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, sctx->trace_buf->gpu_address);
+ radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
+ radeon_emit(cs, sctx->trace_id);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
+}