#include "si_pipe.h"
#include "si_shader.h"
-#include "../radeon/r600_cs.h"
+#include "radeon/r600_cs.h"
#include "sid.h"
-#include "util/u_blitter.h"
#include "util/u_format.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
+#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
/*
* Shaders
*/
-static void si_pipe_shader_es(struct pipe_context *ctx, struct si_pipe_shader *shader)
+static void si_shader_es(struct pipe_context *ctx, struct si_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
if (pm4 == NULL)
return;
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
+ va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
- vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
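+ /* VGPR_COMP_CNT selects how many system-value VGPRs the hardware
+  * loads: 3 makes InstanceID available to the shader, 0 loads only
+  * VertexID. */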
+ vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
S_00B32C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
-static void si_pipe_shader_gs(struct pipe_context *ctx, struct si_pipe_shader *shader)
+static void si_shader_gs(struct pipe_context *ctx, struct si_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
- unsigned gs_vert_itemsize = shader->shader.noutput * (16 >> 2);
- unsigned gs_max_vert_out = shader->shader.gs_max_out_vertices;
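+ /* Ring item sizes are in dwords; every output is one 16-byte vec4,
+  * hence the (16 >> 2) dwords per output. */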
+ unsigned gs_vert_itemsize = shader->noutput * (16 >> 2);
+ unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
unsigned cut_mode;
struct si_pm4_state *pm4;
si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
- shader->shader.nparam * (16 >> 2));
+ util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);
si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
+ va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
S_00B228_SGPRS((num_sgprs - 1) / 8));
si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
S_00B22C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
-static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
+static void si_shader_vs(struct pipe_context *ctx, struct si_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
if (pm4 == NULL)
return;
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
+ va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
- vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
+ vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
+
+ if (shader->is_gs_copy_shader)
+ num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
+ else
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
if (num_user_sgprs > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
* VS is required to export at least one param and r600_shader_from_tgsi()
* takes care of adding a dummy export.
*/
- for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
- switch (shader->shader.output[i].name) {
+ for (nparams = 0, i = 0 ; i < shader->noutput; i++) {
+ switch (shader->output[i].name) {
case TGSI_SEMANTIC_CLIPVERTEX:
case TGSI_SEMANTIC_POSITION:
case TGSI_SEMANTIC_PSIZE:
si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
+ S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
+ S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
+ S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
-static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
+static void si_shader_ps(struct pipe_context *ctx, struct si_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
- unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
+ unsigned i, spi_ps_in_control;
unsigned num_sgprs, num_user_sgprs;
- unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
+ unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
uint64_t va;
si_pm4_delete_state(sctx, ps, shader->pm4);
if (pm4 == NULL)
return;
- db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
- S_02880C_ALPHA_TO_MASK_DISABLE(sctx->framebuffer.cb0_is_integer);
-
- for (i = 0; i < shader->shader.ninput; i++) {
- switch (shader->shader.input[i].name) {
+ for (i = 0; i < shader->ninput; i++) {
+ switch (shader->input[i].name) {
case TGSI_SEMANTIC_POSITION:
- if (shader->shader.input[i].centroid) {
+ if (shader->input[i].centroid) {
/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
* Possible values:
* 0 -> Position = pixel center (default)
}
}
- for (i = 0; i < shader->shader.noutput; i++) {
- if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
- db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
- if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
- db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
- }
- if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
- db_shader_control |= S_02880C_KILL_ENABLE(1);
-
- exports_ps = 0;
- for (i = 0; i < shader->shader.noutput; i++) {
- if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
- shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
- exports_ps |= 1;
- }
- if (!exports_ps) {
- /* always at least export 1 component per pixel */
- exports_ps = 2;
- }
-
- spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.nparam) |
+ spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
S_0286D8_BC_OPTIMIZE_DISABLE(1);
si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
- if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
- spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
- else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
- spi_shader_z_format = V_028710_SPI_SHADER_32_R;
- else
- spi_shader_z_format = 0;
- si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
+ si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
shader->spi_shader_col_format);
si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);
- va = r600_resource_va(ctx->screen, (void *)shader->bo);
+ va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
S_00B02C_USER_SGPR(num_user_sgprs));
-
- si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
-
- shader->cb0_is_integer = sctx->framebuffer.cb0_is_integer;
- shader->sprite_coord_enable = sctx->sprite_coord_enable;
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
/*
[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
+ [R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
};
unsigned result = prim_conv[pprim];
if (result == ~0) {
[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
};
assert(mode < Elements(prim_conv));
return prim_conv[mode];
}
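+/* Compute the IA_MULTI_VGT_PARAM value, which controls the primitive
+ * group size and when the IA/WD blocks may switch at an end-of-packet
+ * boundary. */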
+static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
+ const struct pipe_draw_info *info)
+{
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+ unsigned prim = info->mode;
+ unsigned primgroup_size = 128; /* recommended without a GS */
+
+ /* SWITCH_ON_EOP(0) is always preferable. */
+ bool wd_switch_on_eop = false;
+ bool ia_switch_on_eop = false;
+ bool partial_vs_wave = false;
+
+ if (sctx->gs_shader)
+ primgroup_size = 64; /* recommended with a GS */
+
+ /* Line stipple requires SWITCH_ON_EOP as a hardware requirement;
+  * the debug flag forces it. */
+ if ((rs && rs->line_stipple_enable) ||
+ (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
+ ia_switch_on_eop = true;
+ wd_switch_on_eop = true;
+ }
+
+ if (sctx->b.streamout.streamout_enabled ||
+ sctx->b.streamout.prims_gen_query_enabled)
+ partial_vs_wave = true;
+
+ if (sctx->b.chip_class >= CIK) {
+ /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
+ * 4 shader engines. Set it to 1 to pass the assertion below.
+ * The other cases are hardware requirements. */
+ if (sctx->b.screen->info.max_se < 4 ||
+ prim == PIPE_PRIM_POLYGON ||
+ prim == PIPE_PRIM_LINE_LOOP ||
+ prim == PIPE_PRIM_TRIANGLE_FAN ||
+ prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
+ info->primitive_restart)
+ wd_switch_on_eop = true;
+
+ /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
+ * We don't know the instance count for indirect draws, so treat
+ * them as always problematic. */
+ if (sctx->b.family == CHIP_HAWAII &&
+ (info->indirect || info->instance_count > 1))
+ wd_switch_on_eop = true;
+
+ /* If the WD switch is false, the IA switch must be false too. */
+ assert(wd_switch_on_eop || !ia_switch_on_eop);
+ }
+
+ return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+ S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
+ S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
+ S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
+}
+
static bool si_update_draw_info_state(struct si_context *sctx,
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib)
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim =
si_conv_prim_to_gs_out(sctx->gs_shader ?
- sctx->gs_shader->current->shader.gs_output_prim :
+ sctx->gs_shader->gs_output_prim :
info->mode);
unsigned ls_mask = 0;
+ unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);
if (pm4 == NULL)
return false;
}
if (sctx->b.chip_class >= CIK) {
- struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
- prim == V_008958_DI_PT_LINELOOP ||
- prim == V_008958_DI_PT_TRIFAN ||
- prim == V_008958_DI_PT_TRISTRIP_ADJ ||
- info->primitive_restart ||
- (rs ? rs->line_stipple_enable : false);
- /* If the WD switch is false, the IA switch must be false too. */
- bool ia_switch_on_eop = wd_switch_on_eop;
-
- si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
- S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
- S_028AA8_PARTIAL_VS_WAVE_ON(1) |
- S_028AA8_PRIMGROUP_SIZE(63) |
- S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
ib->index_size == 4 ? 0xFC000000 : 0xFC00);
- si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
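+ /* On CIK, the DRAW_PREAMBLE packet programs the three VGT registers
+  * named below in a single packet. */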
+ si_pm4_cmd_begin(pm4, PKT3_DRAW_PREAMBLE);
+ si_pm4_cmd_add(pm4, prim); /* VGT_PRIMITIVE_TYPE */
+ si_pm4_cmd_add(pm4, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
+ si_pm4_cmd_add(pm4, 0); /* VGT_LS_HS_CONFIG */
+ si_pm4_cmd_end(pm4, false);
} else {
si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
}
si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
- si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
- info->indexed ? info->index_bias : info->start);
si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
- si_pm4_set_reg(pm4, SI_SGPR_START_INSTANCE * 4 +
- (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
- R_00B130_SPI_SHADER_USER_DATA_VS_0),
- info->start_instance);
if (prim == V_008958_DI_PT_LINELIST)
ls_mask = 1;
static void si_update_spi_map(struct si_context *sctx)
{
- struct si_shader *ps = &sctx->ps_shader->current->shader;
+ struct si_shader *ps = sctx->ps_shader->current;
struct si_shader *vs = si_get_vs_state(sctx);
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
unsigned i, j, tmp;
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
- unsigned size = 128 * 1024;
+ unsigned esgs_ring_size = 128 * 1024;
+ unsigned gsvs_ring_size = 64 * 1024 * 1024;
assert(!sctx->gs_rings);
sctx->gs_rings = si_pm4_alloc_state(sctx);
- sctx->esgs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->esgs_ring.buffer_size = size;
+ sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT, esgs_ring_size);
- size = 64 * 1024 * 1024;
- sctx->gsvs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->gsvs_ring.buffer_size = size;
+ sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT, gsvs_ring_size);
if (sctx->b.chip_class >= CIK) {
si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
+ esgs_ring_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
+ gsvs_ring_size / 256);
} else {
si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
+ esgs_ring_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
+ gsvs_ring_size / 256);
}
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
+ sctx->esgs_ring, 0, esgs_ring_size,
true, true, 4, 64);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
+ sctx->esgs_ring, 0, esgs_ring_size,
false, false, 0, 0);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
- &sctx->gsvs_ring, 0, sctx->gsvs_ring.buffer_size,
+ sctx->gsvs_ring, 0, gsvs_ring_size,
false, false, 0, 0);
}
si_shader_select(ctx, sctx->gs_shader);
if (!sctx->gs_shader->current->pm4) {
- si_pipe_shader_gs(ctx, sctx->gs_shader->current);
- si_pipe_shader_vs(ctx,
- sctx->gs_shader->current->gs_copy_shader);
+ si_shader_gs(ctx, sctx->gs_shader->current);
+ si_shader_vs(ctx, sctx->gs_shader->current->gs_copy_shader);
}
si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
si_shader_select(ctx, sctx->vs_shader);
if (!sctx->vs_shader->current->pm4)
- si_pipe_shader_es(ctx, sctx->vs_shader->current);
+ si_shader_es(ctx, sctx->vs_shader->current);
si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);
si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
- &sctx->gsvs_ring,
- sctx->gs_shader->current->shader.gs_max_out_vertices *
- sctx->gs_shader->current->shader.noutput * 16,
+ sctx->gsvs_ring,
+ sctx->gs_shader->gs_max_out_vertices *
+ sctx->gs_shader->current->noutput * 16,
64, true, true, 4, 16);
if (!sctx->gs_on) {
si_shader_select(ctx, sctx->vs_shader);
if (!sctx->vs_shader->current->pm4)
- si_pipe_shader_vs(ctx, sctx->vs_shader->current);
+ si_shader_vs(ctx, sctx->vs_shader->current);
si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
si_shader_select(ctx, sctx->ps_shader);
- if (!sctx->ps_shader->current->pm4 ||
- sctx->ps_shader->current->cb0_is_integer != sctx->framebuffer.cb0_is_integer)
- si_pipe_shader_ps(ctx, sctx->ps_shader->current);
+ if (!sctx->ps_shader->current->pm4)
+ si_shader_ps(ctx, sctx->ps_shader->current);
si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
sctx->emitted.named.ps = NULL;
si_update_spi_map(sctx);
}
-}
-static void si_vertex_buffer_update(struct si_context *sctx)
-{
- struct pipe_context *ctx = &sctx->b.b;
- struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
- bool bound[PIPE_MAX_ATTRIBS] = {};
- unsigned i, count;
- uint64_t va;
-
- sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
-
- count = sctx->vertex_elements->count;
- assert(count <= 256 / 4);
-
- si_pm4_sh_data_begin(pm4);
- for (i = 0 ; i < count; i++) {
- struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
- struct pipe_vertex_buffer *vb;
- struct r600_resource *rbuffer;
- unsigned offset;
-
- if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
- continue;
-
- vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
- rbuffer = (struct r600_resource*)vb->buffer;
- if (rbuffer == NULL)
- continue;
-
- offset = 0;
- offset += vb->buffer_offset;
- offset += ve->src_offset;
-
- va = r600_resource_va(ctx->screen, (void*)rbuffer);
- va += offset;
-
- /* Fill in T# buffer resource description */
- si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
- si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
- S_008F04_STRIDE(vb->stride)));
- if (vb->stride)
- /* Round up by rounding down and adding 1 */
- si_pm4_sh_data_add(pm4,
- (vb->buffer->width0 - offset -
- util_format_get_blocksize(ve->src_format)) /
- vb->stride + 1);
- else
- si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
- si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);
-
- if (!bound[ve->vertex_buffer_index]) {
- si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ,
- RADEON_PRIO_SHADER_BUFFER_RO);
- bound[ve->vertex_buffer_index] = true;
- }
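+ /* DB_SHADER_CONTROL is emitted as part of the db_render_state atom
+  * now, so mark that atom dirty whenever the bound PS changes it. */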
+ if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
+ sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
+ sctx->db_render_state.dirty = true;
}
- si_pm4_sh_data_end(pm4, sctx->gs_shader ?
- R_00B330_SPI_SHADER_USER_DATA_ES_0 :
- R_00B130_SPI_SHADER_USER_DATA_VS_0,
- SI_SGPR_VERTEX_BUFFER);
- si_pm4_set_state(sctx, vertex_buffers, pm4);
}
static void si_state_draw(struct si_context *sctx,
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib)
{
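+ /* With a GS bound, the VS runs on the ES stage and its user SGPRs
+  * live at the ES user-data register base instead of the VS base. */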
+ unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
+ R_00B130_SPI_SHADER_USER_DATA_VS_0);
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
- /* queries need some special values
- * (this is non-zero if any query is active) */
- if (sctx->b.num_occlusion_queries > 0) {
- if (sctx->b.chip_class >= CIK) {
- si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
- S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples) |
- S_028004_ZPASS_ENABLE(1) |
- S_028004_SLICE_EVEN_ENABLE(1) |
- S_028004_SLICE_ODD_ENABLE(1));
- } else {
- si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
- S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples));
- }
- }
-
if (info->count_from_stream_output) {
struct r600_so_target *t =
(struct r600_so_target*)info->count_from_stream_output;
- uint64_t va = r600_resource_va(&sctx->screen->b.b,
- &t->buf_filled_size->b.b);
- va += t->buf_filled_size_offset;
+ uint64_t va = t->buf_filled_size->gpu_address +
+ t->buf_filled_size_offset;
si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
t->stride_in_dw);
}
si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
- si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
- si_pm4_cmd_add(pm4, info->instance_count);
- si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
+ if (!info->indirect) {
+ si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
+ si_pm4_cmd_add(pm4, info->instance_count);
+ si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
+
+ si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
+ info->indexed ? info->index_bias : info->start);
+ si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_START_INSTANCE * 4,
+ info->start_instance);
+ } else {
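+ /* For indirect draws, the CP reads the draw arguments from this
+  * buffer and loads base vertex / start instance into the user
+  * SGPRs itself; see the register offsets passed to the indirect
+  * draw packets below. */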
+ si_pm4_add_bo(pm4, (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_MIN);
+ }
if (info->indexed) {
- uint32_t max_size = (ib->buffer->width0 - ib->offset) /
- sctx->index_buffer.index_size;
+ uint32_t max_size = (ib->buffer->width0 - ib->offset) /
+ ib->index_size;
- uint64_t va;
- va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
- va += ib->offset;
+ uint64_t va = r600_resource(ib->buffer)->gpu_address + ib->offset;
si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ,
RADEON_PRIO_MIN);
- si_cmd_draw_index_2(pm4, max_size, va, info->count,
- V_0287F0_DI_SRC_SEL_DMA,
- sctx->b.predicate_drawing);
+
+ if (info->indirect) {
+ uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
+ si_cmd_draw_index_indirect(pm4, indirect_va, va, max_size,
+ info->indirect_offset,
+ sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
+ sh_base_reg + SI_SGPR_START_INSTANCE * 4,
+ sctx->b.predicate_drawing);
+ } else {
+ va += info->start * ib->index_size;
+ si_cmd_draw_index_2(pm4, max_size, va, info->count,
+ V_0287F0_DI_SRC_SEL_DMA,
+ sctx->b.predicate_drawing);
+ }
} else {
- uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
- si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->b.predicate_drawing);
+ if (info->indirect) {
+ uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
+ si_cmd_draw_indirect(pm4, indirect_va, info->indirect_offset,
+ sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
+ sh_base_reg + SI_SGPR_START_INSTANCE * 4,
+ sctx->b.predicate_drawing);
+ } else {
+ si_cmd_draw_index_auto(pm4, info->count,
+ V_0287F0_DI_SRC_SEL_AUTO_INDEX |
+ S_0287F0_USE_OPAQUE(!!info->count_from_stream_output),
+ sctx->b.predicate_drawing);
+ }
}
si_pm4_set_state(sctx, draw, pm4);
{
struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
uint32_t cp_coher_cntl = 0;
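+ /* When flushing on behalf of compute, set the shader-type bit in
+  * every PKT3 header so the packets apply to the compute engine. */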
+ uint32_t compute =
+ PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));
/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
* XXX CIK shouldn't have this issue. Test CIK before separating the flags.
if (cp_coher_cntl) {
if (sctx->chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
} else {
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0); /* CP_COHER_BASE */
}
if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
}
+ if (sctx->flags & R600_CONTEXT_FLUSH_WITH_INV_L2) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
+ EVENT_WRITE_INV_L2);
+ }
if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
R600_CONTEXT_PS_PARTIAL_FLUSH)) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
/* Needed if streamout buffers are going to be used as a source. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
+ if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
+ if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */
+
+static void si_get_draw_start_count(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned *start, unsigned *count)
+{
+ if (info->indirect) {
+ struct r600_resource *indirect =
+ (struct r600_resource*)info->indirect;
+ int *data = r600_buffer_map_sync_with_rings(&sctx->b,
+ indirect, PIPE_TRANSFER_READ);
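+ /* Mapping here waits for the GPU. The buffer is assumed to hold
+  * the standard draw arguments { count, instance_count, start, ... },
+  * so dword 0 is the count and dword 2 the start. */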
+ data += info->indirect_offset/sizeof(int);
+ *start = data[2];
+ *count = data[0];
+ } else {
+ *start = info->start;
+ *count = info->count;
+ }
+}
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct pipe_index_buffer ib = {};
uint32_t i;
- if (!info->count && (info->indexed || !info->count_from_stream_output))
+ if (!info->count && !info->indirect &&
+ (info->indexed || !info->count_from_stream_output))
return;
if (!sctx->ps_shader || !sctx->vs_shader)
return;
si_update_derived_state(sctx);
- si_vertex_buffer_update(sctx);
+
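+ /* Vertex buffer descriptors are rebuilt only when marked dirty,
+  * not on every draw call. */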
+ if (sctx->vertex_buffers_dirty) {
+ si_update_vertex_buffers(sctx);
+ sctx->vertex_buffers_dirty = false;
+ }
if (info->indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
ib.user_buffer = sctx->index_buffer.user_buffer;
ib.index_size = sctx->index_buffer.index_size;
- ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;
+ ib.offset = sctx->index_buffer.offset;
/* Translate or upload, if needed. */
if (ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
- unsigned out_offset;
+ unsigned out_offset, start, count, start_offset;
void *ptr;
- u_upload_alloc(sctx->b.uploader, 0, info->count * 2,
+ si_get_draw_start_count(sctx, info, &start, &count);
+ start_offset = start * ib.index_size;
+
+ u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
&out_offset, &out_buffer, &ptr);
- util_shorten_ubyte_elts_to_userptr(
- &sctx->b.b, &ib, 0, ib.offset, info->count, ptr);
+ util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
+ ib.offset + start_offset,
+ count, ptr);
pipe_resource_reference(&ib.buffer, NULL);
ib.user_buffer = NULL;
ib.buffer = out_buffer;
- ib.offset = out_offset;
+ /* info->start will be added by the drawing code */
+ ib.offset = out_offset - start_offset;
ib.index_size = 2;
- }
+ } else if (ib.user_buffer && !ib.buffer) {
+ unsigned start, count, start_offset;
+
+ si_get_draw_start_count(sctx, info, &start, &count);
+ start_offset = start * ib.index_size;
- if (ib.user_buffer && !ib.buffer) {
- u_upload_data(sctx->b.uploader, 0, info->count * ib.index_size,
- ib.user_buffer, &ib.offset, &ib.buffer);
+ u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
+ (char*)ib.user_buffer + start_offset,
+ &ib.offset, &ib.buffer);
+ /* info->start will be added by the drawing code */
+ ib.offset -= start_offset;
}
}
}
#endif
+ /* Workaround for a VGT hang when streamout is enabled.
+ * It must be done after drawing. */
+ if (sctx->b.family == CHIP_HAWAII &&
+ (sctx->b.streamout.streamout_enabled ||
+ sctx->b.streamout.prims_gen_query_enabled)) {
+ sctx->b.flags |= R600_CONTEXT_VGT_STREAMOUT_SYNC;
+ }
+
/* Set the depth buffer as dirty. */
if (sctx->framebuffer.state.zsbuf) {
struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
pipe_resource_reference(&ib.buffer, NULL);
sctx->b.num_draw_calls++;
}
+
+#if SI_TRACE_CS
+void si_trace_emit(struct si_context *sctx)
+{
+ struct si_screen *sscreen = sctx->screen;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ uint64_t va;
+
+ va = sscreen->b.trace_bo->gpu_address;
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
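+ /* Write the current CS dword count and the per-screen submission
+  * counter into the trace buffer, so a hang can be matched to the
+  * last packet the CP consumed. */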
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
+ PKT3_WRITE_DATA_WR_CONFIRM |
+ PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
+ radeon_emit(cs, va & 0xFFFFFFFFUL);
+ radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
+ radeon_emit(cs, cs->cdw);
+ radeon_emit(cs, sscreen->b.cs_count);
+}
+#endif