S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
S_00B32C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
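/* Note: this patch drops the R600_CONTEXT_INV_SHADER_CACHE flagging from
 * each of the si_shader_* state functions; presumably the shader-cache
 * invalidation is now requested at one central place instead of on every
 * shader bind. */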
static void si_shader_gs(struct pipe_context *ctx, struct si_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
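	/* The ring itemsize registers are programmed in dwords: each GS
	 * output is one vec4 (16 bytes), hence 16 >> 2 = 4 dwords per
	 * output attribute. */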
unsigned gs_vert_itemsize = shader->noutput * (16 >> 2);
- unsigned gs_max_vert_out = shader->gs_max_out_vertices;
+ unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
unsigned cut_mode;
struct si_pm4_state *pm4;
si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
- shader->nparam * (16 >> 2));
+ util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);
S_00B228_SGPRS((num_sgprs - 1) / 8));
si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
S_00B22C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
static void si_shader_vs(struct pipe_context *ctx, struct si_shader *shader)
vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
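+	/* The GS copy shader is an internal VS that only fetches the GS
+	 * outputs from the GSVS ring, so it gets by with fewer user SGPRs. */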
+ if (shader->is_gs_copy_shader)
+ num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
+ else
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
+
num_sgprs = shader->num_sgprs;
if (num_user_sgprs > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
static void si_shader_ps(struct pipe_context *ctx, struct si_shader *shader)
si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
S_00B02C_USER_SGPR(num_user_sgprs));
-
- sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim =
si_conv_prim_to_gs_out(sctx->gs_shader ?
- sctx->gs_shader->current->gs_output_prim :
+ sctx->gs_shader->gs_output_prim :
info->mode);
unsigned ls_mask = 0;
unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
- unsigned size = 128 * 1024;
+ unsigned esgs_ring_size = 128 * 1024;
+ unsigned gsvs_ring_size = 64 * 1024 * 1024;
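+	/* Both ring sizes are fixed, so plain locals suffice now that the
+	 * context keeps bare pipe_resource pointers instead of a
+	 * buffer/buffer_size pair. */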
assert(!sctx->gs_rings);
sctx->gs_rings = si_pm4_alloc_state(sctx);
- sctx->esgs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->esgs_ring.buffer_size = size;
+ sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT, esgs_ring_size);
- size = 64 * 1024 * 1024;
- sctx->gsvs_ring.buffer =
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, size);
- sctx->gsvs_ring.buffer_size = size;
+ sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT, gsvs_ring_size);
if (sctx->b.chip_class >= CIK) {
si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
+ esgs_ring_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
+ gsvs_ring_size / 256);
} else {
si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
- sctx->esgs_ring.buffer_size / 256);
+ esgs_ring_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
- sctx->gsvs_ring.buffer_size / 256);
+ gsvs_ring_size / 256);
}
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
+ sctx->esgs_ring, 0, esgs_ring_size,
true, true, 4, 64);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
- &sctx->esgs_ring, 0, sctx->esgs_ring.buffer_size,
+ sctx->esgs_ring, 0, esgs_ring_size,
false, false, 0, 0);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
- &sctx->gsvs_ring, 0, sctx->gsvs_ring.buffer_size,
+ sctx->gsvs_ring, 0, gsvs_ring_size,
false, false, 0, 0);
}
si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);
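/* Bind the GSVS ring for GS writes; the stride is evidently one thread's
 * worth of output: gs_max_out_vertices * noutput vec4s of 16 bytes each. */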
si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
- &sctx->gsvs_ring,
- sctx->gs_shader->current->gs_max_out_vertices *
+ sctx->gsvs_ring,
+ sctx->gs_shader->gs_max_out_vertices *
sctx->gs_shader->current->noutput * 16,
64, true, true, 4, 16);
si_shader_select(ctx, sctx->ps_shader);
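+	/* Fall back to a trivial pixel shader so that a failed compile
+	 * degrades to incorrect rendering instead of a crash. */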
+ if (!sctx->ps_shader->current) {
+ struct si_shader_selector *sel;
+
+ /* use a dummy shader if compiling the shader (variant) failed */
+ si_make_dummy_ps(sctx);
+ sel = sctx->dummy_pixel_shader;
+ si_shader_select(ctx, sel);
+ sctx->ps_shader->current = sel->current;
+ }
+
if (!sctx->ps_shader->current->pm4)
si_shader_ps(ctx, sctx->ps_shader->current);
{
struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
uint32_t cp_coher_cntl = 0;
+ uint32_t compute =
+ PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));
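+	/* PKT3_SHADER_TYPE_S(1) sets the compute bit in the PKT3 header, so
+	 * ORing it into every packet lets the same flush path serve compute. */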
/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
* XXX CIK shouldn't have this issue. Test CIK before separating the flags. */
if (cp_coher_cntl) {
if (sctx->chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
} else {
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0); /* CP_COHER_BASE */
}
if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
}
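+	/* The CACHE_FLUSH event with the INV_L2 bit additionally invalidates
+	 * the GPU L2 cache (per the R600_CONTEXT_FLUSH_WITH_INV_L2 name). */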
+ if (sctx->flags & R600_CONTEXT_FLUSH_WITH_INV_L2) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
+ EVENT_WRITE_INV_L2);
+ }
if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
R600_CONTEXT_PS_PARTIAL_FLUSH)) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
/* Needed if streamout buffers are going to be used as a source. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
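+	/* CS_PARTIAL_FLUSH waits for outstanding compute work; like the other
+	 * *_PARTIAL_FLUSH events above, it uses EVENT_INDEX(4). */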
+ if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
}
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 17 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */
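/* 17 dwords before this patch, plus 2 dwords each for the new INV_L2 and
 * CS_PARTIAL_FLUSH EVENT_WRITE packets = 21. */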
static void si_get_draw_start_count(struct si_context *sctx,
const struct pipe_draw_info *info,
pipe_resource_reference(&ib.buffer, NULL);
sctx->b.num_draw_calls++;
}
+
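+/* Stamp the trace buffer with the current CS position (cs->cdw) and a
+ * monotonically increasing submission counter, so that after a hang the
+ * last packet the CP actually processed can be identified. */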
+#if SI_TRACE_CS
+void si_trace_emit(struct si_context *sctx)
+{
+ struct si_screen *sscreen = sctx->screen;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ uint64_t va;
+
+ va = sscreen->b.trace_bo->gpu_address;
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
+ PKT3_WRITE_DATA_WR_CONFIRM |
+ PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
+ radeon_emit(cs, va & 0xFFFFFFFFUL);
+ radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
+ radeon_emit(cs, cs->cdw);
+ radeon_emit(cs, sscreen->b.cs_count);
+}
+#endif