uint64_t src_va, unsigned size, unsigned flags,
enum si_coherency coher)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint32_t header = 0, command = 0;
- assert(size);
assert(size <= cp_dma_max_byte_count(sctx));
if (sctx->chip_class >= GFX9)
	command |= S_414_BYTE_COUNT_GFX9(size);
else
	command |= S_414_BYTE_COUNT_GFX6(size);
/* Src and dst flags. */
if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
src_va == dst_va)
- header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
+ header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
else if (flags & CP_DMA_USE_L2)
- header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
+ header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
if (flags & CP_DMA_CLEAR)
header |= S_411_SRC_SEL(V_411_DATA);
}
}
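/* Illustrative sketch, not part of the patch: the condition the DST_SEL fix
 * enables, in isolation. On GFX9, a copy whose source and destination are
 * the same address (and which is not a clear) selects V_411_NOWHERE as the
 * destination, so the packet only pulls the data into TC L2, i.e. it acts
 * as a pure prefetch. The helper name below is hypothetical.
 */
static bool example_is_prefetch_only(struct si_context *sctx, uint64_t dst_va,
				     uint64_t src_va, unsigned flags)
{
	/* Mirrors the condition guarded above. */
	return sctx->chip_class >= GFX9 &&
	       !(flags & CP_DMA_CLEAR) &&
	       src_va == dst_va;
}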
+void si_cp_dma_wait_for_idle(struct si_context *sctx)
+{
+ /* Issue a dummy DMA that copies zero bytes.
+ *
+ * The DMA engine will see that there's no work to do and skip this
+ * DMA request; however, the CP will see the sync flag and still wait
+ * for all DMAs to complete.
+ */
+ si_emit_cp_dma(sctx, 0, 0, 0, CP_DMA_SYNC, SI_COHERENCY_NONE);
+}
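/* Usage sketch, not part of the patch: a caller that must not let the CP
 * consume the results of in-flight CP DMAs too early would emit the dummy
 * DMA first. The zero-byte copy is skipped by the DMA engine, but its sync
 * flag still makes the CP wait for all outstanding CP DMAs. The caller name
 * below is hypothetical.
 */
static void example_read_after_cp_dma(struct si_context *sctx)
{
	si_cp_dma_wait_for_idle(sctx);

	/* ... now it is safe to emit packets that read buffers a previous
	 * CP DMA wrote ... */
}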
+
static unsigned get_flush_flags(struct si_context *sctx, enum si_coherency coher)
{
switch (coher) {
/* This must be done after need_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)dst,
+ r600_resource(dst),
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)src,
+ r600_resource(src),
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
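/* For reference, a sketch of the r600_resource() helper this patch switches
 * to (the real definition lives in the radeonsi/r600 headers): it is a
 * type-checked inline cast from pipe_resource, which is why it can replace
 * the bare (struct r600_resource*) casts above.
 */
static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
	return (struct r600_resource *)r;
}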
if (!sctx->scratch_buffer ||
sctx->scratch_buffer->b.b.width0 < scratch_size) {
r600_resource_reference(&sctx->scratch_buffer, NULL);
- sctx->scratch_buffer = (struct r600_resource*)
- 	si_aligned_buffer_create(&sctx->screen->b,
- 				 SI_RESOURCE_FLAG_UNMAPPABLE,
- 				 PIPE_USAGE_DEFAULT,
+ sctx->scratch_buffer =
+ 	r600_resource(si_aligned_buffer_create(&sctx->screen->b,
+ 					       SI_RESOURCE_FLAG_UNMAPPABLE,
+ 					       PIPE_USAGE_DEFAULT,
if (!sctx->scratch_buffer)
return;
- si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
}
si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
sctx->vertex_elements->desc_list_byte_size);
}
-void cik_emit_prefetch_L2(struct si_context *sctx)
+/**
+ * Prefetch shaders and VBO descriptors.
+ *
+ * \param vertex_stage_only Whether only the API VS and VBO descriptors
+ * should be prefetched.
+ */
+void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only)
{
+ unsigned mask = sctx->prefetch_L2_mask;
+ assert(mask);
+
/* Prefetch shaders and VBO descriptors to TC L2. */
if (sctx->chip_class >= GFX9) {
/* Choose the right spot for the VBO prefetch. */
if (sctx->tes_shader.cso) {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
+ if (mask & SI_PREFETCH_HS)
cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_HS |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
+
+ if (mask & SI_PREFETCH_GS)
cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
} else if (sctx->gs_shader.cso) {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
+ if (mask & SI_PREFETCH_GS)
cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_GS |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
+
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
} else {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
}
} else {
/* SI-CI-VI */
/* Choose the right spot for the VBO prefetch. */
if (sctx->tes_shader.cso) {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_LS)
+ if (mask & SI_PREFETCH_LS)
cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_LS |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
+
+ if (mask & SI_PREFETCH_HS)
cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
+ if (mask & SI_PREFETCH_ES)
cik_prefetch_shader_async(sctx, sctx->queued.named.es);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
+ if (mask & SI_PREFETCH_GS)
cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
} else if (sctx->gs_shader.cso) {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
+ if (mask & SI_PREFETCH_ES)
cik_prefetch_shader_async(sctx, sctx->queued.named.es);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_ES |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
+
+ if (mask & SI_PREFETCH_GS)
cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
} else {
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
+ if (mask & SI_PREFETCH_VS)
cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
- if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
+ if (mask & SI_PREFETCH_VBO_DESCRIPTORS)
cik_prefetch_VBO_descriptors(sctx);
+ if (vertex_stage_only) {
+ sctx->prefetch_L2_mask &= ~(SI_PREFETCH_VS |
+ SI_PREFETCH_VBO_DESCRIPTORS);
+ return;
+ }
}
}
- if (sctx->prefetch_L2_mask & SI_PREFETCH_PS)
+ if (mask & SI_PREFETCH_PS)
cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
sctx->prefetch_L2_mask = 0;
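/* Usage sketch, not part of the patch: the intended two-phase prefetch. A
 * draw path (hypothetical caller) prefetches only the API VS and the VBO
 * descriptors before the first draw and the remaining shaders afterwards;
 * the early return above leaves exactly those remaining bits set in
 * sctx->prefetch_L2_mask.
 */
static void example_two_phase_prefetch(struct si_context *sctx)
{
	if (sctx->prefetch_L2_mask)
		cik_emit_prefetch_L2(sctx, true);  /* VS + VBO descriptors */

	/* ... emit the draw packets ... */

	if (sctx->prefetch_L2_mask)
		cik_emit_prefetch_L2(sctx, false); /* the remaining shaders */
}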