#include "si_pipe.h"
#include "radeon/r600_cs.h"
/* Return the worst-case number of CS dwords needed to upload one
 * descriptor list to CE RAM.
 *
 * \param count         number of descriptor slots in the list
 * \param element_size  size of one descriptor in dwords; must be >= 3 so
 *                      that a new disjoint range can always be started in
 *                      a hole between used slots
 * \return number of command-stream dwords to reserve
 */
static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
{
	/* Ensure we have enough space to start a new range in a hole */
	assert(element_size >= 3);

	/* 5 dwords for the write to L2 + 3 dwords for the packet header of
	 * every disjoint range written to CE RAM.  At most count/2 disjoint
	 * ranges can exist (integer division is intentional).
	 */
	return 5 + (3 * count / 2) + count * element_size;
}
-
-static unsigned si_ce_needed_cs_space(void)
-{
- unsigned space = 0;
-
- space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS +
- SI_NUM_CONST_BUFFERS, 4);
- /* two 8-byte images share one 16-byte slot */
- space += si_descriptor_list_cs_space(SI_NUM_IMAGES / 2 +
- SI_NUM_SAMPLERS, 16);
- space *= SI_NUM_SHADERS;
-
- space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);
-
- /* Increment CE counter packet */
- space += 2;
-
- return space;
-}
-
/* Free a saved-CS debug snapshot.
 *
 * Releases the saved GFX IB copy, drops the reference to the trace
 * buffer, and frees the container itself.  Takes ownership of \p scs;
 * the pointer is invalid after this call.
 */
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
	radeon_clear_saved_cs(&scs->gfx);
	r600_resource_reference(&scs->trace_buf, NULL);
	free(scs);
}
void si_need_cs_space(struct si_context *ctx)
{
struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
- struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
/* There is no need to flush the DMA IB here, because
* r600_need_dma_space always flushes the GFX IB if there is
/* If the CS is sufficiently large, don't count the space needed
* and just flush if there is not enough space left.
*/
- if (!ctx->b.ws->cs_check_space(cs, 2048) ||
- (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
+ if (!ctx->b.ws->cs_check_space(cs, 2048))
ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
ctx->gfx_flush_in_progress = true;
- /* This CE dump should be done in parallel with the last draw. */
- if (ctx->ce_ib)
- si_ce_save_all_descriptors_at_ib_end(ctx);
-
r600_preflush_suspend_features(&ctx->b);
ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
/* Save the IB for debug contexts. */
radeon_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
- if (ctx->ce_ib)
- radeon_save_cs(ws, ctx->ce_ib, &ctx->current_saved_cs->ce, false);
ctx->current_saved_cs->flushed = true;
}
static void si_begin_cs_debug(struct si_context *ctx)
{
- static const uint32_t zeros[2];
+ static const uint32_t zeros[1];
assert(!ctx->current_saved_cs);
ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
if (ctx->init_config_gs_rings)
si_pm4_emit(ctx, ctx->init_config_gs_rings);
- if (ctx->ce_preamble_ib)
- si_ce_enable_loads(ctx->ce_preamble_ib);
- else if (ctx->ce_ib)
- si_ce_enable_loads(ctx->ce_ib);
-
- if (ctx->ce_ib)
- si_ce_restore_all_descriptors_at_ib_start(ctx);
-
if (ctx->queued.named.ls)
ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
if (ctx->queued.named.hs)