void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
                        boolean count_draw_in)
{
+       struct radeon_winsys_cs *dma = ctx->b.rings.dma.cs;
+
+       /* Flush the DMA IB if it's not empty. */
+       if (dma && dma->cdw)
+               ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
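+       /* Inferred rationale (not stated in the patch): submitting the DMA IB
+        * before any more GFX work is queued keeps the two rings ordered, so
+        * GFX commands recorded below can depend on earlier DMA transfers. */
+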
        /* Flush if the memory usage of all referenced buffers is above the limit. */
        if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs,
                                              ctx->b.vram, ctx->b.gtt)) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
                ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
                return;
        }

        /* The number of dwords we already used in the CS so far. */
        num_dw += ctx->b.rings.gfx.cs->cdw;

        if (count_draw_in) {
-               unsigned i;
+               uint64_t mask;

                /* The number of dwords all the dirty states would take. */
-               i = r600_next_dirty_atom(ctx, 0);
-               while (i < R600_NUM_ATOMS) {
-                       num_dw += ctx->atoms[i]->num_dw;
+               mask = ctx->dirty_atoms;
+               while (mask != 0) {
+                       num_dw += ctx->atoms[u_bit_scan64(&mask)]->num_dw;
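+                       /* u_bit_scan64() returns the index of the lowest set
+                        * bit in *mask and clears it, so this loop visits
+                        * each dirty atom exactly once and then terminates. */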
                        if (ctx->screen->b.trace_bo) {
                                num_dw += R600_TRACE_CS_DWORDS;
                        }
-                       i = r600_next_dirty_atom(ctx, i + 1);
                }

                /* The upper-bound of how much space a draw command would take. */
                num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
        }

        /* Flush if there's not enough space. */
        if (num_dw > ctx->b.rings.gfx.cs->max_dw) {
                ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
        }
}

void r600_context_gfx_flush(void *context, unsigned flags,
                            struct pipe_fence_handle **fence)
{
        struct r600_context *ctx = context;
        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

        if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
                return;
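
        /* The CS still holds only the re-emitted initial state here, so there
         * is nothing to submit unless the caller needs a fence. Otherwise:
         * suspend persistent state, flush caches, hand the IB to the winsys
         * and immediately begin a new CS. */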
-       ctx->b.rings.gfx.flushing = true;
-
        r600_preflush_suspend_features(&ctx->b);

        /* flush the framebuffer cache */
        ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
                        R600_CONTEXT_FLUSH_AND_INV_CB |
                        R600_CONTEXT_FLUSH_AND_INV_DB |
                        R600_CONTEXT_FLUSH_AND_INV_CB_META |
                        R600_CONTEXT_FLUSH_AND_INV_DB_META |
                        R600_CONTEXT_WAIT_3D_IDLE |
                        R600_CONTEXT_WAIT_CP_DMA_IDLE;
        r600_flush_emit(ctx);

        /* Flush the CS. */
        ctx->b.ws->cs_flush(cs, flags, fence, ctx->screen->b.cs_count++);
-       ctx->b.rings.gfx.flushing = false;

        r600_begin_new_cs(ctx);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
        unsigned shader;
-       int i;
+
        ctx->b.flags = 0;
        ctx->b.gtt = 0;
        ctx->b.vram = 0;
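
        /* The new IB starts out empty; every piece of tracked state has to be
         * marked dirty and re-emitted before the next draw. */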
        r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
        r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
        r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
-       for (i = 0; i < R600_MAX_VIEWPORTS; i++) {
-               r600_mark_atom_dirty(ctx, &ctx->scissor[i].atom);
-               r600_mark_atom_dirty(ctx, &ctx->viewport[i].atom);
-       }
+       ctx->scissor.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+       ctx->scissor.atom.num_dw = R600_MAX_VIEWPORTS * 4;
+       r600_mark_atom_dirty(ctx, &ctx->scissor.atom);
+       ctx->viewport.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+       ctx->viewport.atom.num_dw = R600_MAX_VIEWPORTS * 8;
+       r600_mark_atom_dirty(ctx, &ctx->viewport.atom);
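+       /* One atom now covers all R600_MAX_VIEWPORTS scissors/viewports. The
+        * sizing assumes 4 dwords per scissor (a 2-dword SET_CONTEXT_REG
+        * header plus the TL/BR registers) and 8 per viewport (header plus
+        * the six XSCALE..ZOFFSET registers). */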
        if (ctx->b.chip_class < EVERGREEN) {
                r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
        }

/* In r600_cp_dma_copy_buffer(): */

        /* This must be done after r600_need_cs_space. */
        src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src,
-                                             RADEON_USAGE_READ, RADEON_PRIO_MIN);
+                                             RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
        dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst,
-                                             RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
+                                             RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
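        /* Had r600_need_cs_space() flushed, relocations added before the check
         * would have gone out with the old CS; adding them afterwards ensures
         * they land in the IB that carries this packet. */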
        radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
        radeon_emit(cs, src_offset);    /* SRC_ADDR_LO [31:0] */
        radeon_emit(cs, sync | ((src_offset >> 32) & 0xff));   /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
        radeon_emit(cs, dst_offset);    /* DST_ADDR_LO [31:0] */
        radeon_emit(cs, (dst_offset >> 32) & 0xff);     /* DST_ADDR_HI [7:0] */
        radeon_emit(cs, byte_count);    /* COMMAND [29:22] | BYTE_COUNT [20:0] */
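        /* The "4" in the PKT3 header is the payload size minus one: exactly
         * the five dwords emitted above follow it. */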

/* In r600_dma_copy_buffer(): */

        csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
        /* emit reloc before writing cs so that cs is always in consistent state */
        radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
-                                 RADEON_PRIO_MIN);
+                                 RADEON_PRIO_SDMA_BUFFER);
        radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
-                                 RADEON_PRIO_MIN);
+                                 RADEON_PRIO_SDMA_BUFFER);
        cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
        cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
        cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
        cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
        cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
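        /* The COPY packet moves whole dwords: csize is capped at
         * R600_DMA_COPY_MAX_SIZE_DW dwords per packet, and masking off the
         * low two address bits assumes both offsets are 4-byte aligned. */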