/* NOTE(review): this span is a unified-diff hunk, not plain C source.
 * Lines prefixed with '-' are removals from a refactor that hoists the
 * cs-space check, dirty-block emission and stream-out start out of the
 * per-chip draw functions and into the common caller (see the matching
 * '+' additions later in this file). Read the non-prefixed lines as the
 * post-patch body of the function.
 *
 * Emits the Evergreen draw packet for 'draw' into the context's command
 * stream. NOTE(review): 'pm4' is written at index 6 below but its setup
 * (pm4 = &ctx->pm4[ctx->pm4_cdwords]) is elided from this excerpt —
 * presumably present between the context lines; confirm against the full
 * file. 'va' likewise appears unused here because its uses are elided. */
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
{
unsigned ndwords = 7;
/* removed: these locals served only the dirty-list walk deleted below */
- struct r600_block *dirty_block = NULL;
- struct r600_block *next_block;
uint32_t *pm4;
uint64_t va;
/* when increasing ndwords, bump the max limit too */
assert(ndwords <= R600_MAX_DRAW_CS_DWORDS);
/* removed: space reservation + dirty-state flush now happen in the caller
 * before this function is entered */
- r600_need_cs_space(ctx, 0, TRUE);
- assert(ctx->pm4_cdwords + ctx->pm4_dirty_cdwords + ndwords < RADEON_MAX_CMDBUF_DWORDS);
-
- /* enough room to copy packet */
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->dirty,list) {
- r600_context_block_emit_dirty(ctx, dirty_block);
- }
-
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->resource_dirty,list) {
- r600_context_block_resource_emit_dirty(ctx, dirty_block);
- }
-
- /* Enable stream out if needed. */
- if (ctx->streamout_start) {
- r600_context_streamout_begin(ctx);
- ctx->streamout_start = FALSE;
- }
-
/* queries need some special values
 * (this is non-zero if any query is active) */
if (ctx->num_cs_dw_queries_suspend) {
pm4[6] = draw->vgt_draw_initiator;
}
/* advance the write cursor past the packet just emitted */
ctx->pm4_cdwords += ndwords;
/* removed: flag setting and dirty-dword reset are now the caller's job,
 * so both chip paths share one copy of this bookkeeping */
-
- ctx->flags |= (R600_CONTEXT_DRAW_PENDING | R600_CONTEXT_DST_CACHES_DIRTY);
-
- /* all dirty state have been scheduled in current cs */
- ctx->pm4_dirty_cdwords = 0;
}
void evergreen_context_flush_dest_caches(struct r600_context *ctx)
void r600_context_streamout_begin(struct r600_context *ctx);
void r600_context_streamout_end(struct r600_context *ctx);
void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t);
+void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
+void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block);
+void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r600_block *block);
int evergreen_context_init(struct r600_context *ctx);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
/* NOTE(review): diff hunk, not plain C — '-' lines are removals mirroring
 * the same refactor applied to evergreen_context_draw: cs-space check,
 * dirty-state emission, stream-out begin, flag setting and the
 * pm4_dirty_cdwords reset all move to the common caller.
 *
 * Emits the pre-Evergreen (r600) draw packet for 'draw'. The packet body
 * between pm4[2] and pm4[6] (pm4[3..5]) is elided from this excerpt —
 * confirm against the full file. */
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
{
unsigned ndwords = 7;
/* removed: only needed by the deleted dirty-list walk */
- struct r600_block *dirty_block = NULL;
- struct r600_block *next_block;
uint32_t *pm4;
/* indexed draws take a larger packet; the elided branch presumably bumps
 * ndwords — TODO confirm against the full file */
if (draw->indices) {
/* when increasing ndwords, bump the max limit too */
assert(ndwords <= R600_MAX_DRAW_CS_DWORDS);
/* removed: handled by the caller before the draw is dispatched */
- r600_need_cs_space(ctx, 0, TRUE);
- assert(ctx->pm4_cdwords + ctx->pm4_dirty_cdwords + ndwords < RADEON_MAX_CMDBUF_DWORDS);
-
- /* enough room to copy packet */
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->dirty, list) {
- r600_context_block_emit_dirty(ctx, dirty_block);
- }
-
- LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->resource_dirty, list) {
- r600_context_block_resource_emit_dirty(ctx, dirty_block);
- }
-
- /* Enable stream out if needed. */
- if (ctx->streamout_start) {
- r600_context_streamout_begin(ctx);
- ctx->streamout_start = FALSE;
- }
-
/* queries need some special values
 * (this is non-zero if any query is active) */
if (ctx->num_cs_dw_queries_suspend) {
/* draw packet */
pm4 = &ctx->pm4[ctx->pm4_cdwords];
-
pm4[0] = PKT3(PKT3_INDEX_TYPE, 0, ctx->predicate_drawing);
pm4[1] = draw->vgt_index_type;
pm4[2] = PKT3(PKT3_NUM_INSTANCES, 0, ctx->predicate_drawing);
pm4[6] = draw->vgt_draw_initiator;
}
/* advance the write cursor past the packet just emitted */
ctx->pm4_cdwords += ndwords;
/* removed: bookkeeping moved to the shared caller */
-
- ctx->flags |= (R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING);
-
- /* all dirty state have been scheduled in current cs */
- ctx->pm4_dirty_cdwords = 0;
}
void r600_context_flush(struct r600_context *ctx, unsigned flags)
/*
* r600_hw_context.c
*/
-void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
- boolean count_draw_in);
-
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
unsigned flush_mask, struct r600_resource *rbo);
struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
unsigned opcode, unsigned offset_base);
void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block);
-void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block);
-void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r600_block *block);
void r600_context_dirty_block(struct r600_context *ctx, struct r600_block *block,
int dirty, int index);
int r600_setup_block_table(struct r600_context *ctx);
struct r600_draw rdraw = {};
struct pipe_index_buffer ib = {};
unsigned prim, mask, ls_mask = 0;
+ struct r600_block *dirty_block = NULL, *next_block = NULL;
if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
(info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
rdraw.db_render_override = dsa->db_render_override;
rdraw.db_render_control = dsa->db_render_control;
+ /* Emit states. */
+ r600_need_cs_space(rctx, 0, TRUE);
+
+ LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty,list) {
+ r600_context_block_emit_dirty(rctx, dirty_block);
+ }
+ LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->resource_dirty,list) {
+ r600_context_block_resource_emit_dirty(rctx, dirty_block);
+ }
+ rctx->pm4_dirty_cdwords = 0;
+
+ /* Enable stream out if needed. */
+ if (rctx->streamout_start) {
+ r600_context_streamout_begin(rctx);
+ rctx->streamout_start = FALSE;
+ }
+
if (rctx->chip_class >= EVERGREEN) {
evergreen_context_draw(rctx, &rdraw);
} else {
r600_context_draw(rctx, &rdraw);
}
+ rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;
+
if (rctx->framebuffer.zsbuf)
{
struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;