/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
30 void si_need_cs_space(struct si_context
*ctx
, unsigned num_dw
,
31 boolean count_draw_in
)
33 struct radeon_winsys_cs
*cs
= ctx
->b
.rings
.gfx
.cs
;
36 /* If the CS is sufficiently large, don't count the space needed
37 * and just flush if there is less than 8096 dwords left. */
38 if (cs
->max_dw
>= 24 * 1024) {
39 if (cs
->cdw
> cs
->max_dw
- 8 * 1024)
40 ctx
->b
.rings
.gfx
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
44 /* There are two memory usage counters in the winsys for all buffers
45 * that have been added (cs_add_reloc) and two counters in the pipe
46 * driver for those that haven't been added yet.
48 if (!ctx
->b
.ws
->cs_memory_below_limit(ctx
->b
.rings
.gfx
.cs
, ctx
->b
.vram
, ctx
->b
.gtt
)) {
51 ctx
->b
.rings
.gfx
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
57 /* The number of dwords we already used in the CS so far. */
61 for (i
= 0; i
< SI_NUM_ATOMS(ctx
); i
++) {
62 if (ctx
->atoms
.array
[i
]->dirty
) {
63 num_dw
+= ctx
->atoms
.array
[i
]->num_dw
;
67 /* The number of dwords all the dirty states would take. */
68 num_dw
+= si_pm4_dirty_dw(ctx
);
70 /* The upper-bound of how much a draw command would take. */
71 num_dw
+= SI_MAX_DRAW_CS_DWORDS
;
74 /* Count in queries_suspend. */
75 num_dw
+= ctx
->b
.num_cs_dw_nontimer_queries_suspend
+
76 ctx
->b
.num_cs_dw_timer_queries_suspend
;
78 /* Count in streamout_end at the end of CS. */
79 if (ctx
->b
.streamout
.begin_emitted
) {
80 num_dw
+= ctx
->b
.streamout
.num_dw_for_end
;
83 /* Count in render_condition(NULL) at the end of CS. */
84 if (ctx
->b
.predicate_drawing
) {
88 /* Count in framebuffer cache flushes at the end of CS. */
89 num_dw
+= ctx
->atoms
.s
.cache_flush
->num_dw
;
92 if (ctx
->screen
->b
.trace_bo
) {
93 num_dw
+= SI_TRACE_CS_DWORDS
;
97 /* Flush if there's not enough space. */
98 if (num_dw
> cs
->max_dw
) {
99 ctx
->b
.rings
.gfx
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
103 void si_context_gfx_flush(void *context
, unsigned flags
,
104 struct pipe_fence_handle
**fence
)
106 struct si_context
*ctx
= context
;
107 struct radeon_winsys_cs
*cs
= ctx
->b
.rings
.gfx
.cs
;
108 struct radeon_winsys
*ws
= ctx
->b
.ws
;
110 if (cs
->cdw
== ctx
->b
.initial_gfx_cs_size
&&
111 (!fence
|| ctx
->last_gfx_fence
)) {
113 ws
->fence_reference(fence
, ctx
->last_gfx_fence
);
114 if (!(flags
& RADEON_FLUSH_ASYNC
))
115 ws
->cs_sync_flush(cs
);
119 ctx
->b
.rings
.gfx
.flushing
= true;
121 r600_preflush_suspend_features(&ctx
->b
);
123 ctx
->b
.flags
|= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER
|
124 SI_CONTEXT_INV_TC_L1
|
125 SI_CONTEXT_INV_TC_L2
|
126 /* this is probably not needed anymore */
127 SI_CONTEXT_PS_PARTIAL_FLUSH
;
128 si_emit_cache_flush(&ctx
->b
, NULL
);
130 /* force to keep tiling flags */
131 flags
|= RADEON_FLUSH_KEEP_TILING_FLAGS
;
134 ws
->cs_flush(cs
, flags
, &ctx
->last_gfx_fence
,
135 ctx
->screen
->b
.cs_count
++);
136 ctx
->b
.rings
.gfx
.flushing
= false;
139 ws
->fence_reference(fence
, ctx
->last_gfx_fence
);
142 if (ctx
->screen
->b
.trace_bo
) {
143 struct si_screen
*sscreen
= ctx
->screen
;
146 for (i
= 0; i
< 10; i
++) {
148 if (!ws
->buffer_is_busy(sscreen
->b
.trace_bo
->buf
, RADEON_USAGE_READWRITE
)) {
153 fprintf(stderr
, "timeout on cs lockup likely happen at cs %d dw %d\n",
154 sscreen
->b
.trace_ptr
[1], sscreen
->b
.trace_ptr
[0]);
156 fprintf(stderr
, "cs %d executed in %dms\n", sscreen
->b
.trace_ptr
[1], i
* 5);
161 si_begin_new_cs(ctx
);
164 void si_begin_new_cs(struct si_context
*ctx
)
166 /* Flush read caches at the beginning of CS. */
167 ctx
->b
.flags
|= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER
|
168 SI_CONTEXT_INV_TC_L1
|
169 SI_CONTEXT_INV_TC_L2
|
170 SI_CONTEXT_INV_KCACHE
|
171 SI_CONTEXT_INV_ICACHE
;
173 /* set all valid group as dirty so they get reemited on
176 si_pm4_reset_emitted(ctx
);
178 /* The CS initialization should be emitted before everything else. */
179 si_pm4_emit(ctx
, ctx
->init_config
);
181 ctx
->clip_regs
.dirty
= true;
182 ctx
->framebuffer
.atom
.dirty
= true;
183 ctx
->msaa_sample_locs
.dirty
= true;
184 ctx
->msaa_config
.dirty
= true;
185 ctx
->db_render_state
.dirty
= true;
186 ctx
->b
.streamout
.enable_atom
.dirty
= true;
187 si_all_descriptors_begin_new_cs(ctx
);
189 r600_postflush_resume_features(&ctx
->b
);
191 ctx
->b
.initial_gfx_cs_size
= ctx
->b
.rings
.gfx
.cs
->cdw
;
193 /* Invalidate various draw states so that they are emitted before
194 * the first draw call. */
195 si_invalidate_draw_sh_constants(ctx
);
196 ctx
->last_primitive_restart_en
= -1;
197 ctx
->last_restart_index
= SI_RESTART_INDEX_UNKNOWN
;
198 ctx
->last_gs_out_prim
= -1;
200 ctx
->last_multi_vgt_param
= -1;
201 ctx
->last_ls_hs_config
= -1;
202 ctx
->last_rast_prim
= -1;
203 ctx
->last_sc_line_stipple
= ~0;
204 ctx
->emit_scratch_reloc
= true;
206 ctx
->last_tcs
= NULL
;
207 ctx
->last_tes_sh_base
= -1;
208 ctx
->last_num_tcs_input_cp
= -1;