/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "radeon/r600_cs.h"
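
/* Free a saved command stream: drop the captured GFX IB and release
 * the trace buffer used for debugging. */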
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
	si_clear_saved_cs(&scs->gfx);
	r600_resource_reference(&scs->trace_buf, NULL);
	free(scs);
}
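
/* Make sure enough IB space and memory budget remain for the next draw;
 * flush the GFX IB asynchronously if either limit would be exceeded. */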
void si_need_cs_space(struct si_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;

	/* There is no need to flush the DMA IB here, because
	 * r600_need_dma_space always flushes the GFX IB if there is
	 * a conflict, which means any unflushed DMA commands automatically
	 * precede the GFX IB (= they had no dependency on the GFX IB when
	 * they were submitted).
	 */

	/* There are two memory usage counters in the winsys for all buffers
	 * that have been added (cs_add_buffer) and two counters in the pipe
	 * driver for those that haven't been added yet.
	 */
	if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
						   ctx->b.vram, ctx->b.gtt))) {
		ctx->b.gtt = 0;
		ctx->b.vram = 0;
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		return;
	}
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* If the CS is sufficiently large, don't count the space needed
	 * and just flush if there is not enough space left.
	 */
	if (!ctx->b.ws->cs_check_space(cs, 2048))
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
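
/* Flush the GFX IB: emit the trailing cache flushes, submit the IB to the
 * winsys, and start a new, empty IB. */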
void si_context_gfx_flush(void *context, unsigned flags,
			  struct pipe_fence_handle **fence)
{
	struct si_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	if (ctx->gfx_flush_in_progress)
		return;

	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
		return;

	if (si_check_device_reset(&ctx->b))
		return;

	if (ctx->screen->b.debug_flags & DBG(CHECK_VM))
		flags &= ~RADEON_FLUSH_ASYNC;

	/* If the state tracker is flushing the GFX IB, r600_flush_from_st is
	 * responsible for flushing the DMA IB and merging the fences from both.
	 * This code is only needed when the driver flushes the GFX IB
	 * internally, and it never asks for a fence handle.
	 */
	if (radeon_emitted(ctx->b.dma.cs, 0)) {
		assert(fence == NULL); /* internal flushes only */
		ctx->b.dma.flush(ctx, flags, NULL);
	}

	ctx->gfx_flush_in_progress = true;

	si_preflush_suspend_features(&ctx->b);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		si_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
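
	/* Wait for in-flight shader work: CS_PARTIAL_FLUSH waits for compute
	 * shaders, PS_PARTIAL_FLUSH for the end of the graphics pipeline. */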
	ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
			SI_CONTEXT_PS_PARTIAL_FLUSH;

	/* DRM 3.1.0 doesn't flush TC for VI correctly. */
	if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
		ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
				SI_CONTEXT_INV_VMEM_L1;

	si_emit_cache_flush(ctx);

	if (ctx->current_saved_cs) {
		si_log_hw_flush(ctx);

		/* Save the IB for debug contexts. */
		si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
		ctx->current_saved_cs->flushed = true;
	}
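
	/* Flush the CS and keep a reference to the resulting fence. */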
	ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
	if (fence)
		ws->fence_reference(fence, ctx->b.last_gfx_fence);
	ctx->b.num_gfx_cs_flushes++;

	/* Check VM faults if needed. */
	if (ctx->screen->b.debug_flags & DBG(CHECK_VM)) {
		/* Use conservative timeout 800ms, after which we won't wait any
		 * longer and assume the GPU is hung.
		 */
		ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);

		si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
	}

	if (ctx->current_saved_cs)
		si_saved_cs_reference(&ctx->current_saved_cs, NULL);

	si_begin_new_cs(ctx);
	ctx->gfx_flush_in_progress = false;
}
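
/* For debug contexts, allocate a saved-CS object and a trace buffer so the
 * IB can be captured and inspected after a hang. */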
static void si_begin_cs_debug(struct si_context *ctx)
{
	static const uint32_t zeros[1];
	assert(!ctx->current_saved_cs);

	ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
	if (!ctx->current_saved_cs)
		return;

	pipe_reference_init(&ctx->current_saved_cs->reference, 1);
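
	/* A small staging buffer for trace IDs; after a hang, the last value
	 * written to it indicates how far the GPU advanced through the IB. */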
	ctx->current_saved_cs->trace_buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.b.screen, 0,
				   PIPE_USAGE_STAGING, 8);
	if (!ctx->current_saved_cs->trace_buf) {
		free(ctx->current_saved_cs);
		ctx->current_saved_cs = NULL;
		return;
	}

	pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->current_saved_cs->trace_buf->b.b,
				    0, sizeof(zeros), zeros);
	ctx->current_saved_cs->trace_id = 0;

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, ctx->current_saved_cs->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
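
/* Set up a new GFX IB: emit the initial register state and mark all
 * cached state dirty so it is re-emitted before the first draw. */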
void si_begin_new_cs(struct si_context *ctx)
{
	if (ctx->is_debug)
		si_begin_cs_debug(ctx);

	/* Flush read caches at the beginning of CS not flushed by the kernel. */
	if (ctx->b.chip_class >= CIK)
		ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
				SI_CONTEXT_INV_ICACHE;

	ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;

	/* Set all valid groups as dirty so they get re-emitted on
	 * the next draw command.
	 */
	si_pm4_reset_emitted(ctx);

	/* The CS initialization should be emitted before everything else. */
	si_pm4_emit(ctx, ctx->init_config);
	if (ctx->init_config_gs_rings)
		si_pm4_emit(ctx, ctx->init_config_gs_rings);
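
	/* Ask the next draw to prefetch the bound shaders and vertex-buffer
	 * descriptors into L2. */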
	if (ctx->queued.named.ls)
		ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
	if (ctx->queued.named.hs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
	if (ctx->queued.named.es)
		ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
	if (ctx->queued.named.gs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
	if (ctx->queued.named.vs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
	if (ctx->queued.named.ps)
		ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
	if (ctx->vertex_buffers.buffer && ctx->vertex_elements)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;

	/* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
	bool has_clear_state = ctx->screen->has_clear_state;
	if (has_clear_state) {
		ctx->framebuffer.dirty_cbufs =
			u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
		/* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
		ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
	} else {
		ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
		ctx->framebuffer.dirty_zsbuf = true;
	}
	/* This should always be marked as dirty to set the framebuffer scissor
	 * at least. */
	si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);

	si_mark_atom_dirty(ctx, &ctx->clip_regs);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->clip_state.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	ctx->msaa_sample_locs.nr_samples = 0;
	si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
	si_mark_atom_dirty(ctx, &ctx->msaa_config);
	/* CLEAR_STATE sets 0xffff. */
	if (!has_clear_state || ctx->sample_mask.sample_mask != 0xffff)
		si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	si_mark_atom_dirty(ctx, &ctx->cb_render_state);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->blend_color.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	si_mark_atom_dirty(ctx, &ctx->db_render_state);
	if (ctx->b.chip_class >= GFX9)
		si_mark_atom_dirty(ctx, &ctx->dpbb_state);
	si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	si_mark_atom_dirty(ctx, &ctx->spi_map);
	si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
	si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
	si_all_descriptors_begin_new_cs(ctx);
	si_all_resident_buffers_begin_new_cs(ctx);
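
	/* All viewport and scissor states must be re-emitted in the new IB. */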
	ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
	ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
	ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
	si_mark_atom_dirty(ctx, &ctx->scissors.atom);
	si_mark_atom_dirty(ctx, &ctx->viewports.atom);
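
	/* Re-account the scratch buffer against the new IB's memory budget. */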
	si_mark_atom_dirty(ctx, &ctx->scratch_state);
	if (ctx->scratch_buffer) {
		r600_context_add_resource_size(&ctx->b.b,
					       &ctx->scratch_buffer->b.b);
	}

	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		si_streamout_buffers_dirty(ctx);
	}

	si_postflush_resume_features(&ctx->b);

	assert(!ctx->b.gfx.cs->prev_dw);
	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;

	/* Invalidate various draw states so that they are emitted before
	 * the first draw call. */
	si_invalidate_draw_sh_constants(ctx);
	ctx->last_index_size = -1;
	ctx->last_primitive_restart_en = -1;
	ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
	ctx->last_gs_out_prim = -1;
	ctx->last_multi_vgt_param = -1;
	ctx->last_rast_prim = -1;
	ctx->last_sc_line_stipple = ~0;
	ctx->last_vs_state = ~0;
	ctx->last_tcs = NULL;
	ctx->last_tes_sh_base = -1;
	ctx->last_num_tcs_input_cp = -1;

	ctx->cs_shader_state.initialized = false;
}