/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
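
/* This file manages the lifecycle of the GFX command stream (IB): ensuring
 * there is enough space before commands are emitted, flushing/submitting a
 * finished IB, and re-initializing tracked state for the next one. */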
#include "si_pipe.h"
#include "radeon/r600_cs.h"
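
/* Destroy a saved command-stream snapshot: release the copied GFX IB and the
 * trace buffer reference, then free the container itself. */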
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
	radeon_clear_saved_cs(&scs->gfx);
	r600_resource_reference(&scs->trace_buf, NULL);
	free(scs);
}
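
/* Ensure there is enough memory and command-stream space left to record more
 * commands; otherwise flush the GFX IB asynchronously and start a new one. */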
void si_need_cs_space(struct si_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;

	/* There is no need to flush the DMA IB here, because
	 * r600_need_dma_space always flushes the GFX IB if there is
	 * a conflict, which means any unflushed DMA commands automatically
	 * precede the GFX IB (= they had no dependency on the GFX IB when
	 * they were submitted).
	 */

	/* There are two memory usage counters in the winsys for all buffers
	 * that have been added (cs_add_buffer) and two counters in the pipe
	 * driver for those that haven't been added yet.
	 */
	if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
						   ctx->b.vram, ctx->b.gtt))) {
		ctx->b.gtt = 0;
		ctx->b.vram = 0;
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		return;
	}
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* If the CS is sufficiently large, don't count the space needed
	 * and just flush if there is not enough space left.
	 */
	if (!ctx->b.ws->cs_check_space(cs, 2048))
		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
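
/* Flush (submit) the current GFX IB: emit the trailing cache flushes, hand
 * the IB to the winsys, optionally return a fence, and begin a new IB. */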
void si_context_gfx_flush(void *context, unsigned flags,
			  struct pipe_fence_handle **fence)
{
	struct si_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	if (ctx->gfx_flush_in_progress)
		return;

	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
		return;

	if (r600_check_device_reset(&ctx->b))
		return;

	if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
		flags &= ~RADEON_FLUSH_ASYNC;

	/* If the state tracker is flushing the GFX IB, r600_flush_from_st is
	 * responsible for flushing the DMA IB and merging the fences from both.
	 * This code is only needed when the driver flushes the GFX IB
	 * internally, and it never asks for a fence handle.
	 */
	if (radeon_emitted(ctx->b.dma.cs, 0)) {
		assert(fence == NULL); /* internal flushes only */
		ctx->b.dma.flush(ctx, flags, NULL);
	}

	ctx->gfx_flush_in_progress = true;

	r600_preflush_suspend_features(&ctx->b);

	ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
			SI_CONTEXT_PS_PARTIAL_FLUSH;

	/* DRM 3.1.0 doesn't flush TC for VI correctly. */
	if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
		ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
				SI_CONTEXT_INV_VMEM_L1;

	si_emit_cache_flush(ctx);
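
	/* For debug contexts, capture the IB before submission so it can be
	 * dumped later if a hang or VM fault is detected. */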
	if (ctx->current_saved_cs) {
		si_trace_emit(ctx);
		si_log_hw_flush(ctx);

		/* Save the IB for debug contexts. */
		radeon_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
		ctx->current_saved_cs->flushed = true;
	}

	/* Flush the CS. */
	ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
	if (fence)
		ws->fence_reference(fence, ctx->b.last_gfx_fence);
	ctx->b.num_gfx_cs_flushes++;

	/* Check VM faults if needed. */
	if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
		/* Use conservative timeout 800ms, after which we won't wait any
		 * longer and assume the GPU is hung.
		 */
		ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);

		si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
	}

	if (ctx->current_saved_cs)
		si_saved_cs_reference(&ctx->current_saved_cs, NULL);

	si_begin_new_cs(ctx);
	ctx->gfx_flush_in_progress = false;
}
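
/* For debug contexts: allocate a fresh si_saved_cs with a small trace buffer.
 * Trace IDs written into this buffer by the IB make it possible to tell how
 * far a hung IB got before the hang. */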
static void si_begin_cs_debug(struct si_context *ctx)
{
	static const uint32_t zeros[1];
	assert(!ctx->current_saved_cs);

	ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
	if (!ctx->current_saved_cs)
		return;

	pipe_reference_init(&ctx->current_saved_cs->reference, 1);

	ctx->current_saved_cs->trace_buf = (struct r600_resource*)
				 pipe_buffer_create(ctx->b.b.screen, 0,
						    PIPE_USAGE_STAGING, 8);
	if (!ctx->current_saved_cs->trace_buf) {
		free(ctx->current_saved_cs);
		ctx->current_saved_cs = NULL;
		return;
	}

	pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->current_saved_cs->trace_buf->b.b,
				    0, sizeof(zeros), zeros);
	ctx->current_saved_cs->trace_id = 0;

	si_trace_emit(ctx);

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, ctx->current_saved_cs->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
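
/* Initialize a newly started IB: emit the init config state and mark all
 * tracked state as dirty so it is re-emitted before the first draw call. */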
void si_begin_new_cs(struct si_context *ctx)
{
	if (ctx->is_debug)
		si_begin_cs_debug(ctx);

	/* Flush read caches at the beginning of CS not flushed by the kernel. */
	if (ctx->b.chip_class >= CIK)
		ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
				SI_CONTEXT_INV_ICACHE;

	ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;

	/* Set all valid groups as dirty so they get re-emitted on
	 * the next draw command.
	 */
	si_pm4_reset_emitted(ctx);

	/* The CS initialization should be emitted before everything else. */
	si_pm4_emit(ctx, ctx->init_config);
	if (ctx->init_config_gs_rings)
		si_pm4_emit(ctx, ctx->init_config_gs_rings);
	if (ctx->queued.named.ls)
		ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
	if (ctx->queued.named.hs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
	if (ctx->queued.named.es)
		ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
	if (ctx->queued.named.gs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
	if (ctx->queued.named.vs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
	if (ctx->queued.named.ps)
		ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
	if (ctx->vertex_buffers.buffer && ctx->vertex_elements)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;

	/* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
	bool has_clear_state = ctx->screen->has_clear_state;
	if (has_clear_state) {
		ctx->framebuffer.dirty_cbufs =
			u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
		/* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
		ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
	} else {
		ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
		ctx->framebuffer.dirty_zsbuf = true;
	}
	/* This should always be marked as dirty to set the framebuffer scissor
	 * at least. */
	si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);

	si_mark_atom_dirty(ctx, &ctx->clip_regs);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->clip_state.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	ctx->msaa_sample_locs.nr_samples = 0;
	si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
	si_mark_atom_dirty(ctx, &ctx->msaa_config);
	/* CLEAR_STATE sets 0xffff. */
	if (!has_clear_state || ctx->sample_mask.sample_mask != 0xffff)
		si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	si_mark_atom_dirty(ctx, &ctx->cb_render_state);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->blend_color.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	si_mark_atom_dirty(ctx, &ctx->db_render_state);
	if (ctx->b.chip_class >= GFX9)
		si_mark_atom_dirty(ctx, &ctx->dpbb_state);
	si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	si_mark_atom_dirty(ctx, &ctx->spi_map);
	si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
	si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
	si_all_descriptors_begin_new_cs(ctx);
	si_all_resident_buffers_begin_new_cs(ctx);

	ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
	si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);

	si_mark_atom_dirty(ctx, &ctx->scratch_state);
	if (ctx->scratch_buffer) {
		r600_context_add_resource_size(&ctx->b.b,
					       &ctx->scratch_buffer->b.b);
	}

	r600_postflush_resume_features(&ctx->b);
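
	/* Remember the size of the just-initialized IB so that flushes with
	 * no real work can be skipped (see the radeon_emitted check in
	 * si_context_gfx_flush). */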
	assert(!ctx->b.gfx.cs->prev_dw);
	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;

	/* Invalidate various draw states so that they are emitted before
	 * the first draw call. */
	si_invalidate_draw_sh_constants(ctx);
	ctx->last_index_size = -1;
	ctx->last_primitive_restart_en = -1;
	ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
	ctx->last_gs_out_prim = -1;
	ctx->last_prim = -1;
	ctx->last_multi_vgt_param = -1;
	ctx->last_rast_prim = -1;
	ctx->last_sc_line_stipple = ~0;
	ctx->last_vs_state = ~0;
	ctx->last_ls = NULL;
	ctx->last_tcs = NULL;
	ctx->last_tes_sh_base = -1;
	ctx->last_num_tcs_input_cp = -1;

	ctx->cs_shader_state.initialized = false;
}