/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
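/* This file manages the graphics command stream (CS): checking for space
 * before packets are emitted, flushing the gfx IB, and starting a new CS
 * with all required baseline state re-emitted. */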
static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
{
        /* Ensure we have enough space to start a new range in a hole */
        assert(element_size >= 3);

        /* 5 dwords for a possible load to reinitialize when we have no preamble
         * IB + 5 dwords for the write to L2 + 3 dwords for every range written
         * to CE RAM.
         */
        return 5 + 5 + 3 + count * element_size;
}
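/* Example: a list of 16 descriptors of 8 dwords each needs
 * 5 + 5 + 3 + 16*8 = 141 dwords of CS space. */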
static unsigned si_ce_needed_cs_space(void)
{
        unsigned space = 0;

        space += si_descriptor_list_cs_space(SI_NUM_CONST_BUFFERS, 4);
        space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS, 4);
        space += si_descriptor_list_cs_space(SI_NUM_SAMPLERS, 16);
        space += si_descriptor_list_cs_space(SI_NUM_IMAGES, 8);
        space *= SI_NUM_SHADERS;

        space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);

        /* Increment CE counter packet */
        space += 2;

        return space;
}
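/* This is a worst-case bound: it assumes every descriptor list of every
 * shader stage may be rewritten in full within a single command stream. */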
void si_need_cs_space(struct si_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
        struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
        struct radeon_winsys_cs *dma = ctx->b.dma.cs;

        /* Flush the DMA IB if it's not empty. */
        if (radeon_emitted(dma, 0))
                ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);

        /* There are two memory usage counters in the winsys for all buffers
         * that have been added (cs_add_buffer) and two counters in the pipe
         * driver for those that haven't been added yet.
         */
        if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs,
                                                       ctx->b.vram, ctx->b.gtt))) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
                ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
                return;
        }
        ctx->b.gtt = 0;
        ctx->b.vram = 0;

        /* If the CS is sufficiently large, don't count the space needed
         * and just flush if there is not enough space left.
         */
        if (!ctx->b.ws->cs_check_space(cs, 2048) ||
            (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
                ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
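/* Illustrative usage (a sketch, not part of this file): the draw entry point
 * calls si_need_cs_space() before building any packets, so a flush can still
 * happen while the CS is in a consistent state.
 *
 *      static void si_draw_vbo(struct pipe_context *pctx,
 *                              const struct pipe_draw_info *info)
 *      {
 *              struct si_context *sctx = (struct si_context *)pctx;
 *              si_need_cs_space(sctx);
 *              ... emit draw state and packets ...
 *      }
 */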
void si_context_gfx_flush(void *context, unsigned flags,
                          struct pipe_fence_handle **fence)
{
        struct si_context *ctx = context;
        struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
        struct radeon_winsys *ws = ctx->b.ws;

        if (ctx->gfx_flush_in_progress)
                return;

        ctx->gfx_flush_in_progress = true;

        /* An effectively empty CS (nothing beyond the baseline state) doesn't
         * need to be flushed; reuse the last fence instead. */
        if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
            (!fence || ctx->last_gfx_fence)) {
                if (fence)
                        ws->fence_reference(fence, ctx->last_gfx_fence);
                if (!(flags & RADEON_FLUSH_ASYNC))
                        ws->cs_sync_flush(cs);
                ctx->gfx_flush_in_progress = false;
                return;
        }

        r600_preflush_suspend_features(&ctx->b);

        ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                        SI_CONTEXT_PS_PARTIAL_FLUSH;

        /* DRM 3.1.0 doesn't flush TC for VI correctly. */
        if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
                ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
                                SI_CONTEXT_INV_VMEM_L1;

        si_emit_cache_flush(ctx, NULL);

        /* Force keeping the tiling flags. */
        flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

        if (ctx->is_debug) {
                /* Save the IB for debug contexts. */
                radeon_clear_saved_cs(&ctx->last_gfx);
                radeon_save_cs(ws, cs, &ctx->last_gfx);
                r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
                r600_resource_reference(&ctx->trace_buf, NULL);
        }

        /* Flush the CS. */
        ws->cs_flush(cs, flags, &ctx->last_gfx_fence);

        if (fence)
                ws->fence_reference(fence, ctx->last_gfx_fence);

        /* Check VM faults if needed. */
        if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
                /* Use a conservative timeout of 800 ms, after which we won't
                 * wait any longer and assume the GPU is hung.
                 */
                ctx->b.ws->fence_wait(ctx->b.ws, ctx->last_gfx_fence, 800*1000*1000);

                si_check_vm_faults(&ctx->b, &ctx->last_gfx, RING_GFX);
        }

        si_begin_new_cs(ctx);
        ctx->gfx_flush_in_progress = false;
}
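/* si_begin_new_cs() rebuilds the baseline contents of a fresh IB: the
 * init_config state, CE descriptor loads, and all state atoms are queued so
 * that the first draw emits a complete state vector. */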
void si_begin_new_cs(struct si_context *ctx)
{
        if (ctx->is_debug) {
                uint32_t zero = 0;

                /* Create a buffer used for writing trace IDs and initialize it to 0. */
                assert(!ctx->trace_buf);
                ctx->trace_buf = (struct r600_resource*)
                                 pipe_buffer_create(ctx->b.b.screen, PIPE_BIND_CUSTOM,
                                                    PIPE_USAGE_STAGING, 4);
                if (ctx->trace_buf)
                        pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
                                                    0, sizeof(zero), &zero);
        }
        /* Flush read caches at the beginning of CS not flushed by the kernel. */
        if (ctx->b.chip_class >= CIK)
                ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
                                SI_CONTEXT_INV_ICACHE;

        ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;

        /* Set all valid state groups as dirty so they get re-emitted on
         * the next draw command.
         */
        si_pm4_reset_emitted(ctx);
        /* The CS initialization should be emitted before everything else. */
        si_pm4_emit(ctx, ctx->init_config);
        if (ctx->init_config_gs_rings)
                si_pm4_emit(ctx, ctx->init_config_gs_rings);

        if (ctx->ce_preamble_ib)
                si_ce_enable_loads(ctx->ce_preamble_ib);
        else if (ctx->ce_ib)
                si_ce_enable_loads(ctx->ce_ib);

        if (ctx->ce_preamble_ib)
                si_ce_reinitialize_all_descriptors(ctx);
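        /* When a CE preamble IB is used, descriptor loads are enabled there
         * and every descriptor list is rewritten into CE RAM for the new CS;
         * without a preamble, loads are enabled in the per-CS CE IB instead. */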
        ctx->framebuffer.dirty_cbufs = (1 << 8) - 1;
        ctx->framebuffer.dirty_zsbuf = true;
        si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
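        /* Each "atom" is a small group of related register state with its own
         * emit function; marking it dirty makes the next draw re-emit it. */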
        si_mark_atom_dirty(ctx, &ctx->clip_regs);
        si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
        ctx->msaa_sample_locs.nr_samples = 0;
        si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
        si_mark_atom_dirty(ctx, &ctx->msaa_config);
        si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
        si_mark_atom_dirty(ctx, &ctx->cb_render_state);
        si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
        si_mark_atom_dirty(ctx, &ctx->db_render_state);
        si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
        si_mark_atom_dirty(ctx, &ctx->spi_map);
        si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
        si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
        si_all_descriptors_begin_new_cs(ctx);

        ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
        ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
        si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
        si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
        r600_postflush_resume_features(&ctx->b);

        assert(!ctx->b.gfx.cs->prev_dw);
        ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
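        /* initial_gfx_cs_size is the dword count of this baseline state;
         * si_context_gfx_flush() compares against it via radeon_emitted() to
         * detect a CS with no real work and skip the flush. */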
        /* Invalidate various draw states so that they are emitted before
         * the first draw call. */
        si_invalidate_draw_sh_constants(ctx);
        ctx->last_primitive_restart_en = -1;
        ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
        ctx->last_gs_out_prim = -1;
        ctx->last_prim = -1;
        ctx->last_multi_vgt_param = -1;
        ctx->last_ls_hs_config = -1;
        ctx->last_rast_prim = -1;
        ctx->last_sc_line_stipple = ~0;
        ctx->last_vtx_reuse_depth = -1;
        ctx->emit_scratch_reloc = true;
        ctx->last_tcs = NULL;
        ctx->last_tes_sh_base = -1;
        ctx->last_num_tcs_input_cp = -1;

        ctx->cs_shader_state.initialized = false;
}