/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
30 void si_need_cs_space(struct si_context
*ctx
)
32 struct radeon_winsys_cs
*cs
= ctx
->b
.gfx
.cs
;
33 struct radeon_winsys_cs
*dma
= ctx
->b
.dma
.cs
;
35 /* Flush the DMA IB if it's not empty. */
37 ctx
->b
.dma
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
39 /* There are two memory usage counters in the winsys for all buffers
40 * that have been added (cs_add_buffer) and two counters in the pipe
41 * driver for those that haven't been added yet.
43 if (unlikely(!ctx
->b
.ws
->cs_memory_below_limit(ctx
->b
.gfx
.cs
,
44 ctx
->b
.vram
, ctx
->b
.gtt
))) {
47 ctx
->b
.gfx
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
53 /* If the CS is sufficiently large, don't count the space needed
54 * and just flush if there is not enough space left.
56 if (unlikely(cs
->cdw
> cs
->max_dw
- 2048))
57 ctx
->b
.gfx
.flush(ctx
, RADEON_FLUSH_ASYNC
, NULL
);
60 void si_context_gfx_flush(void *context
, unsigned flags
,
61 struct pipe_fence_handle
**fence
)
63 struct si_context
*ctx
= context
;
64 struct radeon_winsys_cs
*cs
= ctx
->b
.gfx
.cs
;
65 struct radeon_winsys
*ws
= ctx
->b
.ws
;
67 if (ctx
->gfx_flush_in_progress
)
70 ctx
->gfx_flush_in_progress
= true;
72 if (cs
->cdw
== ctx
->b
.initial_gfx_cs_size
&&
73 (!fence
|| ctx
->last_gfx_fence
)) {
75 ws
->fence_reference(fence
, ctx
->last_gfx_fence
);
76 if (!(flags
& RADEON_FLUSH_ASYNC
))
77 ws
->cs_sync_flush(cs
);
78 ctx
->gfx_flush_in_progress
= false;
82 r600_preflush_suspend_features(&ctx
->b
);
84 ctx
->b
.flags
|= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER
|
85 SI_CONTEXT_INV_VMEM_L1
|
86 SI_CONTEXT_INV_GLOBAL_L2
|
87 /* this is probably not needed anymore */
88 SI_CONTEXT_PS_PARTIAL_FLUSH
;
89 si_emit_cache_flush(ctx
, NULL
);
91 /* force to keep tiling flags */
92 flags
|= RADEON_FLUSH_KEEP_TILING_FLAGS
;
100 /* Save the IB for debug contexts. */
102 ctx
->last_ib_dw_size
= cs
->cdw
;
103 ctx
->last_ib
= malloc(cs
->cdw
* 4);
104 memcpy(ctx
->last_ib
, cs
->buf
, cs
->cdw
* 4);
105 r600_resource_reference(&ctx
->last_trace_buf
, ctx
->trace_buf
);
106 r600_resource_reference(&ctx
->trace_buf
, NULL
);
108 /* Save the buffer list. */
109 if (ctx
->last_bo_list
) {
110 for (i
= 0; i
< ctx
->last_bo_count
; i
++)
111 pb_reference(&ctx
->last_bo_list
[i
].buf
, NULL
);
112 free(ctx
->last_bo_list
);
114 ctx
->last_bo_count
= ws
->cs_get_buffer_list(cs
, NULL
);
115 ctx
->last_bo_list
= calloc(ctx
->last_bo_count
,
116 sizeof(ctx
->last_bo_list
[0]));
117 ws
->cs_get_buffer_list(cs
, ctx
->last_bo_list
);
121 ws
->cs_flush(cs
, flags
, &ctx
->last_gfx_fence
,
122 ctx
->screen
->b
.cs_count
++);
125 ws
->fence_reference(fence
, ctx
->last_gfx_fence
);
127 /* Check VM faults if needed. */
128 if (ctx
->screen
->b
.debug_flags
& DBG_CHECK_VM
)
129 si_check_vm_faults(ctx
);
131 si_begin_new_cs(ctx
);
132 ctx
->gfx_flush_in_progress
= false;
135 void si_begin_new_cs(struct si_context
*ctx
)
140 /* Create a buffer used for writing trace IDs and initialize it to 0. */
141 assert(!ctx
->trace_buf
);
142 ctx
->trace_buf
= (struct r600_resource
*)
143 pipe_buffer_create(ctx
->b
.b
.screen
, PIPE_BIND_CUSTOM
,
144 PIPE_USAGE_STAGING
, 4);
146 pipe_buffer_write_nooverlap(&ctx
->b
.b
, &ctx
->trace_buf
->b
.b
,
147 0, sizeof(zero
), &zero
);
154 /* Flush read caches at the beginning of CS. */
155 ctx
->b
.flags
|= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER
|
156 SI_CONTEXT_INV_VMEM_L1
|
157 SI_CONTEXT_INV_GLOBAL_L2
|
158 SI_CONTEXT_INV_SMEM_L1
|
159 SI_CONTEXT_INV_ICACHE
;
161 /* set all valid group as dirty so they get reemited on
164 si_pm4_reset_emitted(ctx
);
166 /* The CS initialization should be emitted before everything else. */
167 si_pm4_emit(ctx
, ctx
->init_config
);
168 if (ctx
->init_config_gs_rings
)
169 si_pm4_emit(ctx
, ctx
->init_config_gs_rings
);
171 ctx
->framebuffer
.dirty_cbufs
= (1 << 8) - 1;
172 ctx
->framebuffer
.dirty_zsbuf
= true;
173 si_mark_atom_dirty(ctx
, &ctx
->framebuffer
.atom
);
175 si_mark_atom_dirty(ctx
, &ctx
->clip_regs
);
176 si_mark_atom_dirty(ctx
, &ctx
->clip_state
.atom
);
177 si_mark_atom_dirty(ctx
, &ctx
->msaa_sample_locs
);
178 si_mark_atom_dirty(ctx
, &ctx
->msaa_config
);
179 si_mark_atom_dirty(ctx
, &ctx
->sample_mask
.atom
);
180 si_mark_atom_dirty(ctx
, &ctx
->cb_render_state
);
181 si_mark_atom_dirty(ctx
, &ctx
->blend_color
.atom
);
182 si_mark_atom_dirty(ctx
, &ctx
->db_render_state
);
183 si_mark_atom_dirty(ctx
, &ctx
->stencil_ref
.atom
);
184 si_mark_atom_dirty(ctx
, &ctx
->spi_map
);
185 si_mark_atom_dirty(ctx
, &ctx
->b
.streamout
.enable_atom
);
186 si_mark_atom_dirty(ctx
, &ctx
->b
.render_cond_atom
);
187 si_all_descriptors_begin_new_cs(ctx
);
189 ctx
->scissors
.dirty_mask
= (1 << SI_MAX_VIEWPORTS
) - 1;
190 ctx
->viewports
.dirty_mask
= (1 << SI_MAX_VIEWPORTS
) - 1;
191 si_mark_atom_dirty(ctx
, &ctx
->scissors
.atom
);
192 si_mark_atom_dirty(ctx
, &ctx
->viewports
.atom
);
194 r600_postflush_resume_features(&ctx
->b
);
196 ctx
->b
.initial_gfx_cs_size
= ctx
->b
.gfx
.cs
->cdw
;
198 /* Invalidate various draw states so that they are emitted before
199 * the first draw call. */
200 si_invalidate_draw_sh_constants(ctx
);
201 ctx
->last_primitive_restart_en
= -1;
202 ctx
->last_restart_index
= SI_RESTART_INDEX_UNKNOWN
;
203 ctx
->last_gs_out_prim
= -1;
205 ctx
->last_multi_vgt_param
= -1;
206 ctx
->last_ls_hs_config
= -1;
207 ctx
->last_rast_prim
= -1;
208 ctx
->last_sc_line_stipple
= ~0;
209 ctx
->emit_scratch_reloc
= true;
211 ctx
->last_tcs
= NULL
;
212 ctx
->last_tes_sh_base
= -1;
213 ctx
->last_num_tcs_input_cp
= -1;