2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <unistd.h>

#include "../radeon/r600_cs.h"
#include "util/u_memory.h"
35 void si_need_cs_space(struct si_context
*ctx
, unsigned num_dw
,
36 boolean count_draw_in
)
40 /* The number of dwords we already used in the CS so far. */
41 num_dw
+= ctx
->b
.rings
.gfx
.cs
->cdw
;
43 for (i
= 0; i
< SI_NUM_ATOMS(ctx
); i
++) {
44 if (ctx
->atoms
.array
[i
]->dirty
) {
45 num_dw
+= ctx
->atoms
.array
[i
]->num_dw
;
50 /* The number of dwords all the dirty states would take. */
51 num_dw
+= ctx
->pm4_dirty_cdwords
;
53 /* The upper-bound of how much a draw command would take. */
54 num_dw
+= SI_MAX_DRAW_CS_DWORDS
;
57 /* Count in queries_suspend. */
58 num_dw
+= ctx
->b
.num_cs_dw_nontimer_queries_suspend
;
60 /* Count in streamout_end at the end of CS. */
61 if (ctx
->b
.streamout
.begin_emitted
) {
62 num_dw
+= ctx
->b
.streamout
.num_dw_for_end
;
65 /* Count in render_condition(NULL) at the end of CS. */
66 if (ctx
->b
.predicate_drawing
) {
70 /* Count in framebuffer cache flushes at the end of CS. */
71 num_dw
+= ctx
->atoms
.cache_flush
->num_dw
;
74 if (ctx
->screen
->b
.trace_bo
) {
75 num_dw
+= SI_TRACE_CS_DWORDS
;
79 /* Flush if there's not enough space. */
80 if (num_dw
> RADEON_MAX_CMDBUF_DWORDS
) {
81 si_flush(&ctx
->b
.b
, NULL
, RADEON_FLUSH_ASYNC
);
85 void si_context_flush(struct si_context
*ctx
, unsigned flags
)
87 struct radeon_winsys_cs
*cs
= ctx
->b
.rings
.gfx
.cs
;
93 ctx
->b
.nontimer_queries_suspended
= false;
94 if (ctx
->b
.num_cs_dw_nontimer_queries_suspend
) {
95 r600_suspend_nontimer_queries(&ctx
->b
);
96 ctx
->b
.nontimer_queries_suspended
= true;
99 ctx
->b
.streamout
.suspended
= false;
101 if (ctx
->b
.streamout
.begin_emitted
) {
102 r600_emit_streamout_end(&ctx
->b
);
103 ctx
->b
.streamout
.suspended
= true;
106 ctx
->b
.flags
|= R600_CONTEXT_FLUSH_AND_INV_CB
|
107 R600_CONTEXT_FLUSH_AND_INV_CB_META
|
108 R600_CONTEXT_FLUSH_AND_INV_DB
|
109 R600_CONTEXT_FLUSH_AND_INV_DB_META
|
110 R600_CONTEXT_INV_TEX_CACHE
;
111 si_emit_cache_flush(&ctx
->b
, NULL
);
113 /* this is probably not needed anymore */
114 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_EVENT_WRITE
, 0, 0);
115 cs
->buf
[cs
->cdw
++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH
) | EVENT_INDEX(4);
117 /* force to keep tiling flags */
118 flags
|= RADEON_FLUSH_KEEP_TILING_FLAGS
;
121 if (ctx
->screen
->b
.trace_bo
) {
122 struct si_screen
*sscreen
= ctx
->screen
;
125 for (i
= 0; i
< cs
->cdw
; i
++) {
126 fprintf(stderr
, "[%4d] [%5d] 0x%08x\n", sscreen
->b
.cs_count
, i
, cs
->buf
[i
]);
128 sscreen
->b
.cs_count
++;
133 ctx
->b
.ws
->cs_flush(ctx
->b
.rings
.gfx
.cs
, flags
, 0);
136 if (ctx
->screen
->b
.trace_bo
) {
137 struct si_screen
*sscreen
= ctx
->screen
;
140 for (i
= 0; i
< 10; i
++) {
142 if (!ctx
->ws
->buffer_is_busy(sscreen
->b
.trace_bo
->buf
, RADEON_USAGE_READWRITE
)) {
147 fprintf(stderr
, "timeout on cs lockup likely happen at cs %d dw %d\n",
148 sscreen
->b
.trace_ptr
[1], sscreen
->b
.trace_ptr
[0]);
150 fprintf(stderr
, "cs %d executed in %dms\n", sscreen
->b
.trace_ptr
[1], i
* 5);
155 si_begin_new_cs(ctx
);
158 void si_begin_new_cs(struct si_context
*ctx
)
160 ctx
->pm4_dirty_cdwords
= 0;
162 /* Flush read caches at the beginning of CS. */
163 ctx
->b
.flags
|= R600_CONTEXT_INV_TEX_CACHE
|
164 R600_CONTEXT_INV_CONST_CACHE
|
165 R600_CONTEXT_INV_SHADER_CACHE
;
167 /* set all valid group as dirty so they get reemited on
170 si_pm4_reset_emitted(ctx
);
172 /* The CS initialization should be emitted before everything else. */
173 si_pm4_emit(ctx
, ctx
->queued
.named
.init
);
174 ctx
->emitted
.named
.init
= ctx
->queued
.named
.init
;
176 if (ctx
->b
.streamout
.suspended
) {
177 ctx
->b
.streamout
.append_bitmask
= ctx
->b
.streamout
.enabled_mask
;
178 r600_streamout_buffers_dirty(&ctx
->b
);
182 if (ctx
->b
.nontimer_queries_suspended
) {
183 r600_resume_nontimer_queries(&ctx
->b
);
186 si_all_descriptors_begin_new_cs(ctx
);
190 void si_trace_emit(struct si_context
*sctx
)
192 struct si_screen
*sscreen
= sctx
->screen
;
193 struct radeon_winsys_cs
*cs
= sctx
->cs
;
196 va
= r600_resource_va(&sscreen
->screen
, (void*)sscreen
->b
.trace_bo
);
197 r600_context_bo_reloc(sctx
, sscreen
->b
.trace_bo
, RADEON_USAGE_READWRITE
);
198 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_WRITE_DATA
, 4, 0);
199 cs
->buf
[cs
->cdw
++] = PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC
) |
200 PKT3_WRITE_DATA_WR_CONFIRM
|
201 PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME
);
202 cs
->buf
[cs
->cdw
++] = va
& 0xFFFFFFFFUL
;
203 cs
->buf
[cs
->cdw
++] = (va
>> 32UL) & 0xFFFFFFFFUL
;
204 cs
->buf
[cs
->cdw
++] = cs
->cdw
;
205 cs
->buf
[cs
->cdw
++] = sscreen
->b
.cs_count
;