/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"

#include "util/os_time.h"
#include "util/u_upload_mgr.h"
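
/* Management of the gfx command stream (IB): space checks, flushes, and
 * re-initialization of state at the start of each new IB.
 */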

void si_need_gfx_cs_space(struct si_context *ctx)
{
	struct radeon_cmdbuf *cs = ctx->gfx_cs;

	/* There is no need to flush the DMA IB here, because
	 * si_need_dma_space always flushes the GFX IB if there is
	 * a conflict, which means any unflushed DMA commands automatically
	 * precede the GFX IB (= they had no dependency on the GFX IB when
	 * they were submitted).
	 */

	/* There are two memory usage counters in the winsys for all buffers
	 * that have been added (cs_add_buffer) and two counters in the pipe
	 * driver for those that haven't been added yet.
	 */
	if (unlikely(!radeon_cs_memory_below_limit(ctx->screen, ctx->gfx_cs,
						   ctx->vram, ctx->gtt))) {
		si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
		return;
	}

	unsigned need_dwords = si_get_minimum_num_gfx_cs_dwords(ctx);
	if (!ctx->ws->cs_check_space(cs, need_dwords, false))
		si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
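
/* Drop the references held by the queued SDMA uploads and empty the list. */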
void si_unref_sdma_uploads(struct si_context *sctx)
{
	for (unsigned i = 0; i < sctx->num_sdma_uploads; i++) {
		si_resource_reference(&sctx->sdma_uploads[i].dst, NULL);
		si_resource_reference(&sctx->sdma_uploads[i].src, NULL);
	}
	sctx->num_sdma_uploads = 0;
}

void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
		     struct pipe_fence_handle **fence)
{
	struct radeon_cmdbuf *cs = ctx->gfx_cs;
	struct radeon_winsys *ws = ctx->ws;
	const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH |
				    SI_CONTEXT_CS_PARTIAL_FLUSH;
	unsigned wait_flags = 0;
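
	/* Don't re-enter if this context is already in the middle of a flush. */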
	if (ctx->gfx_flush_in_progress)
		return;

	if (!ctx->screen->info.kernel_flushes_tc_l2_after_ib) {
		wait_flags |= wait_ps_cs |
			      SI_CONTEXT_INV_L2;
	} else if (ctx->chip_class == GFX6) {
		/* The kernel flushes L2 before shaders are finished. */
		wait_flags |= wait_ps_cs;
	} else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
		wait_flags |= wait_ps_cs;
	}

	/* Drop this flush if it's a no-op. */
	if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
	    (!wait_flags || !ctx->gfx_last_ib_is_busy))
		return;

	if (si_check_device_reset(ctx))
		return;

	if (ctx->screen->debug_flags & DBG(CHECK_VM))
		flags &= ~PIPE_FLUSH_ASYNC;

	ctx->gfx_flush_in_progress = true;

	/* If the state tracker is flushing the GFX IB, si_flush_from_st is
	 * responsible for flushing the DMA IB and merging the fences from both.
	 * If the driver flushes the GFX IB internally, it should never ask
	 * for a fence handle.
	 */
	assert(!radeon_emitted(ctx->dma_cs, 0) || fence == NULL);

	/* Update the sdma_uploads list by flushing the uploader. */
	u_upload_unmap(ctx->b.const_uploader);

	/* Execute SDMA uploads. */
	ctx->sdma_uploads_in_progress = true;
	for (unsigned i = 0; i < ctx->num_sdma_uploads; i++) {
		struct si_sdma_upload *up = &ctx->sdma_uploads[i];
		struct pipe_box box;

		assert(up->src_offset % 4 == 0 && up->dst_offset % 4 == 0 &&
		       up->size % 4 == 0);

		u_box_1d(up->src_offset, up->size, &box);
		ctx->dma_copy(&ctx->b, &up->dst->b.b, 0, up->dst_offset, 0, 0,
			      &up->src->b.b, 0, &box);
	}
	ctx->sdma_uploads_in_progress = false;
	si_unref_sdma_uploads(ctx);

	/* Flush SDMA (preamble IB). */
	if (radeon_emitted(ctx->dma_cs, 0))
		si_flush_dma_cs(ctx, flags, NULL);
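
	/* Finish the parallel compute IB that was used for compute-based
	 * primitive discard, if anything was emitted to it.
	 */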
	if (radeon_emitted(ctx->prim_discard_compute_cs, 0)) {
		struct radeon_cmdbuf *compute_cs = ctx->prim_discard_compute_cs;
		si_compute_signal_gfx(ctx);

		/* Make sure compute shaders are idle before leaving the IB, so that
		 * the next IB doesn't overwrite GDS that might be in use. */
		radeon_emit(compute_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(compute_cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) |
					EVENT_INDEX(4));

		/* Save the GDS prim restart counter if needed. */
		if (ctx->preserve_prim_restart_gds_at_flush) {
			si_cp_copy_data(ctx, compute_cs,
					COPY_DATA_DST_MEM, ctx->wait_mem_scratch, 4,
					COPY_DATA_GDS, NULL, 4);
		}
	}

	if (ctx->has_graphics) {
		if (!LIST_IS_EMPTY(&ctx->active_queries))
			si_suspend_queries(ctx);

		ctx->streamout.suspended = false;
		if (ctx->streamout.begin_emitted) {
			si_emit_streamout_end(ctx);
			ctx->streamout.suspended = true;

			/* Since streamout uses GDS on gfx10, we need to make
			 * GDS idle when we leave the IB, otherwise another
			 * process might overwrite it while our shaders are busy.
			 */
			if (ctx->chip_class >= GFX10)
				wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	/* Make sure CP DMA is idle at the end of IBs after L2 prefetches
	 * because the kernel doesn't wait for it. */
	if (ctx->chip_class >= GFX7)
		si_cp_dma_wait_for_idle(ctx);

	/* Wait for draw calls to finish if needed. */
	if (wait_flags) {
		ctx->flags |= wait_flags;
		ctx->emit_cache_flush(ctx);
	}
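
	/* The previous IB is considered idle unless we waited above for both
	 * PS and CS work to finish.
	 */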
	ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;

	if (ctx->current_saved_cs) {
		/* Save the IB for debug contexts. */
		si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
		ctx->current_saved_cs->flushed = true;
		ctx->current_saved_cs->time_flush = os_time_get_nano();

		si_log_hw_flush(ctx);
	}

	if (si_compute_prim_discard_enabled(ctx)) {
		/* The compute IB can start after the previous gfx IB starts. */
		if (radeon_emitted(ctx->prim_discard_compute_cs, 0) &&
		    ctx->last_gfx_fence) {
			ctx->ws->cs_add_fence_dependency(ctx->gfx_cs,
							 ctx->last_gfx_fence,
							 RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY |
							 RADEON_DEPENDENCY_START_FENCE);
		}

		/* Remember the last execution barrier. It's in the IB.
		 * It will signal the start of the next compute IB.
		 */
		if (flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW &&
		    ctx->last_pkt3_write_data) {
			*ctx->last_pkt3_write_data = PKT3(PKT3_WRITE_DATA, 3, 0);
			ctx->last_pkt3_write_data = NULL;

			si_resource_reference(&ctx->last_ib_barrier_buf, ctx->barrier_buf);
			ctx->last_ib_barrier_buf_offset = ctx->barrier_buf_offset;
			si_resource_reference(&ctx->barrier_buf, NULL);

			ws->fence_reference(&ctx->last_ib_barrier_fence, NULL);
		}
	}
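
	/* Flush the CS. */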
	ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
	if (fence)
		ws->fence_reference(fence, ctx->last_gfx_fence);

	ctx->num_gfx_cs_flushes++;

	if (si_compute_prim_discard_enabled(ctx)) {
		/* Remember the last execution barrier, which is the last fence
		 * in this case.
		 */
		if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
			ctx->last_pkt3_write_data = NULL;
			si_resource_reference(&ctx->last_ib_barrier_buf, NULL);
			ws->fence_reference(&ctx->last_ib_barrier_fence, ctx->last_gfx_fence);
		}
	}

	/* Check VM faults if needed. */
	if (ctx->screen->debug_flags & DBG(CHECK_VM)) {
		/* Use a conservative 800 ms timeout, after which we stop
		 * waiting and assume the GPU is hung.
		 */
		ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800*1000*1000);

		si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
	}

	if (ctx->current_saved_cs)
		si_saved_cs_reference(&ctx->current_saved_cs, NULL);

	si_begin_new_gfx_cs(ctx);
	ctx->gfx_flush_in_progress = false;
}
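
/* Set up CS debugging for a debug context: allocate a saved-CS object and a
 * small trace buffer that trace IDs are written into.
 */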
static void si_begin_gfx_cs_debug(struct si_context *ctx)
{
	static const uint32_t zeros[1];
	assert(!ctx->current_saved_cs);

	ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
	if (!ctx->current_saved_cs)
		return;

	pipe_reference_init(&ctx->current_saved_cs->reference, 1);

	ctx->current_saved_cs->trace_buf = si_resource(
		pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 8));
	if (!ctx->current_saved_cs->trace_buf) {
		free(ctx->current_saved_cs);
		ctx->current_saved_cs = NULL;
		return;
	}

	pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b,
				    0, sizeof(zeros), zeros);
	ctx->current_saved_cs->trace_id = 0;

	radeon_add_to_buffer_list(ctx, ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
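
/* Start a new gfx IB: re-add persistent buffers, invalidate caches, and mark
 * all state as dirty so that it's re-emitted before the first draw.
 */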
void si_begin_new_gfx_cs(struct si_context *ctx)
{
	if (ctx->is_debug)
		si_begin_gfx_cs_debug(ctx);

	if (ctx->gds) {
		ctx->ws->cs_add_buffer(ctx->gfx_cs, ctx->gds,
				       RADEON_USAGE_READWRITE, 0, 0);
		if (ctx->gds_oa) {
			ctx->ws->cs_add_buffer(ctx->gfx_cs, ctx->gds_oa,
					       RADEON_USAGE_READWRITE, 0, 0);
		}
	}

	/* Always invalidate caches at the beginning of IBs, because external
	 * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
	 * buffers.
	 *
	 * Note that the cache flush done by the kernel at the end of GFX IBs
	 * isn't useful here, because that flush can finish after the following
	 * IB starts drawing.
	 *
	 * TODO: Do we also need to invalidate CB & DB caches?
	 */
	ctx->flags |= SI_CONTEXT_INV_ICACHE |
		      SI_CONTEXT_INV_SCACHE |
		      SI_CONTEXT_INV_VCACHE |
		      SI_CONTEXT_INV_L2 |
		      SI_CONTEXT_START_PIPELINE_STATS;

	ctx->cs_shader_state.initialized = false;
	si_all_descriptors_begin_new_cs(ctx);

	if (!ctx->has_graphics) {
		ctx->initial_gfx_cs_size = ctx->gfx_cs->current.cdw;
		return;
	}

	/* Set all valid groups as dirty so they get re-emitted on the
	 * next draw command.
	 */
	si_pm4_reset_emitted(ctx);

	/* The CS initialization should be emitted before everything else. */
	si_pm4_emit(ctx, ctx->init_config);
	if (ctx->init_config_gs_rings)
		si_pm4_emit(ctx, ctx->init_config_gs_rings);
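
	/* Queue L2 prefetches of all bound shaders and the vertex buffer
	 * descriptors, since prefetches have to be redone in a new IB.
	 */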
	if (ctx->queued.named.ls)
		ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
	if (ctx->queued.named.hs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
	if (ctx->queued.named.es)
		ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
	if (ctx->queued.named.gs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
	if (ctx->queued.named.vs)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
	if (ctx->queued.named.ps)
		ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
	if (ctx->vb_descriptors_buffer && ctx->vertex_elements)
		ctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;

	/* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
	bool has_clear_state = ctx->screen->has_clear_state;
	if (has_clear_state) {
		ctx->framebuffer.dirty_cbufs =
			u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
		/* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
		ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
	} else {
		ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
		ctx->framebuffer.dirty_zsbuf = true;
	}

	/* This should always be marked as dirty to set the framebuffer scissor
	 * at least.
	 */
	si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);

	si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->clip_state.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
	ctx->sample_locs_num_samples = 0;
	si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
	/* CLEAR_STATE sets 0xffff. */
	if (!has_clear_state || ctx->sample_mask != 0xffff)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
	/* CLEAR_STATE sets zeros. */
	if (!has_clear_state || ctx->blend_color.any_nonzeros)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
	if (ctx->chip_class >= GFX9)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
	if (ctx->chip_class < GFX10)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
	/* CLEAR_STATE disables all window rectangles. */
	if (!has_clear_state || ctx->num_window_rectangles > 0)
		si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);

	si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
	si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);

	si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
	if (ctx->scratch_buffer) {
		si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
	}

	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		si_streamout_buffers_dirty(ctx);
	}

	if (!LIST_IS_EMPTY(&ctx->active_queries))
		si_resume_queries(ctx);

	assert(!ctx->gfx_cs->prev_dw);
	ctx->initial_gfx_cs_size = ctx->gfx_cs->current.cdw;
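
	/* initial_gfx_cs_size is the size of the initial state; si_flush_gfx_cs
	 * compares against it to detect and drop no-op flushes.
	 */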

	/* Invalidate various draw states so that they are emitted before
	 * the first draw call. */
	si_invalidate_draw_sh_constants(ctx);
	ctx->last_index_size = -1;
	ctx->last_primitive_restart_en = -1;
	ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
	ctx->last_multi_vgt_param = -1;
	ctx->last_rast_prim = -1;
	ctx->last_sc_line_stipple = ~0;
	ctx->last_vs_state = ~0;
	ctx->last_tcs = NULL;
	ctx->last_tes_sh_base = -1;
	ctx->last_num_tcs_input_cp = -1;
	ctx->last_ls_hs_config = -1; /* impossible value */

	ctx->prim_discard_compute_ib_initialized = false;

	/* Compute-based primitive discard:
	 * The index ring is divided into 2 halves. Switch between the halves
	 * in the same fashion as doublebuffering.
	 */
	if (ctx->index_ring_base)
		ctx->index_ring_base = 0;
	else
		ctx->index_ring_base = ctx->index_ring_size_per_ib;

	ctx->index_ring_offset = 0;
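
	/* If CLEAR_STATE was emitted, seed the register shadow with the known
	 * post-CLEAR_STATE values, so that redundant register writes can be
	 * skipped; otherwise mark all tracked registers as unknown.
	 */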
	if (has_clear_state) {
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
		ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x00001000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_PRIM_FILTER_CNTL] = 0;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x00090000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x00000003;
		ctx->tracked_regs.reg_value[SI_TRACKED_DB_DFSM_CONTROL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x00000005;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL] = 0x00000002;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0x00000000;
		ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From GFX8 */

		/* Mark all tracked registers as saved. */
		ctx->tracked_regs.reg_saved = 0xffffffffffffffff;
	} else {
		/* Mark all tracked registers as unknown. */
		ctx->tracked_regs.reg_saved = 0;
	}

	/* 0xffffffff is an impossible value for the SPI_PS_INPUT_CNTL_n registers. */
	memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
}