/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "si_build_pm4.h"
27 #include "ac_shadowed_regs.h"
28 #include "util/u_memory.h"
static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
                              enum ac_reg_range_type type,
                              struct si_resource *shadow_regs)
{
   uint64_t gpu_address = shadow_regs->gpu_address;
   unsigned packet, num_ranges, offset;
   const struct ac_reg_range *ranges;

   ac_get_reg_ranges(sscreen->info.chip_class, sscreen->info.family,
                     type, &num_ranges, &ranges);
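
   /* Pick the shadow buffer offset, the register space base and the CP LOAD
    * packet that match this register range type. */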
   switch (type) {
   case SI_REG_RANGE_UCONFIG:
      gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
      offset = CIK_UCONFIG_REG_OFFSET;
      packet = PKT3_LOAD_UCONFIG_REG;
      break;
   case SI_REG_RANGE_CONTEXT:
      gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
      offset = SI_CONTEXT_REG_OFFSET;
      packet = PKT3_LOAD_CONTEXT_REG;
      break;
   default:
      gpu_address += SI_SHADOWED_SH_REG_OFFSET;
      offset = SI_SH_REG_OFFSET;
      packet = PKT3_LOAD_SH_REG;
      break;
   }
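
   /* Packet body: 64-bit shadow buffer address, then one
    * (register dword offset, dword count) pair per range. */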
   si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
   si_pm4_cmd_add(pm4, gpu_address);
   si_pm4_cmd_add(pm4, gpu_address >> 32);
   for (unsigned i = 0; i < num_ranges; i++) {
      si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
      si_pm4_cmd_add(pm4, ranges[i].size / 4);
   }
}
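
/* Build the IB preamble that waits for idle, flushes caches, enables register
 * shadowing via CONTEXT_CONTROL and loads all shadowed register ranges. */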
static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

   if (sctx->chip_class == GFX10) {
      /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
   }

   if (sctx->screen->dpbb_allowed) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
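
   /* Invalidate and flush GPU caches before the register loads. */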
   if (sctx->chip_class >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);   /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);   /* GCR_CNTL */
   } else if (sctx->chip_class == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      unreachable("invalid chip");
   }

   si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
   si_pm4_cmd_add(pm4, 0);
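
   /* Enable loading of CONTEXT, SH and UCONFIG registers through the LOAD
    * packets and shadowing of register writes back to memory. */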
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}
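
/* Set up CP register shadowing: allocate the shadow buffer, emit the shadowing
 * preamble and an emulated CLEAR_STATE once, and register the preamble with the
 * winsys so register values are reloaded from memory on a context switch. */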
void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
       sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
         si_aligned_buffer_create(sctx->b.screen,
                                  SI_RESOURCE_FLAG_UNMAPPABLE,
                                  PIPE_USAGE_DEFAULT,
                                  SI_SHADOWED_REG_BUFFER_SIZE,
                                  4096);
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }
   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, 0, SI_COHERENCY_CP,
                             L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
         si_create_shadowing_ib_preamble(sctx);

      /* Initialize shadowed registers as follows. */
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      ac_emulate_clear_state(&sctx->screen->info, sctx->gfx_cs,
                             radeon_set_context_reg_seq_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
      sctx->cs_preamble_state = NULL;

      si_set_tracked_regs_to_clear_state(sctx);

      /* Setup preemption. The shadowing preamble will be executed as a preamble IB,
       * which will load register values from memory on a context switch.
       */
      sctx->ws->cs_setup_preemption(sctx->gfx_cs, shadowing_preamble->pm4,
                                    shadowing_preamble->ndw);
      si_pm4_free_state(sctx, shadowing_preamble, ~0);
   }
}