ba03250b31e93339c8dfb96ecc328352daf70f92
[mesa.git] / src / gallium / drivers / radeonsi / si_cp_reg_shadowing.c
1 /*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_build_pm4.h"
26 #include "ac_shadowed_regs.h"
27 #include "util/u_memory.h"
28
29 static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
30 enum ac_reg_range_type type,
31 struct si_resource *shadow_regs)
32 {
33 uint64_t gpu_address = shadow_regs->gpu_address;
34 unsigned packet, num_ranges, offset;
35 const struct ac_reg_range *ranges;
36
37 ac_get_reg_ranges(sscreen->info.chip_class, sscreen->info.family,
38 type, &num_ranges, &ranges);
39
40 switch (type) {
41 case SI_REG_RANGE_UCONFIG:
42 gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
43 offset = CIK_UCONFIG_REG_OFFSET;
44 packet = PKT3_LOAD_UCONFIG_REG;
45 break;
46 case SI_REG_RANGE_CONTEXT:
47 gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
48 offset = SI_CONTEXT_REG_OFFSET;
49 packet = PKT3_LOAD_CONTEXT_REG;
50 break;
51 default:
52 gpu_address += SI_SHADOWED_SH_REG_OFFSET;
53 offset = SI_SH_REG_OFFSET;
54 packet = PKT3_LOAD_SH_REG;
55 break;
56 }
57
58 si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
59 si_pm4_cmd_add(pm4, gpu_address);
60 si_pm4_cmd_add(pm4, gpu_address >> 32);
61 for (unsigned i = 0; i < num_ranges; i++) {
62 si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
63 si_pm4_cmd_add(pm4, ranges[i].size / 4);
64 }
65 }
66
/* Build the PM4 preamble that enables CP register shadowing:
 * idle the GPU and flush caches, enable load/shadow via CONTEXT_CONTROL,
 * then emit one LOAD_*_REG packet per shadowed register-range type so the
 * CP restores register state from sctx->shadowed_regs.
 *
 * Returns a heap-allocated si_pm4_state owned by the caller.
 */
static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

   if (sctx->chip_class == GFX10) {
      /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
   }

   if (sctx->screen->dpbb_allowed) {
      /* Close the current primitive batch when binning is enabled. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

   /* Full cache invalidation/writeback; the ACQUIRE_MEM layout differs
    * per generation (GFX10+ adds the GCR_CNTL dword).
    */
   if (sctx->chip_class >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);   /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);   /* GCR_CNTL */
   } else if (sctx->chip_class == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* Register shadowing is only wired up for GFX9+ here. */
      unreachable("invalid chip");
   }

   /* Make the PFP wait until the above flush has completed. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
   si_pm4_cmd_add(pm4, 0);

   /* Enable both loading (CC0) and shadowing (CC1) of context, CS/GFX SH,
    * and global uconfig registers.
    */
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

   /* One LOAD_*_REG packet per range type (uconfig, context, SH, ...). */
   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}
145
/* Set up CP register shadowing if enabled via the SHADOW_REGS debug flag:
 * allocate the shadow buffer, emit the initial register state once, and
 * install a preamble that reloads registers from memory from then on.
 */
void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
            si_aligned_buffer_create(sctx->b.screen,
                                     SI_RESOURCE_FLAG_UNMAPPABLE,
                                     PIPE_USAGE_DEFAULT,
                                     SI_SHADOWED_REG_BUFFER_SIZE,
                                     4096);
      /* Allocation failure is non-fatal: we just fall back to
       * non-shadowed operation below.
       */
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }

   /* Build the regular cs_preamble; tell it whether shadowing is active. */
   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, 0, SI_COHERENCY_CP,
                             L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
            si_create_shadowing_ib_preamble(sctx);

      /* Initialize shadowed registers as follows. */
      radeon_add_to_buffer_list(sctx, sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      /* Emit the register values CLEAR_STATE would have set, so the
       * shadow buffer captures a fully-defined baseline.
       */
      ac_emulate_clear_state(&sctx->screen->info, sctx->gfx_cs,
                             radeon_set_context_reg_seq_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);

      /* Execute the shadowing preamble as cs_preamble, which will
       * load register values from memory.
       */
      sctx->cs_preamble_state = shadowing_preamble;

      /* Sync the driver-side tracked-register cache with the
       * CLEAR_STATE values emitted above.
       */
      si_set_tracked_regs_to_clear_state(sctx);
   }
}