radv/sqtt: update SPI_CONFIG_CNTL.EXP_PRIORITY_ORDER value
[mesa.git] / src / amd / vulkan / radv_sqtt.c
1 /*
2 * Copyright © 2020 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25
26 #include "radv_private.h"
27 #include "radv_cs.h"
28 #include "sid.h"
29
30 #define SQTT_BUFFER_ALIGN_SHIFT 12
31
32 static uint64_t
33 radv_thread_trace_get_info_offset(unsigned se)
34 {
35 return sizeof(struct radv_thread_trace_info) * se;
36 }
37
38 static uint64_t
39 radv_thread_trace_get_data_offset(struct radv_device *device, unsigned se)
40 {
41 uint64_t data_offset;
42
43 data_offset = align64(sizeof(struct radv_thread_trace_info) * 4,
44 1 << SQTT_BUFFER_ALIGN_SHIFT);
45 data_offset += device->thread_trace_buffer_size * se;
46
47 return data_offset;
48 }
49
50 static uint64_t
51 radv_thread_trace_get_info_va(struct radv_device *device, unsigned se)
52 {
53 uint64_t va = radv_buffer_get_va(device->thread_trace_bo);
54 return va + radv_thread_trace_get_info_offset(se);
55 }
56
57 static uint64_t
58 radv_thread_trace_get_data_va(struct radv_device *device, unsigned se)
59 {
60 uint64_t va = radv_buffer_get_va(device->thread_trace_bo);
61 return va + radv_thread_trace_get_data_offset(device, se);
62 }
63
/* Emit the register programming that arms SQ thread tracing on every shader
 * engine, then fire the start event appropriate for the target queue.
 *
 * Per-SE registers are written with GRBM_GFX_INDEX targeting one SE at a
 * time; broadcasting is restored before the start event.  The register
 * write sequence below must not be reordered (see the in-loop comment).
 */
static void
radv_emit_thread_trace_start(struct radv_device *device,
			     struct radeon_cmdbuf *cs,
			     uint32_t queue_family_index)
{
	/* SQ_THREAD_TRACE_SIZE takes the size in units of 4KB
	 * (1 << SQTT_BUFFER_ALIGN_SHIFT) chunks. */
	uint32_t shifted_size = device->thread_trace_buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
	unsigned max_se = device->physical_device->rad_info.max_se;

	/* Only GFX9 register offsets are programmed below. */
	assert(device->physical_device->rad_info.chip_class == GFX9);

	for (unsigned se = 0; se < max_se; se++) {
		uint64_t data_va = radv_thread_trace_get_data_va(device, se);
		/* The BASE/BASE2 registers take a 4KB-aligned address. */
		uint64_t shifted_va = data_va >> SQTT_BUFFER_ALIGN_SHIFT;

		/* Target SEx and SH0. */
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_INDEX(se) |
				       S_030800_SH_INDEX(0) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

		/* Order seems important for the following 4 registers. */
		radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
				       S_030CDC_ADDR_HI(shifted_va >> 32));

		radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE,
				       S_030CC0_ADDR(shifted_va));

		radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE,
				       S_030CC4_SIZE(shifted_size));

		radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL,
				       S_030CD4_RESET_BUFFER(1));

		/* Select CU 2 on SH0 and trace all SIMDs; stall the SQ
		 * instead of dropping data when the FIFO backs up. */
		radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK,
				       S_030CC8_CU_SEL(2) |
				       S_030CC8_SH_SEL(0) |
				       S_030CC8_SIMD_EN(0xf) |
				       S_030CC8_VM_ID_MASK(0) |
				       S_030CC8_REG_STALL_EN(1) |
				       S_030CC8_SPI_STALL_EN(1) |
				       S_030CC8_SQ_STALL_EN(1));

		/* Trace all tokens and registers. */
		radeon_set_uconfig_reg(cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
				       S_030CCC_TOKEN_MASK(0xbfff) |
				       S_030CCC_REG_MASK(0xff) |
				       S_030CCC_REG_DROP_ON_STALL(0));

		/* Enable SQTT perf counters for all CUs. */
		radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
				       S_030CD0_SH0_MASK(0xffff) |
				       S_030CD0_SH1_MASK(0xffff));

		radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2,
				       S_030CE0_INST_MASK(0xffffffff));

		radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER,
				       S_030CEC_HIWATER(4));

		/* Reset thread trace status errors. */
		radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS,
				       S_030CE8_UTC_ERROR(0));

		/* Enable the thread trace mode for all shader stages. */
		radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
				       S_030CD8_MASK_PS(1) |
				       S_030CD8_MASK_VS(1) |
				       S_030CD8_MASK_GS(1) |
				       S_030CD8_MASK_ES(1) |
				       S_030CD8_MASK_HS(1) |
				       S_030CD8_MASK_LS(1) |
				       S_030CD8_MASK_CS(1) |
				       S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
				       S_030CD8_TC_PERF_EN(1) | /* count SQTT traffic in TCC perf counters */
				       S_030CD8_MODE(1));
	}

	/* Restore global broadcasting. */
	radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
			       S_030800_SE_BROADCAST_WRITES(1) |
			       S_030800_SH_BROADCAST_WRITES(1) |
			       S_030800_INSTANCE_BROADCAST_WRITES(1));

	/* Start the thread trace with a different event based on the queue.
	 * NOTE(review): the GFX7 check is dead given the GFX9-only assert
	 * above — presumably kept for future chip support; confirm. */
	if (queue_family_index == RADV_QUEUE_COMPUTE &&
	    device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
				  S_00B878_THREAD_TRACE_ENABLE(1));
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_START) | EVENT_INDEX(0));
	}
}
157
/* SQTT status registers copied back into the per-SE info struct when the
 * trace is stopped.  NOTE(review): the order presumably matches the first
 * fields of struct radv_thread_trace_info — confirm against radv_private.h.
 */
static const uint32_t thread_trace_info_regs[] =
{
	R_030CE4_SQ_THREAD_TRACE_WPTR,
	R_030CE8_SQ_THREAD_TRACE_STATUS,
	R_030CF0_SQ_THREAD_TRACE_CNTR,
};
164
/* Emit the stop sequence: signal the stop/finish events, then for each SE
 * disable trace mode, wait for the SQTT unit to go idle, and copy the
 * status registers back into the info struct in the SQTT BO so the CPU can
 * read them from radv_get_thread_trace().
 */
static void
radv_emit_thread_trace_stop(struct radv_device *device,
			    struct radeon_cmdbuf *cs,
			    uint32_t queue_family_index)
{
	unsigned max_se = device->physical_device->rad_info.max_se;

	/* Only GFX9 register offsets are programmed below. */
	assert(device->physical_device->rad_info.chip_class == GFX9);

	/* Stop the thread trace with a different event based on the queue. */
	if (queue_family_index == RADV_QUEUE_COMPUTE &&
	    device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
				  S_00B878_THREAD_TRACE_ENABLE(0));
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP) | EVENT_INDEX(0));
	}

	/* Flush any buffered trace data to memory. */
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH) | EVENT_INDEX(0));

	for (unsigned se = 0; se < max_se; se++) {
		/* Target SEi and SH0. */
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_INDEX(se) |
				       S_030800_SH_INDEX(0) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

		/* Disable the thread trace mode. */
		radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
				       S_030CD8_MODE(0));

		/* Wait for thread trace completion (STATUS.BUSY == 0). */
		radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
		radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
		radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
		radeon_emit(cs, 0);
		radeon_emit(cs, 0); /* reference value */
		radeon_emit(cs, S_030CE8_BUSY(1)); /* mask */
		radeon_emit(cs, 4); /* poll interval */

		/* Get the VA where the info struct is stored for this SE. */
		uint64_t info_va = radv_thread_trace_get_info_va(device, se);

		/* Copy back the info struct one DWORD at a time. */
		for (unsigned i = 0; i < ARRAY_SIZE(thread_trace_info_regs); i++) {
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
					COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
					COPY_DATA_WR_CONFIRM);
			radeon_emit(cs, thread_trace_info_regs[i] >> 2);
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, (info_va + i * 4));
			radeon_emit(cs, (info_va + i * 4) >> 32);
		}
	}

	/* Restore global broadcasting. */
	radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
			       S_030800_SE_BROADCAST_WRITES(1) |
			       S_030800_SH_BROADCAST_WRITES(1) |
			       S_030800_INSTANCE_BROADCAST_WRITES(1));
}
229
/* Toggle the SQG top/bottom-of-pipe events that feed thread trace data.
 * GPR_WRITE_PRIORITY and EXP_PRIORITY_ORDER are always written with fixed
 * values alongside the enable bits because SPI_CONFIG_CNTL has no
 * read-modify-write path here.
 */
static void
radv_emit_spi_config_cntl(struct radeon_cmdbuf *cs, bool enable)
{
	radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL,
			       S_031100_GPR_WRITE_PRIORITY(0x2c688) |
			       S_031100_EXP_PRIORITY_ORDER(3) |
			       S_031100_ENABLE_SQG_TOP_EVENTS(enable) |
			       S_031100_ENABLE_SQG_BOP_EVENTS(enable));
}
239
/* Emit a full wait-for-idle + cache invalidation so SQTT start/stop is not
 * interleaved with in-flight work.  Compute queues only get a CS partial
 * flush; graphics queues additionally flush PS.
 *
 * NOTE(review): the two compute checks compare `family` against different
 * enums (RING_COMPUTE vs RADV_QUEUE_COMPUTE) — presumably they have the
 * same numeric value; confirm and unify.
 */
static void
radv_emit_wait_for_idle(struct radv_device *device,
			struct radeon_cmdbuf *cs, int family)
{
	si_cs_emit_cache_flush(cs, device->physical_device->rad_info.chip_class,
			       NULL, 0,
			       family == RING_COMPUTE &&
			       device->physical_device->rad_info.chip_class >= GFX7,
			       (family == RADV_QUEUE_COMPUTE ?
				RADV_CMD_FLAG_CS_PARTIAL_FLUSH :
				(RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
			       RADV_CMD_FLAG_INV_ICACHE |
			       RADV_CMD_FLAG_INV_SCACHE |
			       RADV_CMD_FLAG_INV_VCACHE |
			       RADV_CMD_FLAG_INV_L2, 0);
}
256
/* Pre-build one SQTT start and one stop command stream per queue family
 * (GENERAL and COMPUTE — the loop bound 2 covers exactly those), so that
 * radv_begin/end_thread_trace() can submit them directly.
 *
 * NOTE(review): ws->cs_create() results are not NULL-checked before
 * radeon_emit() — presumably allocation failure aborts elsewhere; confirm.
 */
static void
radv_thread_trace_init_cs(struct radv_device *device)
{
	struct radeon_winsys *ws = device->ws;

	/* Thread trace start CS. */
	for (int family = 0; family < 2; ++family) {
		device->thread_trace_start_cs[family] = ws->cs_create(ws, family);
		switch (family) {
		case RADV_QUEUE_GENERAL:
			radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
			radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
			radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
			break;
		case RADV_QUEUE_COMPUTE:
			radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_NOP, 0, 0));
			radeon_emit(device->thread_trace_start_cs[family], 0);
			break;
		}

		radv_cs_add_buffer(ws, device->thread_trace_start_cs[family],
				   device->thread_trace_bo);

		/* Make sure to wait-for-idle before starting SQTT. */
		radv_emit_wait_for_idle(device,
					device->thread_trace_start_cs[family],
					family);

		/* Enable SQG events that collects thread trace data. */
		radv_emit_spi_config_cntl(device->thread_trace_start_cs[family], true);

		radv_emit_thread_trace_start(device,
					     device->thread_trace_start_cs[family],
					     family);

		ws->cs_finalize(device->thread_trace_start_cs[family]);
	}

	/* Thread trace stop CS. */
	for (int family = 0; family < 2; ++family) {
		device->thread_trace_stop_cs[family] = ws->cs_create(ws, family);
		switch (family) {
		case RADV_QUEUE_GENERAL:
			radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
			radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
			radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
			break;
		case RADV_QUEUE_COMPUTE:
			radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_NOP, 0, 0));
			radeon_emit(device->thread_trace_stop_cs[family], 0);
			break;
		}

		radv_cs_add_buffer(ws, device->thread_trace_stop_cs[family],
				   device->thread_trace_bo);

		/* Make sure to wait-for-idle before stopping SQTT. */
		radv_emit_wait_for_idle(device,
					device->thread_trace_stop_cs[family],
					family);

		radv_emit_thread_trace_stop(device,
					    device->thread_trace_stop_cs[family],
					    family);

		/* Restore previous state by disabling SQG events. */
		radv_emit_spi_config_cntl(device->thread_trace_stop_cs[family], false);

		ws->cs_finalize(device->thread_trace_stop_cs[family]);
	}
}
328
/* Allocate and CPU-map the SQTT buffer: 4 info structs followed by one
 * data area per SE.  Returns false on allocation or mapping failure.
 *
 * NOTE(review): the hard-coded 4 assumes max_se <= 4 and must stay in sync
 * with the same constant in radv_thread_trace_get_data_offset().
 */
static bool
radv_thread_trace_init_bo(struct radv_device *device)
{
	struct radeon_winsys *ws = device->ws;
	uint64_t size;

	/* Compute total size of the thread trace BO for 4 SEs. */
	size = align64(sizeof(struct radv_thread_trace_info) * 4,
		       1 << SQTT_BUFFER_ALIGN_SHIFT);
	size += device->thread_trace_buffer_size * 4;

	device->thread_trace_bo = ws->buffer_create(ws, size, 4096,
						    RADEON_DOMAIN_VRAM,
						    RADEON_FLAG_CPU_ACCESS |
						    RADEON_FLAG_NO_INTERPROCESS_SHARING |
						    RADEON_FLAG_ZERO_VRAM,
						    RADV_BO_PRIORITY_SCRATCH);
	if (!device->thread_trace_bo)
		return false;

	/* Map the buffer so the CPU can read the info structs and trace
	 * data back after the stop CS has executed. */
	device->thread_trace_ptr = ws->buffer_map(device->thread_trace_bo);
	if (!device->thread_trace_ptr)
		return false;

	return true;
}
355
356 bool
357 radv_thread_trace_init(struct radv_device *device)
358 {
359 if (!radv_thread_trace_init_bo(device))
360 return false;
361
362 radv_thread_trace_init_cs(device);
363 return true;
364 }
365
366 void
367 radv_thread_trace_finish(struct radv_device *device)
368 {
369 struct radeon_winsys *ws = device->ws;
370
371 if (unlikely(device->thread_trace_bo))
372 ws->buffer_destroy(device->thread_trace_bo);
373
374 for (unsigned i = 0; i < 2; i++) {
375 if (device->thread_trace_start_cs[i])
376 ws->cs_destroy(device->thread_trace_start_cs[i]);
377 if (device->thread_trace_stop_cs[i])
378 ws->cs_destroy(device->thread_trace_stop_cs[i]);
379 }
380 }
381
382 bool
383 radv_begin_thread_trace(struct radv_queue *queue)
384 {
385 int family = queue->queue_family_index;
386 struct radeon_cmdbuf *cs = queue->device->thread_trace_start_cs[family];
387 return radv_queue_internal_submit(queue, cs);
388 }
389
390 bool
391 radv_end_thread_trace(struct radv_queue *queue)
392 {
393 int family = queue->queue_family_index;
394 struct radeon_cmdbuf *cs = queue->device->thread_trace_stop_cs[family];
395 return radv_queue_internal_submit(queue, cs);
396 }
397
398 bool
399 radv_get_thread_trace(struct radv_queue *queue,
400 struct radv_thread_trace *thread_trace)
401 {
402 struct radv_device *device = queue->device;
403 unsigned max_se = device->physical_device->rad_info.max_se;
404 void *thread_trace_ptr = device->thread_trace_ptr;
405
406 memset(thread_trace, 0, sizeof(*thread_trace));
407 thread_trace->num_traces = max_se;
408
409 for (unsigned se = 0; se < max_se; se++) {
410 uint64_t info_offset = radv_thread_trace_get_info_offset(se);
411 uint64_t data_offset = radv_thread_trace_get_data_offset(device, se);
412 void *info_ptr = thread_trace_ptr + info_offset;
413 void *data_ptr = thread_trace_ptr + data_offset;
414 struct radv_thread_trace_info *info =
415 (struct radv_thread_trace_info *)info_ptr;
416 struct radv_thread_trace_se thread_trace_se = {};
417
418 if (info->cur_offset < info->write_counter) {
419 uint32_t expected_size =
420 (info->write_counter * 32) / 1024;
421 uint32_t available_size =
422 (info->cur_offset * 32) / 1024;
423
424 fprintf(stderr, "Failed to get the thread trace "
425 "because the buffer is too small. The "
426 "hardware needs %d KB but the "
427 "buffer size is %d KB.\n",
428 expected_size, available_size);
429 fprintf(stderr, "Please update the buffer size with "
430 "RADV_THREAD_TRACE_BUFER_SIZE=<size_in_bytes>\n");
431 return false;
432 }
433
434 thread_trace_se.data_ptr = data_ptr;
435 thread_trace_se.info = *info;
436 thread_trace_se.shader_engine = se;
437 thread_trace_se.compute_unit = 0;
438
439 thread_trace->traces[se] = thread_trace_se;
440 }
441
442 return true;
443 }