radv/sqtt: add support for GFX10
[mesa.git] / src / amd / vulkan / radv_sqtt.c
1 /*
2 * Copyright © 2020 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25
26 #include "radv_private.h"
27 #include "radv_cs.h"
28 #include "sid.h"
29
30 #define SQTT_BUFFER_ALIGN_SHIFT 12
31
32 static uint64_t
33 radv_thread_trace_get_info_offset(unsigned se)
34 {
35 return sizeof(struct radv_thread_trace_info) * se;
36 }
37
38 static uint64_t
39 radv_thread_trace_get_data_offset(struct radv_device *device, unsigned se)
40 {
41 uint64_t data_offset;
42
43 data_offset = align64(sizeof(struct radv_thread_trace_info) * 4,
44 1 << SQTT_BUFFER_ALIGN_SHIFT);
45 data_offset += device->thread_trace_buffer_size * se;
46
47 return data_offset;
48 }
49
50 static uint64_t
51 radv_thread_trace_get_info_va(struct radv_device *device, unsigned se)
52 {
53 uint64_t va = radv_buffer_get_va(device->thread_trace_bo);
54 return va + radv_thread_trace_get_info_offset(se);
55 }
56
57 static uint64_t
58 radv_thread_trace_get_data_va(struct radv_device *device, unsigned se)
59 {
60 uint64_t va = radv_buffer_get_va(device->thread_trace_bo);
61 return va + radv_thread_trace_get_data_offset(device, se);
62 }
63
/* Emit the packets that program and start SQ thread trace (SQTT) capture on
 * every shader engine. GFX9 uses UCONFIG SQ_THREAD_TRACE_* registers; GFX10
 * uses the privileged-config 0x8Dxx register block instead. In both paths
 * the register write order is significant.
 */
static void
radv_emit_thread_trace_start(struct radv_device *device,
			     struct radeon_cmdbuf *cs,
			     uint32_t queue_family_index)
{
	/* Buffer size and VA are programmed shifted right by the 4KB SQTT
	 * buffer alignment. */
	uint32_t shifted_size = device->thread_trace_buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
	unsigned max_se = device->physical_device->rad_info.max_se;

	assert(device->physical_device->rad_info.chip_class >= GFX9);

	for (unsigned se = 0; se < max_se; se++) {
		uint64_t data_va = radv_thread_trace_get_data_va(device, se);
		uint64_t shifted_va = data_va >> SQTT_BUFFER_ALIGN_SHIFT;

		/* Target SEx and SH0 (per-SE programming; instances still
		 * broadcast). */
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_INDEX(se) |
				       S_030800_SH_INDEX(0) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

		if (device->physical_device->rad_info.chip_class == GFX9) {
			/* Order seems important for the following 4 registers. */
			radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
					       S_030CDC_ADDR_HI(shifted_va >> 32));

			radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE,
					       S_030CC0_ADDR(shifted_va));

			radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE,
					       S_030CC4_SIZE(shifted_size));

			radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL,
					       S_030CD4_RESET_BUFFER(1));

			/* Select which waves are traced and enable stalling
			 * so no tokens are dropped. */
			radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK,
					       S_030CC8_CU_SEL(2) |
					       S_030CC8_SH_SEL(0) |
					       S_030CC8_SIMD_EN(0xf) |
					       S_030CC8_VM_ID_MASK(0) |
					       S_030CC8_REG_STALL_EN(1) |
					       S_030CC8_SPI_STALL_EN(1) |
					       S_030CC8_SQ_STALL_EN(1));

			/* Trace all tokens and registers. */
			radeon_set_uconfig_reg(cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
					       S_030CCC_TOKEN_MASK(0xbfff) |
					       S_030CCC_REG_MASK(0xff) |
					       S_030CCC_REG_DROP_ON_STALL(0));

			/* Enable SQTT perf counters for all CUs. */
			radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
					       S_030CD0_SH0_MASK(0xffff) |
					       S_030CD0_SH1_MASK(0xffff));

			radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2,
					       S_030CE0_INST_MASK(0xffffffff));

			radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER,
					       S_030CEC_HIWATER(4));

			/* Reset thread trace status errors. */
			radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS,
					       S_030CE8_UTC_ERROR(0));

			/* Enable the thread trace mode (must come after the
			 * buffer/mask setup above). */
			radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
					       S_030CD8_MASK_PS(1) |
					       S_030CD8_MASK_VS(1) |
					       S_030CD8_MASK_GS(1) |
					       S_030CD8_MASK_ES(1) |
					       S_030CD8_MASK_HS(1) |
					       S_030CD8_MASK_LS(1) |
					       S_030CD8_MASK_CS(1) |
					       S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
					       S_030CD8_TC_PERF_EN(1) | /* count SQTT traffic in TCC perf counters */
					       S_030CD8_MODE(1));
		} else {
			/* GFX10 path: privileged config registers. */
			/* Order seems important for the following 2 registers. */
			radeon_set_privileged_config_reg(cs, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
							 S_008D04_SIZE(shifted_size) |
							 S_008D04_BASE_HI(shifted_va >> 32));

			radeon_set_privileged_config_reg(cs, R_008D00_SQ_THREAD_TRACE_BUF0_BASE,
							 S_008D00_BASE_LO(shifted_va));

			radeon_set_privileged_config_reg(cs, R_008D14_SQ_THREAD_TRACE_MASK,
							 S_008D14_WTYPE_INCLUDE(0x7f) | /* all shader stages */
							 S_008D14_SA_SEL(0) |
							 S_008D14_WGP_SEL(0) |
							 S_008D14_SIMD_SEL(0));

			/* NOTE(review): V_008D18_REG_INCLUDE_CONTEXT appears
			 * twice below — harmless since OR is idempotent, but
			 * possibly one occurrence was meant to be a different
			 * REG_INCLUDE value; confirm against the register
			 * reference. */
			radeon_set_privileged_config_reg(cs, R_008D18_SQ_THREAD_TRACE_TOKEN_MASK,
							 S_008D18_REG_INCLUDE(V_008D18_REG_INCLUDE_SQDEC |
									      V_008D18_REG_INCLUDE_SHDEC |
									      V_008D18_REG_INCLUDE_GFXUDEC |
									      V_008D18_REG_INCLUDE_CONTEXT |
									      V_008D18_REG_INCLUDE_COMP |
									      V_008D18_REG_INCLUDE_CONTEXT |
									      V_008D18_REG_INCLUDE_CONFIG) |
							 S_008D18_TOKEN_EXCLUDE(V_008D18_TOKEN_EXCLUDE_PERF));

			/* Should be emitted last (it enables thread traces). */
			radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL,
							 S_008D1C_MODE(1) |
							 S_008D1C_HIWATER(5) |
							 S_008D1C_UTIL_TIMER(1) |
							 S_008D1C_RT_FREQ(2) | /* 4096 clk */
							 S_008D1C_DRAW_EVENT_EN(1) |
							 S_008D1C_REG_STALL_EN(1) |
							 S_008D1C_SPI_STALL_EN(1) |
							 S_008D1C_SQ_STALL_EN(1) |
							 S_008D1C_REG_DROP_ON_STALL(0));
		}
	}

	/* Restore global broadcasting. */
	radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
			       S_030800_SE_BROADCAST_WRITES(1) |
			       S_030800_SH_BROADCAST_WRITES(1) |
			       S_030800_INSTANCE_BROADCAST_WRITES(1));

	/* Start the thread trace with a different event based on the queue. */
	if (queue_family_index == RADV_QUEUE_COMPUTE &&
	    device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
				  S_00B878_THREAD_TRACE_ENABLE(1));
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_START) | EVENT_INDEX(0));
	}
}
195
/* GFX9 registers copied back into radv_thread_trace_info after a capture
 * (one DWORD each, in struct field order).
 */
static const uint32_t gfx9_thread_trace_info_regs[] =
{
	R_030CE4_SQ_THREAD_TRACE_WPTR,
	R_030CE8_SQ_THREAD_TRACE_STATUS,
	R_030CF0_SQ_THREAD_TRACE_CNTR,
};
202
/* GFX10 equivalents of gfx9_thread_trace_info_regs; GFX10 reports dropped
 * bytes via THREAD_TRACE_DROPPED_CNTR instead of a write counter.
 */
static const uint32_t gfx10_thread_trace_info_regs[] =
{
	R_008D10_SQ_THREAD_TRACE_WPTR,
	R_008D20_SQ_THREAD_TRACE_STATUS,
	R_008D24_SQ_THREAD_TRACE_DROPPED_CNTR,
};
209
/* Emit the packets that stop SQTT capture, wait for the hardware to finish
 * flushing trace data, and copy the per-SE info registers back into the
 * thread trace BO so the CPU can read them.
 */
static void
radv_emit_thread_trace_stop(struct radv_device *device,
			    struct radeon_cmdbuf *cs,
			    uint32_t queue_family_index)
{
	unsigned max_se = device->physical_device->rad_info.max_se;

	assert(device->physical_device->rad_info.chip_class >= GFX9);

	/* Stop the thread trace with a different event based on the queue. */
	if (queue_family_index == RADV_QUEUE_COMPUTE &&
	    device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
				  S_00B878_THREAD_TRACE_ENABLE(0));
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP) | EVENT_INDEX(0));
	}

	/* Ask the hardware to finalize the trace buffers. */
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH) | EVENT_INDEX(0));

	for (unsigned se = 0; se < max_se; se++) {
		/* Target SEi and SH0. */
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_INDEX(se) |
				       S_030800_SH_INDEX(0) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

		if (device->physical_device->rad_info.chip_class == GFX9) {
			/* Disable the thread trace mode. */
			radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
					       S_030CD8_MODE(0));

			/* Wait for thread trace completion (BUSY == 0). */
			radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
			radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
			radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
			radeon_emit(cs, 0);
			radeon_emit(cs, 0); /* reference value */
			radeon_emit(cs, S_030CE8_BUSY(1)); /* mask */
			radeon_emit(cs, 4); /* poll interval */

			/* Get the VA where the info struct is stored for this SE. */
			uint64_t info_va = radv_thread_trace_get_info_va(device, se);

			/* Copy back the info struct one DWORD at a time. */
			for (unsigned i = 0; i < ARRAY_SIZE(gfx9_thread_trace_info_regs); i++) {
				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
						COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
						COPY_DATA_WR_CONFIRM);
				radeon_emit(cs, gfx9_thread_trace_info_regs[i] >> 2);
				radeon_emit(cs, 0); /* unused */
				radeon_emit(cs, (info_va + i * 4));
				radeon_emit(cs, (info_va + i * 4) >> 32);
			}
		} else {
			assert(device->physical_device->rad_info.chip_class == GFX10);

			/* Make sure to wait for the trace buffer. */
			radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
			radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL); /* wait until FINISH_DONE != 0 (i.e. register differs from the 0 reference) */
			radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
			radeon_emit(cs, 0);
			radeon_emit(cs, 0); /* reference value */
			radeon_emit(cs, S_008D20_FINISH_DONE(1)); /* mask */
			radeon_emit(cs, 4); /* poll interval */

			/* Disable the thread trace mode. */
			radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL,
							 S_008D1C_MODE(0));

			/* Wait for thread trace completion (BUSY == 0). */
			radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
			radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
			radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
			radeon_emit(cs, 0);
			radeon_emit(cs, 0); /* reference value */
			radeon_emit(cs, S_008D20_BUSY(1)); /* mask */
			radeon_emit(cs, 4); /* poll interval */

			/* Get the VA where the info struct is stored for this SE. */
			uint64_t info_va = radv_thread_trace_get_info_va(device, se);

			/* Copy back the info struct one DWORD at a time. */
			for (unsigned i = 0; i < ARRAY_SIZE(gfx10_thread_trace_info_regs); i++) {
				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
						COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
						COPY_DATA_WR_CONFIRM);
				radeon_emit(cs, gfx10_thread_trace_info_regs[i] >> 2);
				radeon_emit(cs, 0); /* unused */
				radeon_emit(cs, (info_va + i * 4));
				radeon_emit(cs, (info_va + i * 4) >> 32);
			}
		}
	}

	/* Restore global broadcasting. */
	radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
			       S_030800_SE_BROADCAST_WRITES(1) |
			       S_030800_SH_BROADCAST_WRITES(1) |
			       S_030800_INSTANCE_BROADCAST_WRITES(1));
}
315
316 static void
317 radv_emit_spi_config_cntl(struct radv_device *device,
318 struct radeon_cmdbuf *cs, bool enable)
319 {
320 uint32_t spi_config_cntl = S_031100_GPR_WRITE_PRIORITY(0x2c688) |
321 S_031100_EXP_PRIORITY_ORDER(3) |
322 S_031100_ENABLE_SQG_TOP_EVENTS(enable) |
323 S_031100_ENABLE_SQG_BOP_EVENTS(enable);
324
325 if (device->physical_device->rad_info.chip_class == GFX10)
326 spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);
327
328 radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
329 }
330
331 static void
332 radv_emit_wait_for_idle(struct radv_device *device,
333 struct radeon_cmdbuf *cs, int family)
334 {
335 si_cs_emit_cache_flush(cs, device->physical_device->rad_info.chip_class,
336 NULL, 0,
337 family == RING_COMPUTE &&
338 device->physical_device->rad_info.chip_class >= GFX7,
339 (family == RADV_QUEUE_COMPUTE ?
340 RADV_CMD_FLAG_CS_PARTIAL_FLUSH :
341 (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
342 RADV_CMD_FLAG_INV_ICACHE |
343 RADV_CMD_FLAG_INV_SCACHE |
344 RADV_CMD_FLAG_INV_VCACHE |
345 RADV_CMD_FLAG_INV_L2, 0);
346 }
347
/* Build the reusable start/stop command streams (one pair per queue family:
 * RADV_QUEUE_GENERAL and RADV_QUEUE_COMPUTE) that are submitted by
 * radv_begin_thread_trace()/radv_end_thread_trace().
 *
 * NOTE(review): ws->cs_create() results are not checked for NULL before
 * being written to — confirm whether the winsys guarantees success here or
 * whether this should propagate a failure.
 */
static void
radv_thread_trace_init_cs(struct radv_device *device)
{
	struct radeon_winsys *ws = device->ws;

	/* Thread trace start CS. */
	for (int family = 0; family < 2; ++family) {
		device->thread_trace_start_cs[family] = ws->cs_create(ws, family);
		switch (family) {
		case RADV_QUEUE_GENERAL:
			/* Standard gfx preamble: enable context load/shadow. */
			radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
			radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
			radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
			break;
		case RADV_QUEUE_COMPUTE:
			/* Compute queues have no CONTEXT_CONTROL; pad with a NOP. */
			radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_NOP, 0, 0));
			radeon_emit(device->thread_trace_start_cs[family], 0);
			break;
		}

		radv_cs_add_buffer(ws, device->thread_trace_start_cs[family],
				   device->thread_trace_bo);

		/* Make sure to wait-for-idle before starting SQTT. */
		radv_emit_wait_for_idle(device,
					device->thread_trace_start_cs[family],
					family);

		/* Enable SQG events that collects thread trace data. */
		radv_emit_spi_config_cntl(device,
					  device->thread_trace_start_cs[family],
					  true);

		radv_emit_thread_trace_start(device,
					     device->thread_trace_start_cs[family],
					     family);

		ws->cs_finalize(device->thread_trace_start_cs[family]);
	}

	/* Thread trace stop CS. */
	for (int family = 0; family < 2; ++family) {
		device->thread_trace_stop_cs[family] = ws->cs_create(ws, family);
		switch (family) {
		case RADV_QUEUE_GENERAL:
			radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
			radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
			radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
			break;
		case RADV_QUEUE_COMPUTE:
			radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_NOP, 0, 0));
			radeon_emit(device->thread_trace_stop_cs[family], 0);
			break;
		}

		radv_cs_add_buffer(ws, device->thread_trace_stop_cs[family],
				   device->thread_trace_bo);

		/* Make sure to wait-for-idle before stopping SQTT. */
		radv_emit_wait_for_idle(device,
					device->thread_trace_stop_cs[family],
					family);

		radv_emit_thread_trace_stop(device,
					    device->thread_trace_stop_cs[family],
					    family);

		/* Restore previous state by disabling SQG events. */
		radv_emit_spi_config_cntl(device,
					  device->thread_trace_stop_cs[family],
					  false);

		ws->cs_finalize(device->thread_trace_stop_cs[family]);
	}
}
423
/* Allocate and CPU-map the thread trace BO: 4 info structs at the start
 * (aligned up to the SQTT buffer alignment), followed by one data buffer
 * per SE. The layout must match radv_thread_trace_get_info_offset() and
 * radv_thread_trace_get_data_offset(), which also assume 4 SEs.
 *
 * Returns false on allocation or mapping failure; cleanup of a partially
 * initialized BO is handled by radv_thread_trace_finish().
 */
static bool
radv_thread_trace_init_bo(struct radv_device *device)
{
	struct radeon_winsys *ws = device->ws;
	uint64_t size;

	/* Compute total size of the thread trace BO for 4 SEs. */
	size = align64(sizeof(struct radv_thread_trace_info) * 4,
		       1 << SQTT_BUFFER_ALIGN_SHIFT);
	size += device->thread_trace_buffer_size * 4;

	device->thread_trace_bo = ws->buffer_create(ws, size, 4096,
						    RADEON_DOMAIN_VRAM,
						    RADEON_FLAG_CPU_ACCESS |
						    RADEON_FLAG_NO_INTERPROCESS_SHARING |
						    RADEON_FLAG_ZERO_VRAM,
						    RADV_BO_PRIORITY_SCRATCH);
	if (!device->thread_trace_bo)
		return false;

	device->thread_trace_ptr = ws->buffer_map(device->thread_trace_bo);
	if (!device->thread_trace_ptr)
		return false;

	return true;
}
450
451 bool
452 radv_thread_trace_init(struct radv_device *device)
453 {
454 if (!radv_thread_trace_init_bo(device))
455 return false;
456
457 radv_thread_trace_init_cs(device);
458 return true;
459 }
460
461 void
462 radv_thread_trace_finish(struct radv_device *device)
463 {
464 struct radeon_winsys *ws = device->ws;
465
466 if (unlikely(device->thread_trace_bo))
467 ws->buffer_destroy(device->thread_trace_bo);
468
469 for (unsigned i = 0; i < 2; i++) {
470 if (device->thread_trace_start_cs[i])
471 ws->cs_destroy(device->thread_trace_start_cs[i]);
472 if (device->thread_trace_stop_cs[i])
473 ws->cs_destroy(device->thread_trace_stop_cs[i]);
474 }
475 }
476
477 bool
478 radv_begin_thread_trace(struct radv_queue *queue)
479 {
480 int family = queue->queue_family_index;
481 struct radeon_cmdbuf *cs = queue->device->thread_trace_start_cs[family];
482 return radv_queue_internal_submit(queue, cs);
483 }
484
485 bool
486 radv_end_thread_trace(struct radv_queue *queue)
487 {
488 int family = queue->queue_family_index;
489 struct radeon_cmdbuf *cs = queue->device->thread_trace_stop_cs[family];
490 return radv_queue_internal_submit(queue, cs);
491 }
492
493 static bool
494 radv_is_thread_trace_complete(struct radv_device *device,
495 const struct radv_thread_trace_info *info)
496 {
497 if (device->physical_device->rad_info.chip_class == GFX10) {
498 /* GFX10 doesn't have THREAD_TRACE_CNTR but it reports the
499 * number of dropped bytes for all SEs via
500 * THREAD_TRACE_DROPPED_CNTR.
501 */
502 return info->gfx10_dropped_cntr == 0;
503 }
504
505 /* Otherwise, compare the current thread trace offset with the number
506 * of written bytes.
507 */
508 return info->cur_offset < info->gfx9_write_counter;
509 }
510
511 static uint32_t
512 radv_get_expected_buffer_size(struct radv_device *device,
513 const struct radv_thread_trace_info *info)
514 {
515 if (device->physical_device->rad_info.chip_class == GFX10) {
516 uint32_t dropped_cntr_per_se = info->gfx10_dropped_cntr / device->physical_device->rad_info.max_se;
517 return ((info->cur_offset * 32) + dropped_cntr_per_se) / 1024;
518 }
519
520 return (info->gfx9_write_counter * 32) / 1024;
521 }
522
523 bool
524 radv_get_thread_trace(struct radv_queue *queue,
525 struct radv_thread_trace *thread_trace)
526 {
527 struct radv_device *device = queue->device;
528 unsigned max_se = device->physical_device->rad_info.max_se;
529 void *thread_trace_ptr = device->thread_trace_ptr;
530
531 memset(thread_trace, 0, sizeof(*thread_trace));
532 thread_trace->num_traces = max_se;
533
534 for (unsigned se = 0; se < max_se; se++) {
535 uint64_t info_offset = radv_thread_trace_get_info_offset(se);
536 uint64_t data_offset = radv_thread_trace_get_data_offset(device, se);
537 void *info_ptr = thread_trace_ptr + info_offset;
538 void *data_ptr = thread_trace_ptr + data_offset;
539 struct radv_thread_trace_info *info =
540 (struct radv_thread_trace_info *)info_ptr;
541 struct radv_thread_trace_se thread_trace_se = {};
542
543 if (!radv_is_thread_trace_complete(device, info)) {
544 uint32_t expected_size =
545 radv_get_expected_buffer_size(device, info);
546 uint32_t available_size =
547 (info->cur_offset * 32) / 1024;
548
549 fprintf(stderr, "Failed to get the thread trace "
550 "because the buffer is too small. The "
551 "hardware needs %d KB but the "
552 "buffer size is %d KB.\n",
553 expected_size, available_size);
554 fprintf(stderr, "Please update the buffer size with "
555 "RADV_THREAD_TRACE_BUFER_SIZE=<size_in_bytes>\n");
556 return false;
557 }
558
559 thread_trace_se.data_ptr = data_ptr;
560 thread_trace_se.info = *info;
561 thread_trace_se.shader_engine = se;
562 thread_trace_se.compute_unit = 0;
563
564 thread_trace->traces[se] = thread_trace_se;
565 }
566
567 return true;
568 }