/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_cs.h"
#include "radv_private.h"
#include "sid.h"
30 #define SQTT_BUFFER_ALIGN_SHIFT 12
33 radv_thread_trace_get_info_offset(unsigned se
)
35 return sizeof(struct radv_thread_trace_info
) * se
;
39 radv_thread_trace_get_data_offset(struct radv_device
*device
, unsigned se
)
43 data_offset
= align64(sizeof(struct radv_thread_trace_info
) * 4,
44 1 << SQTT_BUFFER_ALIGN_SHIFT
);
45 data_offset
+= device
->thread_trace_buffer_size
* se
;
51 radv_thread_trace_get_info_va(struct radv_device
*device
, unsigned se
)
53 uint64_t va
= radv_buffer_get_va(device
->thread_trace_bo
);
54 return va
+ radv_thread_trace_get_info_offset(se
);
58 radv_thread_trace_get_data_va(struct radv_device
*device
, unsigned se
)
60 uint64_t va
= radv_buffer_get_va(device
->thread_trace_bo
);
61 return va
+ radv_thread_trace_get_data_offset(device
, se
);
65 radv_emit_thread_trace_start(struct radv_device
*device
,
66 struct radeon_cmdbuf
*cs
,
67 uint32_t queue_family_index
)
69 uint32_t shifted_size
= device
->thread_trace_buffer_size
>> SQTT_BUFFER_ALIGN_SHIFT
;
70 unsigned max_se
= device
->physical_device
->rad_info
.max_se
;
72 assert(device
->physical_device
->rad_info
.chip_class
== GFX9
);
74 for (unsigned se
= 0; se
< max_se
; se
++) {
75 uint64_t data_va
= radv_thread_trace_get_data_va(device
, se
);
76 uint64_t shifted_va
= data_va
>> SQTT_BUFFER_ALIGN_SHIFT
;
78 /* Target SEx and SH0. */
79 radeon_set_uconfig_reg(cs
, R_030800_GRBM_GFX_INDEX
,
80 S_030800_SE_INDEX(se
) |
81 S_030800_SH_INDEX(0) |
82 S_030800_INSTANCE_BROADCAST_WRITES(1));
84 /* Order seems important for the following 4 registers. */
85 radeon_set_uconfig_reg(cs
, R_030CDC_SQ_THREAD_TRACE_BASE2
,
86 S_030CDC_ADDR_HI(shifted_va
>> 32));
88 radeon_set_uconfig_reg(cs
, R_030CC0_SQ_THREAD_TRACE_BASE
,
89 S_030CC0_ADDR(shifted_va
));
91 radeon_set_uconfig_reg(cs
, R_030CC4_SQ_THREAD_TRACE_SIZE
,
92 S_030CC4_SIZE(shifted_size
));
94 radeon_set_uconfig_reg(cs
, R_030CD4_SQ_THREAD_TRACE_CTRL
,
95 S_030CD4_RESET_BUFFER(1));
97 radeon_set_uconfig_reg(cs
, R_030CC8_SQ_THREAD_TRACE_MASK
,
100 S_030CC8_SIMD_EN(0xf) |
101 S_030CC8_VM_ID_MASK(0) |
102 S_030CC8_REG_STALL_EN(1) |
103 S_030CC8_SPI_STALL_EN(1) |
104 S_030CC8_SQ_STALL_EN(1));
106 /* Trace all tokens and registers. */
107 radeon_set_uconfig_reg(cs
, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK
,
108 S_030CCC_TOKEN_MASK(0xbfff) |
109 S_030CCC_REG_MASK(0xff) |
110 S_030CCC_REG_DROP_ON_STALL(0));
112 /* Enable SQTT perf counters for all CUs. */
113 radeon_set_uconfig_reg(cs
, R_030CD0_SQ_THREAD_TRACE_PERF_MASK
,
114 S_030CD0_SH0_MASK(0xffff) |
115 S_030CD0_SH1_MASK(0xffff));
117 radeon_set_uconfig_reg(cs
, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2
,
118 S_030CE0_INST_MASK(0xffffffff));
120 radeon_set_uconfig_reg(cs
, R_030CEC_SQ_THREAD_TRACE_HIWATER
,
121 S_030CEC_HIWATER(4));
123 /* Reset thread trace status errors. */
124 radeon_set_uconfig_reg(cs
, R_030CE8_SQ_THREAD_TRACE_STATUS
,
125 S_030CE8_UTC_ERROR(0));
127 /* Enable the thread trace mode. */
128 radeon_set_uconfig_reg(cs
, R_030CD8_SQ_THREAD_TRACE_MODE
,
129 S_030CD8_MASK_PS(1) |
130 S_030CD8_MASK_VS(1) |
131 S_030CD8_MASK_GS(1) |
132 S_030CD8_MASK_ES(1) |
133 S_030CD8_MASK_HS(1) |
134 S_030CD8_MASK_LS(1) |
135 S_030CD8_MASK_CS(1) |
136 S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
137 S_030CD8_TC_PERF_EN(1) | /* count SQTT traffic in TCC perf counters */
141 /* Restore global broadcasting. */
142 radeon_set_uconfig_reg(cs
, R_030800_GRBM_GFX_INDEX
,
143 S_030800_SE_BROADCAST_WRITES(1) |
144 S_030800_SH_BROADCAST_WRITES(1) |
145 S_030800_INSTANCE_BROADCAST_WRITES(1));
147 /* Start the thread trace with a different event based on the queue. */
148 if (queue_family_index
== RADV_QUEUE_COMPUTE
&&
149 device
->physical_device
->rad_info
.chip_class
>= GFX7
) {
150 radeon_set_sh_reg(cs
, R_00B878_COMPUTE_THREAD_TRACE_ENABLE
,
151 S_00B878_THREAD_TRACE_ENABLE(1));
153 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
154 radeon_emit(cs
, EVENT_TYPE(V_028A90_THREAD_TRACE_START
) | EVENT_INDEX(0));
158 static const uint32_t thread_trace_info_regs
[] =
160 R_030CE4_SQ_THREAD_TRACE_WPTR
,
161 R_030CE8_SQ_THREAD_TRACE_STATUS
,
162 R_030CF0_SQ_THREAD_TRACE_CNTR
,
166 radv_emit_thread_trace_stop(struct radv_device
*device
,
167 struct radeon_cmdbuf
*cs
,
168 uint32_t queue_family_index
)
170 unsigned max_se
= device
->physical_device
->rad_info
.max_se
;
172 assert(device
->physical_device
->rad_info
.chip_class
== GFX9
);
174 /* Stop the thread trace with a different event based on the queue. */
175 if (queue_family_index
== RADV_QUEUE_COMPUTE
&&
176 device
->physical_device
->rad_info
.chip_class
>= GFX7
) {
177 radeon_set_sh_reg(cs
, R_00B878_COMPUTE_THREAD_TRACE_ENABLE
,
178 S_00B878_THREAD_TRACE_ENABLE(0));
180 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
181 radeon_emit(cs
, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP
) | EVENT_INDEX(0));
184 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
185 radeon_emit(cs
, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH
) | EVENT_INDEX(0));
187 for (unsigned se
= 0; se
< max_se
; se
++) {
188 /* Target SEi and SH0. */
189 radeon_set_uconfig_reg(cs
, R_030800_GRBM_GFX_INDEX
,
190 S_030800_SE_INDEX(se
) |
191 S_030800_SH_INDEX(0) |
192 S_030800_INSTANCE_BROADCAST_WRITES(1));
194 /* Disable the thread trace mode. */
195 radeon_set_uconfig_reg(cs
, R_030CD8_SQ_THREAD_TRACE_MODE
,
198 /* Wait for thread trace completion. */
199 radeon_emit(cs
, PKT3(PKT3_WAIT_REG_MEM
, 5, 0));
200 radeon_emit(cs
, WAIT_REG_MEM_EQUAL
); /* wait until the register is equal to the reference value */
201 radeon_emit(cs
, R_030CE8_SQ_THREAD_TRACE_STATUS
>> 2); /* register */
203 radeon_emit(cs
, 0); /* reference value */
204 radeon_emit(cs
, S_030CE8_BUSY(1)); /* mask */
205 radeon_emit(cs
, 4); /* poll interval */
207 /* Get the VA where the info struct is stored for this SE. */
208 uint64_t info_va
= radv_thread_trace_get_info_va(device
, se
);
210 /* Copy back the info struct one DWORD at a time. */
211 for (unsigned i
= 0; i
< ARRAY_SIZE(thread_trace_info_regs
); i
++) {
212 radeon_emit(cs
, PKT3(PKT3_COPY_DATA
, 4, 0));
213 radeon_emit(cs
, COPY_DATA_SRC_SEL(COPY_DATA_PERF
) |
214 COPY_DATA_DST_SEL(COPY_DATA_TC_L2
) |
215 COPY_DATA_WR_CONFIRM
);
216 radeon_emit(cs
, thread_trace_info_regs
[i
] >> 2);
217 radeon_emit(cs
, 0); /* unused */
218 radeon_emit(cs
, (info_va
+ i
* 4));
219 radeon_emit(cs
, (info_va
+ i
* 4) >> 32);
223 /* Restore global broadcasting. */
224 radeon_set_uconfig_reg(cs
, R_030800_GRBM_GFX_INDEX
,
225 S_030800_SE_BROADCAST_WRITES(1) |
226 S_030800_SH_BROADCAST_WRITES(1) |
227 S_030800_INSTANCE_BROADCAST_WRITES(1));
231 radv_emit_spi_config_cntl(struct radeon_cmdbuf
*cs
, bool enable
)
233 radeon_set_uconfig_reg(cs
, R_031100_SPI_CONFIG_CNTL
,
234 S_031100_GPR_WRITE_PRIORITY(0x2c688) |
235 S_031100_EXP_PRIORITY_ORDER(3) |
236 S_031100_ENABLE_SQG_TOP_EVENTS(enable
) |
237 S_031100_ENABLE_SQG_BOP_EVENTS(enable
));
241 radv_emit_wait_for_idle(struct radv_device
*device
,
242 struct radeon_cmdbuf
*cs
, int family
)
244 si_cs_emit_cache_flush(cs
, device
->physical_device
->rad_info
.chip_class
,
246 family
== RING_COMPUTE
&&
247 device
->physical_device
->rad_info
.chip_class
>= GFX7
,
248 (family
== RADV_QUEUE_COMPUTE
?
249 RADV_CMD_FLAG_CS_PARTIAL_FLUSH
:
250 (RADV_CMD_FLAG_CS_PARTIAL_FLUSH
| RADV_CMD_FLAG_PS_PARTIAL_FLUSH
)) |
251 RADV_CMD_FLAG_INV_ICACHE
|
252 RADV_CMD_FLAG_INV_SCACHE
|
253 RADV_CMD_FLAG_INV_VCACHE
|
254 RADV_CMD_FLAG_INV_L2
, 0);
258 radv_thread_trace_init_cs(struct radv_device
*device
)
260 struct radeon_winsys
*ws
= device
->ws
;
262 /* Thread trace start CS. */
263 for (int family
= 0; family
< 2; ++family
) {
264 device
->thread_trace_start_cs
[family
] = ws
->cs_create(ws
, family
);
266 case RADV_QUEUE_GENERAL
:
267 radeon_emit(device
->thread_trace_start_cs
[family
], PKT3(PKT3_CONTEXT_CONTROL
, 1, 0));
268 radeon_emit(device
->thread_trace_start_cs
[family
], CONTEXT_CONTROL_LOAD_ENABLE(1));
269 radeon_emit(device
->thread_trace_start_cs
[family
], CONTEXT_CONTROL_SHADOW_ENABLE(1));
271 case RADV_QUEUE_COMPUTE
:
272 radeon_emit(device
->thread_trace_start_cs
[family
], PKT3(PKT3_NOP
, 0, 0));
273 radeon_emit(device
->thread_trace_start_cs
[family
], 0);
277 radv_cs_add_buffer(ws
, device
->thread_trace_start_cs
[family
],
278 device
->thread_trace_bo
);
280 /* Make sure to wait-for-idle before starting SQTT. */
281 radv_emit_wait_for_idle(device
,
282 device
->thread_trace_start_cs
[family
],
285 /* Enable SQG events that collects thread trace data. */
286 radv_emit_spi_config_cntl(device
->thread_trace_start_cs
[family
], true);
288 radv_emit_thread_trace_start(device
,
289 device
->thread_trace_start_cs
[family
],
292 ws
->cs_finalize(device
->thread_trace_start_cs
[family
]);
295 /* Thread trace stop CS. */
296 for (int family
= 0; family
< 2; ++family
) {
297 device
->thread_trace_stop_cs
[family
] = ws
->cs_create(ws
, family
);
299 case RADV_QUEUE_GENERAL
:
300 radeon_emit(device
->thread_trace_stop_cs
[family
], PKT3(PKT3_CONTEXT_CONTROL
, 1, 0));
301 radeon_emit(device
->thread_trace_stop_cs
[family
], CONTEXT_CONTROL_LOAD_ENABLE(1));
302 radeon_emit(device
->thread_trace_stop_cs
[family
], CONTEXT_CONTROL_SHADOW_ENABLE(1));
304 case RADV_QUEUE_COMPUTE
:
305 radeon_emit(device
->thread_trace_stop_cs
[family
], PKT3(PKT3_NOP
, 0, 0));
306 radeon_emit(device
->thread_trace_stop_cs
[family
], 0);
310 radv_cs_add_buffer(ws
, device
->thread_trace_stop_cs
[family
],
311 device
->thread_trace_bo
);
313 /* Make sure to wait-for-idle before stopping SQTT. */
314 radv_emit_wait_for_idle(device
,
315 device
->thread_trace_stop_cs
[family
],
318 radv_emit_thread_trace_stop(device
,
319 device
->thread_trace_stop_cs
[family
],
322 /* Restore previous state by disabling SQG events. */
323 radv_emit_spi_config_cntl(device
->thread_trace_stop_cs
[family
], false);
325 ws
->cs_finalize(device
->thread_trace_stop_cs
[family
]);
330 radv_thread_trace_init_bo(struct radv_device
*device
)
332 struct radeon_winsys
*ws
= device
->ws
;
335 /* Compute total size of the thread trace BO for 4 SEs. */
336 size
= align64(sizeof(struct radv_thread_trace_info
) * 4,
337 1 << SQTT_BUFFER_ALIGN_SHIFT
);
338 size
+= device
->thread_trace_buffer_size
* 4;
340 device
->thread_trace_bo
= ws
->buffer_create(ws
, size
, 4096,
342 RADEON_FLAG_CPU_ACCESS
|
343 RADEON_FLAG_NO_INTERPROCESS_SHARING
|
344 RADEON_FLAG_ZERO_VRAM
,
345 RADV_BO_PRIORITY_SCRATCH
);
346 if (!device
->thread_trace_bo
)
349 device
->thread_trace_ptr
= ws
->buffer_map(device
->thread_trace_bo
);
350 if (!device
->thread_trace_ptr
)
357 radv_thread_trace_init(struct radv_device
*device
)
359 if (!radv_thread_trace_init_bo(device
))
362 radv_thread_trace_init_cs(device
);
367 radv_thread_trace_finish(struct radv_device
*device
)
369 struct radeon_winsys
*ws
= device
->ws
;
371 if (unlikely(device
->thread_trace_bo
))
372 ws
->buffer_destroy(device
->thread_trace_bo
);
374 for (unsigned i
= 0; i
< 2; i
++) {
375 if (device
->thread_trace_start_cs
[i
])
376 ws
->cs_destroy(device
->thread_trace_start_cs
[i
]);
377 if (device
->thread_trace_stop_cs
[i
])
378 ws
->cs_destroy(device
->thread_trace_stop_cs
[i
]);
383 radv_begin_thread_trace(struct radv_queue
*queue
)
385 int family
= queue
->queue_family_index
;
386 struct radeon_cmdbuf
*cs
= queue
->device
->thread_trace_start_cs
[family
];
387 return radv_queue_internal_submit(queue
, cs
);
391 radv_end_thread_trace(struct radv_queue
*queue
)
393 int family
= queue
->queue_family_index
;
394 struct radeon_cmdbuf
*cs
= queue
->device
->thread_trace_stop_cs
[family
];
395 return radv_queue_internal_submit(queue
, cs
);
399 radv_get_thread_trace(struct radv_queue
*queue
,
400 struct radv_thread_trace
*thread_trace
)
402 struct radv_device
*device
= queue
->device
;
403 unsigned max_se
= device
->physical_device
->rad_info
.max_se
;
404 void *thread_trace_ptr
= device
->thread_trace_ptr
;
406 memset(thread_trace
, 0, sizeof(*thread_trace
));
407 thread_trace
->num_traces
= max_se
;
409 for (unsigned se
= 0; se
< max_se
; se
++) {
410 uint64_t info_offset
= radv_thread_trace_get_info_offset(se
);
411 uint64_t data_offset
= radv_thread_trace_get_data_offset(device
, se
);
412 void *info_ptr
= thread_trace_ptr
+ info_offset
;
413 void *data_ptr
= thread_trace_ptr
+ data_offset
;
414 struct radv_thread_trace_info
*info
=
415 (struct radv_thread_trace_info
*)info_ptr
;
416 struct radv_thread_trace_se thread_trace_se
= {};
418 if (info
->cur_offset
< info
->write_counter
) {
419 uint32_t expected_size
=
420 (info
->write_counter
* 32) / 1024;
421 uint32_t available_size
=
422 (info
->cur_offset
* 32) / 1024;
424 fprintf(stderr
, "Failed to get the thread trace "
425 "because the buffer is too small. The "
426 "hardware needs %d KB but the "
427 "buffer size is %d KB.\n",
428 expected_size
, available_size
);
429 fprintf(stderr
, "Please update the buffer size with "
430 "RADV_THREAD_TRACE_BUFER_SIZE=<size_in_bytes>\n");
434 thread_trace_se
.data_ptr
= data_ptr
;
435 thread_trace_se
.info
= *info
;
436 thread_trace_se
.shader_engine
= se
;
437 thread_trace_se
.compute_unit
= 0;
439 thread_trace
->traces
[se
] = thread_trace_se
;