uint32_t shifted_size = device->thread_trace_buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
unsigned max_se = device->physical_device->rad_info.max_se;
- assert(device->physical_device->rad_info.chip_class >= GFX9);
+ assert(device->physical_device->rad_info.chip_class >= GFX8);
for (unsigned se = 0; se < max_se; se++) {
uint64_t data_va = radv_thread_trace_get_data_va(device, se);
S_030800_SH_INDEX(0) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
- if (device->physical_device->rad_info.chip_class == GFX9) {
- /* Order seems important for the following 4 registers. */
- radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
- S_030CDC_ADDR_HI(shifted_va >> 32));
-
- radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE,
- S_030CC0_ADDR(shifted_va));
-
- radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE,
- S_030CC4_SIZE(shifted_size));
-
- radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL,
- S_030CD4_RESET_BUFFER(1));
-
- radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK,
- S_030CC8_CU_SEL(2) |
- S_030CC8_SH_SEL(0) |
- S_030CC8_SIMD_EN(0xf) |
- S_030CC8_VM_ID_MASK(0) |
- S_030CC8_REG_STALL_EN(1) |
- S_030CC8_SPI_STALL_EN(1) |
- S_030CC8_SQ_STALL_EN(1));
-
- /* Trace all tokens and registers. */
- radeon_set_uconfig_reg(cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
- S_030CCC_TOKEN_MASK(0xbfff) |
- S_030CCC_REG_MASK(0xff) |
- S_030CCC_REG_DROP_ON_STALL(0));
-
- /* Enable SQTT perf counters for all CUs. */
- radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
- S_030CD0_SH0_MASK(0xffff) |
- S_030CD0_SH1_MASK(0xffff));
-
- radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2,
- S_030CE0_INST_MASK(0xffffffff));
-
- radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER,
- S_030CEC_HIWATER(4));
-
- /* Reset thread trace status errors. */
- radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS,
- S_030CE8_UTC_ERROR(0));
-
- /* Enable the thread trace mode. */
- radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
- S_030CD8_MASK_PS(1) |
- S_030CD8_MASK_VS(1) |
- S_030CD8_MASK_GS(1) |
- S_030CD8_MASK_ES(1) |
- S_030CD8_MASK_HS(1) |
- S_030CD8_MASK_LS(1) |
- S_030CD8_MASK_CS(1) |
- S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
- S_030CD8_TC_PERF_EN(1) | /* count SQTT traffic in TCC perf counters */
- S_030CD8_MODE(1));
- } else {
+ if (device->physical_device->rad_info.chip_class == GFX10) {
/* Order seems important for the following 2 registers. */
radeon_set_privileged_config_reg(cs, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
S_008D04_SIZE(shifted_size) |
S_008D1C_SPI_STALL_EN(1) |
S_008D1C_SQ_STALL_EN(1) |
S_008D1C_REG_DROP_ON_STALL(0));
+ } else {
+ /* Order seems important for the following 4 registers. */
+ radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
+ S_030CDC_ADDR_HI(shifted_va >> 32));
+
+ radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE,
+ S_030CC0_ADDR(shifted_va));
+
+ radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE,
+ S_030CC4_SIZE(shifted_size));
+
+ radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL,
+ S_030CD4_RESET_BUFFER(1));
+
+ uint32_t thread_trace_mask = S_030CC8_CU_SEL(2) |
+ S_030CC8_SH_SEL(0) |
+ S_030CC8_SIMD_EN(0xf) |
+ S_030CC8_VM_ID_MASK(0) |
+ S_030CC8_REG_STALL_EN(1) |
+ S_030CC8_SPI_STALL_EN(1) |
+ S_030CC8_SQ_STALL_EN(1);
+
+ if (device->physical_device->rad_info.chip_class < GFX9) {
+ thread_trace_mask |= S_030CC8_RANDOM_SEED(0xffff);
+ }
+
+ radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK,
+ thread_trace_mask);
+
+ /* Trace all tokens and registers. */
+ radeon_set_uconfig_reg(cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
+ S_030CCC_TOKEN_MASK(0xbfff) |
+ S_030CCC_REG_MASK(0xff) |
+ S_030CCC_REG_DROP_ON_STALL(0));
+
+ /* Enable SQTT perf counters for all CUs. */
+ radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
+ S_030CD0_SH0_MASK(0xffff) |
+ S_030CD0_SH1_MASK(0xffff));
+
+ radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2,
+ S_030CE0_INST_MASK(0xffffffff));
+
+ radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER,
+ S_030CEC_HIWATER(4));
+
+ if (device->physical_device->rad_info.chip_class == GFX9) {
+ /* Reset thread trace status errors. */
+ radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS,
+ S_030CE8_UTC_ERROR(0));
+ }
+
+ /* Enable the thread trace mode. */
+ uint32_t thread_trace_mode = S_030CD8_MASK_PS(1) |
+ S_030CD8_MASK_VS(1) |
+ S_030CD8_MASK_GS(1) |
+ S_030CD8_MASK_ES(1) |
+ S_030CD8_MASK_HS(1) |
+ S_030CD8_MASK_LS(1) |
+ S_030CD8_MASK_CS(1) |
+ S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
+ S_030CD8_MODE(1);
+
+ if (device->physical_device->rad_info.chip_class == GFX9) {
+ /* Count SQTT traffic in TCC perf counters. */
+ thread_trace_mode |= S_030CD8_TC_PERF_EN(1);
+ }
+
+ radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
+ thread_trace_mode);
}
}
}
}
+/* SQTT info registers read back (one DWORD each) after stopping a thread
+ * trace on GFX8: write pointer, status and counter. Selected per-chip by
+ * radv_copy_thread_trace_info_regs(). */
+static const uint32_t gfx8_thread_trace_info_regs[] =
+{
+	R_030CE4_SQ_THREAD_TRACE_WPTR,
+	R_030CE8_SQ_THREAD_TRACE_STATUS,
+	R_008E40_SQ_THREAD_TRACE_CNTR,
+};
+
/* SQTT info registers read back after stopping a thread trace on GFX9.
 * NOTE(review): radv_copy_thread_trace_info_regs() copies exactly 3 DWORDs
 * per SE, so this array must hold 3 entries; also R_008D24_* lies in the
 * GFX10 privileged-config range while GFX9 SQTT registers are 0x030Cxx
 * uconfig — an entry (e.g. SQ_THREAD_TRACE_STATUS) may be missing from this
 * view of the patch. Verify against the full file before merging. */
static const uint32_t gfx9_thread_trace_info_regs[] =
{
	R_030CE4_SQ_THREAD_TRACE_WPTR,
	R_008D24_SQ_THREAD_TRACE_DROPPED_CNTR,
};
+/* Copy the per-SE SQTT info registers back from the hardware into the info
+ * struct stored in the thread trace buffer, one COPY_DATA (PERF -> TC_L2)
+ * packet per DWORD.
+ *
+ * device:   device owning the thread trace buffer
+ * cs:       command stream the COPY_DATA packets are emitted into
+ * se_index: shader engine whose info registers are read back
+ */
+static void
+radv_copy_thread_trace_info_regs(struct radv_device *device,
+				 struct radeon_cmdbuf *cs,
+				 unsigned se_index)
+{
+	const uint32_t *thread_trace_info_regs = NULL;
+
+	/* Pick the register list matching the chip generation. */
+	switch (device->physical_device->rad_info.chip_class) {
+	case GFX10:
+		thread_trace_info_regs = gfx10_thread_trace_info_regs;
+		break;
+	case GFX9:
+		thread_trace_info_regs = gfx9_thread_trace_info_regs;
+		break;
+	case GFX8:
+		thread_trace_info_regs = gfx8_thread_trace_info_regs;
+		break;
+	default:
+		unreachable("Unsupported chip_class");
+	}
+
+	/* Get the VA where the info struct is stored for this SE. */
+	uint64_t info_va = radv_thread_trace_get_info_va(device, se_index);
+
+	/* Copy back the info struct one DWORD at a time.
+	 * NOTE(review): the bound 3 assumes every per-chip array above has
+	 * exactly 3 entries — consider ARRAY_SIZE-driven per-chip counts, and
+	 * confirm the gfx9/gfx10 arrays really hold 3 entries each. */
+	for (unsigned i = 0; i < 3; i++) {
+		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
+				COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
+				COPY_DATA_WR_CONFIRM);
+		radeon_emit(cs, thread_trace_info_regs[i] >> 2);
+		radeon_emit(cs, 0); /* unused */
+		radeon_emit(cs, (info_va + i * 4));
+		radeon_emit(cs, (info_va + i * 4) >> 32);
+	}
+}
+
static void
radv_emit_thread_trace_stop(struct radv_device *device,
struct radeon_cmdbuf *cs,
{
unsigned max_se = device->physical_device->rad_info.max_se;
- assert(device->physical_device->rad_info.chip_class >= GFX9);
+ assert(device->physical_device->rad_info.chip_class >= GFX8);
/* Stop the thread trace with a different event based on the queue. */
if (queue_family_index == RADV_QUEUE_COMPUTE &&
S_030800_SH_INDEX(0) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
- if (device->physical_device->rad_info.chip_class == GFX9) {
- /* Disable the thread trace mode. */
- radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
- S_030CD8_MODE(0));
-
- /* Wait for thread trace completion. */
- radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
- radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
- radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
- radeon_emit(cs, 0);
- radeon_emit(cs, 0); /* reference value */
- radeon_emit(cs, S_030CE8_BUSY(1)); /* mask */
- radeon_emit(cs, 4); /* poll interval */
-
- /* Get the VA where the info struct is stored for this SE. */
- uint64_t info_va = radv_thread_trace_get_info_va(device, se);
-
- /* Copy back the info struct one DWORD at a time. */
- for (unsigned i = 0; i < ARRAY_SIZE(gfx9_thread_trace_info_regs); i++) {
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
- COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
- COPY_DATA_WR_CONFIRM);
- radeon_emit(cs, gfx9_thread_trace_info_regs[i] >> 2);
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, (info_va + i * 4));
- radeon_emit(cs, (info_va + i * 4) >> 32);
- }
- } else {
- assert(device->physical_device->rad_info.chip_class == GFX10);
-
+ if (device->physical_device->rad_info.chip_class == GFX10) {
/* Make sure to wait for the trace buffer. */
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL); /* wait until the register is equal to the reference value */
radeon_emit(cs, 0); /* reference value */
radeon_emit(cs, S_008D20_BUSY(1)); /* mask */
radeon_emit(cs, 4); /* poll interval */
+ } else {
+ /* Disable the thread trace mode. */
+ radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE,
+ S_030CD8_MODE(0));
- /* Get the VA where the info struct is stored for this SE. */
- uint64_t info_va = radv_thread_trace_get_info_va(device, se);
-
- /* Copy back the info struct one DWORD at a time. */
- for (unsigned i = 0; i < ARRAY_SIZE(gfx10_thread_trace_info_regs); i++) {
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
- COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
- COPY_DATA_WR_CONFIRM);
- radeon_emit(cs, gfx10_thread_trace_info_regs[i] >> 2);
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, (info_va + i * 4));
- radeon_emit(cs, (info_va + i * 4) >> 32);
- }
+ /* Wait for thread trace completion. */
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
+ radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0); /* reference value */
+ radeon_emit(cs, S_030CE8_BUSY(1)); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
}
+
+ radv_copy_thread_trace_info_regs(device, cs, se);
}
/* Restore global broadcasting. */
S_030800_INSTANCE_BROADCAST_WRITES(1));
}
+/* Emit user data markers into the SQTT stream by writing the given DWORDs
+ * to the SQ_THREAD_TRACE_USERDATA_2 register sequence, at most two DWORDs
+ * per packet (presumably USERDATA_2 and USERDATA_3 are consecutive
+ * registers — confirm against the register definitions).
+ *
+ * cs:         command stream the register writes are emitted into
+ * data:       marker payload, read as uint32_t DWORDs
+ * num_dwords: number of DWORDs in data
+ */
+void
+radv_emit_thread_trace_userdata(struct radeon_cmdbuf *cs,
+				const void *data, uint32_t num_dwords)
+{
+	const uint32_t *dwords = (uint32_t *)data;
+
+	while (num_dwords > 0) {
+		/* At most 2 DWORDs per SET_UCONFIG_REG sequence. */
+		uint32_t count = MIN2(num_dwords, 2);
+
+		radeon_set_uconfig_reg_seq(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
+		radeon_emit_array(cs, dwords, count);
+
+		dwords += count;
+		num_dwords -= count;
+	}
+}
+
static void
radv_emit_spi_config_cntl(struct radv_device *device,
struct radeon_cmdbuf *cs, bool enable)
{
- uint32_t spi_config_cntl = S_031100_GPR_WRITE_PRIORITY(0x2c688) |
- S_031100_EXP_PRIORITY_ORDER(3) |
- S_031100_ENABLE_SQG_TOP_EVENTS(enable) |
- S_031100_ENABLE_SQG_BOP_EVENTS(enable);
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ uint32_t spi_config_cntl = S_031100_GPR_WRITE_PRIORITY(0x2c688) |
+ S_031100_EXP_PRIORITY_ORDER(3) |
+ S_031100_ENABLE_SQG_TOP_EVENTS(enable) |
+ S_031100_ENABLE_SQG_BOP_EVENTS(enable);
- if (device->physical_device->rad_info.chip_class == GFX10)
- spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);
+ if (device->physical_device->rad_info.chip_class == GFX10)
+ spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);
- radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
+ radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
+ } else {
+ /* SPI_CONFIG_CNTL is a protected register on GFX6-GFX8. */
+ radeon_set_privileged_config_reg(cs, R_009100_SPI_CONFIG_CNTL,
+ S_009100_ENABLE_SQG_TOP_EVENTS(enable) |
+ S_009100_ENABLE_SQG_BOP_EVENTS(enable));
+ }
}
static void
radv_thread_trace_init_cs(struct radv_device *device)
{
struct radeon_winsys *ws = device->ws;
+ VkResult result;
/* Thread trace start CS. */
for (int family = 0; family < 2; ++family) {
device->thread_trace_start_cs[family] = ws->cs_create(ws, family);
+ if (!device->thread_trace_start_cs[family])
+ return;
+
switch (family) {
case RADV_QUEUE_GENERAL:
radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
- radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
- radeon_emit(device->thread_trace_start_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
+ radeon_emit(device->thread_trace_start_cs[family], CC0_UPDATE_LOAD_ENABLES(1));
+ radeon_emit(device->thread_trace_start_cs[family], CC1_UPDATE_SHADOW_ENABLES(1));
break;
case RADV_QUEUE_COMPUTE:
radeon_emit(device->thread_trace_start_cs[family], PKT3(PKT3_NOP, 0, 0));
device->thread_trace_start_cs[family],
family);
- ws->cs_finalize(device->thread_trace_start_cs[family]);
+ result = ws->cs_finalize(device->thread_trace_start_cs[family]);
+ if (result != VK_SUCCESS)
+ return;
}
/* Thread trace stop CS. */
for (int family = 0; family < 2; ++family) {
device->thread_trace_stop_cs[family] = ws->cs_create(ws, family);
+ if (!device->thread_trace_stop_cs[family])
+ return;
+
switch (family) {
case RADV_QUEUE_GENERAL:
radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
- radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
- radeon_emit(device->thread_trace_stop_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
+ radeon_emit(device->thread_trace_stop_cs[family], CC0_UPDATE_LOAD_ENABLES(1));
+ radeon_emit(device->thread_trace_stop_cs[family], CC1_UPDATE_SHADOW_ENABLES(1));
break;
case RADV_QUEUE_COMPUTE:
radeon_emit(device->thread_trace_stop_cs[family], PKT3(PKT3_NOP, 0, 0));
device->thread_trace_stop_cs[family],
false);
- ws->cs_finalize(device->thread_trace_stop_cs[family]);
+ result = ws->cs_finalize(device->thread_trace_stop_cs[family]);
+ if (result != VK_SUCCESS)
+ return;
}
}
/* Otherwise, compare the current thread trace offset with the number
* of written bytes.
*/
- return info->cur_offset < info->gfx9_write_counter;
+ return info->cur_offset == info->gfx9_write_counter;
}
static uint32_t
"buffer size is %d KB.\n",
expected_size, available_size);
fprintf(stderr, "Please update the buffer size with "
- "RADV_THREAD_TRACE_BUFER_SIZE=<size_in_bytes>\n");
+ "RADV_THREAD_TRACE_BUFFER_SIZE=<size_in_bytes>\n");
return false;
}