radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
}
-static void
+void
si_emit_compute(struct radv_physical_device *physical_device,
struct radeon_cmdbuf *cs)
{
}
}
-void
-si_init_compute(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
- si_emit_compute(physical_device, cmd_buffer->cs);
-}
-
/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
ac_get_raster_config(&physical_device->rad_info,
&raster_config,
- &raster_config_1);
+ &raster_config_1, NULL);
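+ /* The trailing argument is assumed to be a newer out-parameter of
+ * ac_get_raster_config() (the SE tile repeat value consumed by
+ * radeonsi); radv has no use for it, hence NULL.
+ */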
/* Always use the default config when all backends are enabled
* (or when we failed to determine the enabled backends).
}
}
-static void
-si_emit_config(struct radv_physical_device *physical_device,
- struct radeon_cmdbuf *cs)
+void
+si_emit_graphics(struct radv_physical_device *physical_device,
+ struct radeon_cmdbuf *cs)
{
int i;
radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));
- if (physical_device->rad_info.num_good_compute_units /
- (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
+ if (physical_device->rad_info.num_good_cu_per_sh <= 4) {
/* Too few available compute units per SH. Disallowing
* VS to run on CU0 could hurt us more than late VS
* allocation would help.
if (physical_device->rad_info.chip_class >= VI) {
uint32_t vgt_tess_distribution;
- radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
- S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
- S_028424_OVERWRITE_COMBINER_WATERMARK(4));
vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
S_028B50_ACCUM_TRI(11) |
switch (physical_device->rad_info.family) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+ case CHIP_VEGA20:
pc_lines = 4096;
break;
case CHIP_RAVEN:
+ case CHIP_RAVEN2:
pc_lines = 1024;
break;
default:
si_emit_compute(physical_device, cs);
}
-void si_init_config(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
-
- si_emit_config(physical_device, cmd_buffer->cs);
-}
-
void
cik_create_gfx_config(struct radv_device *device)
{
if (!cs)
return;
- si_emit_config(device->physical_device, cs);
+ si_emit_graphics(device->physical_device, cs);
while (cs->cdw & 7) {
if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
get_viewport_xform(viewports + i, scale, translate);
- scale[0] = abs(scale[0]);
- scale[1] = abs(scale[1]);
+ scale[0] = fabsf(scale[0]);
+ scale[1] = fabsf(scale[1]);
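+ /* Note: abs() takes an int, so the float scale was being truncated
+ * to an integer before the fix; fabsf() keeps the fractional part.
+ */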
if (scale[0] < 0.5)
scale[0] = 0.5;
if (scale[1] < 0.5)
scale[1] = 0.5;
- guardband_x = MIN2(guardband_x, (max_range - abs(translate[0])) / scale[0]);
- guardband_y = MIN2(guardband_y, (max_range - abs(translate[1])) / scale[1]);
+ guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
+ guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
S_028250_TL_Y(scissor.offset.y) |
* counters) must immediately precede every timestamp event to
* prevent a GPU hang on GFX9.
*/
- if (chip_class == GFX9) {
+ if (chip_class == GFX9 && !is_mec) {
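+ /* Compute queues (is_mec) are assumed to write timestamps with
+ * RELEASE_MEM rather than EVENT_WRITE_EOP, so they presumably do
+ * not need the ZPASS_DONE workaround below.
+ */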
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, gfx9_eop_bug_va);
}
void
-si_emit_wait_fence(struct radeon_cmdbuf *cs,
- uint64_t va, uint32_t ref,
- uint32_t mask)
+radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
+ uint32_t ref, uint32_t mask)
{
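+ /* WAIT_REG_MEM supports other compare functions as well, but these
+ * are presumably the only ones the driver ever emits.
+ */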
+ assert(op == WAIT_REG_MEM_EQUAL ||
+ op == WAIT_REG_MEM_NOT_EQUAL ||
+ op == WAIT_REG_MEM_GREATER_OR_EQUAL);
+
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
- radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+ radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, ref); /* reference value */
EOP_DATA_SEL_VALUE_32BIT,
flush_va, old_fence, *flush_cnt,
gfx9_eop_bug_va);
- si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
+ *flush_cnt, 0xffffffff);
}
/* VGT state sync */
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
+ /* VGT streamout state sync */
+ if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
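+ /* VGT_STREAMOUT_SYNC presumably idles streamout so that the
+ * streamout state can be changed safely.
+ */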
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
+
/* Make sure ME is idle (it executes most packets) before continuing.
* This prevents read-after-write hazards between PFP and ME.
*/
radv_cmd_buffer_trace_emit(cmd_buffer);
cmd_buffer->state.flush_bits = 0;
+
+ /* If the driver used a compute shader for resetting a query pool, it
+ * should be finished at this point.
+ */
+ cmd_buffer->pending_reset_query = false;
}
/* sets the CP predication state using a boolean stored at va */
void
si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
- bool inverted, uint64_t va)
+ bool draw_visible, uint64_t va)
{
uint32_t op = 0;
if (va) {
op = PRED_OP(PREDICATION_OP_BOOL64);
- /* By default, our internal rendering commands are discarded
- * only if the predicate is non-zero (ie. DRAW_VISIBLE). But
- * VK_EXT_conditional_rendering also allows to discard commands
- * when the predicate is zero, which means we have to use a
- * different flag.
+ /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
+ * zero, all rendering commands are discarded. Otherwise, they
+ * are discarded if the value is non-zero.
*/
- op |= inverted ? PREDICATION_DRAW_VISIBLE :
- PREDICATION_DRAW_NOT_VISIBLE;
+ op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
+ PREDICATION_DRAW_NOT_VISIBLE;
}
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
size + skipped_size + realign_size,
&dma_flags);
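+ /* Only the last CP DMA packet should raise the sync flag; the
+ * copies of the skipped/realigned bytes emitted after this main
+ * copy are assumed to perform the synchronization instead.
+ */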
+ dma_flags &= ~CP_DMA_SYNC;
+
si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
byte_count, dma_flags);