static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
- struct radeon_winsys_cs *cs,
+ struct radeon_cmdbuf *cs,
unsigned raster_config,
unsigned raster_config_1)
{
- unsigned sh_per_se = MAX2(physical_device->rad_info.max_sh_per_se, 1);
unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
- unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
- unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
- unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
- unsigned rb_per_se = num_rb / num_se;
- unsigned se_mask[4];
+ unsigned raster_config_se[4];
unsigned se;
- se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
- se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
- se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
- se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
-
- assert(num_se == 1 || num_se == 2 || num_se == 4);
- assert(sh_per_se == 1 || sh_per_se == 2);
- assert(rb_per_pkr == 1 || rb_per_pkr == 2);
-
- /* XXX: I can't figure out what the *_XSEL and *_YSEL
- * fields are for, so I'm leaving them as their default
- * values. */
+ ac_get_harvested_configs(&physical_device->rad_info,
+ raster_config,
+ &raster_config_1,
+ raster_config_se);
for (se = 0; se < num_se; se++) {
- unsigned raster_config_se = raster_config;
- unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
- unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
- int idx = (se / 2) * 2;
-
- if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
- raster_config_se &= C_028350_SE_MAP;
-
- if (!se_mask[idx]) {
- raster_config_se |=
- S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
- } else {
- raster_config_se |=
- S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
- }
- }
-
- pkr0_mask &= rb_mask;
- pkr1_mask &= rb_mask;
- if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
- raster_config_se &= C_028350_PKR_MAP;
-
- if (!pkr0_mask) {
- raster_config_se |=
- S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
- } else {
- raster_config_se |=
- S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
- }
- }
-
- if (rb_per_se >= 2) {
- unsigned rb0_mask = 1 << (se * rb_per_se);
- unsigned rb1_mask = rb0_mask << 1;
-
- rb0_mask &= rb_mask;
- rb1_mask &= rb_mask;
- if (!rb0_mask || !rb1_mask) {
- raster_config_se &= C_028350_RB_MAP_PKR0;
-
- if (!rb0_mask) {
- raster_config_se |=
- S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
- } else {
- raster_config_se |=
- S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
- }
- }
-
- if (rb_per_se > 2) {
- rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
- rb1_mask = rb0_mask << 1;
- rb0_mask &= rb_mask;
- rb1_mask &= rb_mask;
- if (!rb0_mask || !rb1_mask) {
- raster_config_se &= C_028350_RB_MAP_PKR1;
-
- if (!rb0_mask) {
- raster_config_se |=
- S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
- } else {
- raster_config_se |=
- S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
- }
- }
- }
- }
-
/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
if (physical_device->rad_info.chip_class < CIK)
radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
SE_INDEX(se) | SH_BROADCAST_WRITES |
INSTANCE_BROADCAST_WRITES);
else
radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
- radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
+ radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
}
/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
if (physical_device->rad_info.chip_class < CIK)
radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
INSTANCE_BROADCAST_WRITES);
else
radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
- if (physical_device->rad_info.chip_class >= CIK) {
- if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
- (!se_mask[2] && !se_mask[3]))) {
- raster_config_1 &= C_028354_SE_PAIR_MAP;
-
- if (!se_mask[0] && !se_mask[1]) {
- raster_config_1 |=
- S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
- } else {
- raster_config_1 |=
- S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
- }
- }
+ if (physical_device->rad_info.chip_class >= CIK)
radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
- }
}
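The per-SE harvest computation that used to live here (deriving se_mask from the enabled-RB mask and patching the SE_MAP/PKR_MAP/RB_MAP fields) now comes from the shared ac helper, so radeonsi and radv keep one copy of the logic. A minimal sketch of the resulting call pattern, assuming the helper's signature matches its use in the hunk above:

    unsigned raster_config_se[4];

    /* Fills raster_config_se[] (one PA_SC_RASTER_CONFIG value per SE)
     * and may also adjust raster_config_1 for harvested parts. */
    ac_get_harvested_configs(&physical_device->rad_info, raster_config,
                             &raster_config_1, raster_config_se);

    for (se = 0; se < num_se; se++) {
        /* select the SE via GRBM_GFX_INDEX, then write
         * PA_SC_RASTER_CONFIG with raster_config_se[se] */
    }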
-static void
+void
si_emit_compute(struct radv_physical_device *physical_device,
- struct radeon_winsys_cs *cs)
+ struct radeon_cmdbuf *cs)
{
radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
radeon_emit(cs, 0);
radeon_emit(cs, 0);
radeon_emit(cs, 0);
- radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
- radeon_emit(cs, S_00B854_WAVES_PER_SH(0x3));
+ radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
}
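For context on the hunk above: radeon_set_sh_reg_seq() takes a register count, not a register value, and each following radeon_emit() supplies one dword for one consecutive register. A sketch of that contract, using the registers from this function:

    /* Header announces N consecutive SH registers starting at SE0... */
    radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
    /* ...followed by exactly N data dwords, one per register. */
    radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff)); /* SE0 */
    radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff)); /* SE1 */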
-void
-si_init_compute(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
- si_emit_compute(physical_device, cmd_buffer->cs);
-}
-
/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
static void
si_set_raster_config(struct radv_physical_device *physical_device,
- struct radeon_winsys_cs *cs)
+ struct radeon_cmdbuf *cs)
{
unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
ac_get_raster_config(&physical_device->rad_info,
&raster_config,
- &raster_config_1);
+ &raster_config_1, NULL);
/* Always use the default config when all backends are enabled
* (or when we failed to determine the enabled backends).
*/
}
}
-static void
-si_emit_config(struct radv_physical_device *physical_device,
- struct radeon_winsys_cs *cs)
+void
+si_emit_graphics(struct radv_physical_device *physical_device,
+ struct radeon_cmdbuf *cs)
{
int i;
radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));
- if (physical_device->rad_info.num_good_compute_units /
- (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
+ if (physical_device->rad_info.num_good_cu_per_sh <= 4) {
/* Too few available compute units per SH. Disallowing
* VS from running on CU0 could hurt us more than late VS
* allocation would help.
if (physical_device->rad_info.chip_class >= VI) {
uint32_t vgt_tess_distribution;
- radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
- S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
- S_028424_OVERWRITE_COMBINER_WATERMARK(4));
vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
S_028B50_ACCUM_TRI(11) |
switch (physical_device->rad_info.family) {
case CHIP_VEGA10:
case CHIP_VEGA12:
+ case CHIP_VEGA20:
pc_lines = 4096;
break;
case CHIP_RAVEN:
+ case CHIP_RAVEN2:
pc_lines = 1024;
break;
default:
si_emit_compute(physical_device, cs);
}
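The new num_good_cu_per_sh field simply caches the quotient that the removed expression computed on every call. Illustrative arithmetic, using the same rad_info fields the removed lines used (example values are hypothetical):

    /* e.g. 36 enabled CUs / (4 SEs * 1 SH per SE) = 9 CUs per SH, so the
     * late-VS-allocation heuristic above ("<= 4") would not trigger. */
    unsigned cu_per_sh = physical_device->rad_info.num_good_compute_units /
                         (physical_device->rad_info.max_se *
                          physical_device->rad_info.max_sh_per_se);
    /* rad_info.num_good_cu_per_sh caches exactly this quotient. */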
-void si_init_config(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
-
- si_emit_config(physical_device, cmd_buffer->cs);
-}
-
void
cik_create_gfx_config(struct radv_device *device)
{
- struct radeon_winsys_cs *cs = device->ws->cs_create(device->ws, RING_GFX);
+ struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
if (!cs)
return;
- si_emit_config(device->physical_device, cs);
+ si_emit_graphics(device->physical_device, cs);
while (cs->cdw & 7) {
if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
}
void
-si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
+si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
int count, const VkViewport *viewports)
{
int i;
}
void
-si_write_scissors(struct radeon_winsys_cs *cs, int first,
+si_write_scissors(struct radeon_cmdbuf *cs, int first,
int count, const VkRect2D *scissors,
const VkViewport *viewports, bool can_use_guardband)
{
VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
get_viewport_xform(viewports + i, scale, translate);
- scale[0] = abs(scale[0]);
- scale[1] = abs(scale[1]);
+ scale[0] = fabsf(scale[0]);
+ scale[1] = fabsf(scale[1]);
if (scale[0] < 0.5)
scale[0] = 0.5;
if (scale[1] < 0.5)
scale[1] = 0.5;
- guardband_x = MIN2(guardband_x, (max_range - abs(translate[0])) / scale[0]);
- guardband_y = MIN2(guardband_y, (max_range - abs(translate[1])) / scale[1]);
+ guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
+ guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
S_028250_TL_Y(scissor.offset.y) |
}
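The abs() to fabsf() change above is a genuine bug fix, not a style cleanup: abs() takes an int, so a float argument is converted (truncated toward zero) before the absolute value is taken. A small self-contained sketch of the difference, with a hypothetical scale value (the driver code performed the conversion implicitly):

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        float s = -0.75f;
        /* abs() sees the truncated int, exactly what the old code did. */
        printf("%d\n", abs((int)s)); /* prints 0 */
        /* fabsf() keeps the fractional part the guardband math needs. */
        printf("%f\n", fabsf(s));    /* prints 0.750000 */
        return 0;
    }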
-void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
- bool predicated,
+void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
enum chip_class chip_class,
bool is_mec,
unsigned event, unsigned event_flags,
unsigned data_sel,
uint64_t va,
uint32_t old_fence,
- uint32_t new_fence)
+ uint32_t new_fence,
+ uint64_t gfx9_eop_bug_va)
{
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
+ unsigned sel = EOP_DATA_SEL(data_sel);
+
+ /* Wait for write confirmation before writing data, but don't send
+ * an interrupt. */
+ if (data_sel != EOP_DATA_SEL_DISCARD)
+ sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
if (chip_class >= GFX9 || is_gfx8_mec) {
- radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, predicated));
+ /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
+ * counters) must immediately precede every timestamp event to
+ * prevent a GPU hang on GFX9.
+ */
+ if (chip_class == GFX9 && !is_mec) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
+ radeon_emit(cs, gfx9_eop_bug_va);
+ radeon_emit(cs, gfx9_eop_bug_va >> 32);
+ }
+
+ radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
radeon_emit(cs, op);
- radeon_emit(cs, EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, sel);
radeon_emit(cs, va); /* address lo */
radeon_emit(cs, va >> 32); /* address hi */
radeon_emit(cs, new_fence); /* immediate data lo */
radeon_emit(cs, 0); /* immediate data hi */
if (!is_gfx8_mec)
radeon_emit(cs, 0); /* unused */
} else {
if (chip_class == CIK || chip_class == VI) {
/* Two EOP events are required to make all engines go idle
* (and optional cache flushes executed) before the timestamp
* is written.
*/
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
radeon_emit(cs, op);
radeon_emit(cs, va);
- radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
radeon_emit(cs, old_fence); /* immediate data */
radeon_emit(cs, 0); /* unused */
}
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
radeon_emit(cs, op);
radeon_emit(cs, va);
- radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
radeon_emit(cs, new_fence); /* immediate data */
radeon_emit(cs, 0); /* unused */
}
}
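The reworked helper is called in two styles in this patch, and both now thread gfx9_eop_bug_va through for the ZPASS_DONE workaround. A sketch of the two styles; argument values are taken from the calls elsewhere in this patch and should be read as illustrative:

    /* Cache-flush-only event: the EOP data is discarded, so the new
     * EOP_INT_SEL write-confirm path is not taken. */
    si_cs_emit_write_event_eop(cs, chip_class, is_mec,
                               V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0,
                               EOP_DATA_SEL_DISCARD, 0, 0, 0,
                               gfx9_eop_bug_va);

    /* Fence write: a 32-bit value lands at va once prior work completes,
     * with write confirmation requested before the data is written. */
    si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
                               EOP_DATA_SEL_VALUE_32BIT, flush_va,
                               old_fence, new_fence, gfx9_eop_bug_va);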
void
-si_emit_wait_fence(struct radeon_winsys_cs *cs,
- bool predicated,
- uint64_t va, uint32_t ref,
- uint32_t mask)
+radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
+ uint32_t ref, uint32_t mask)
{
- radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, predicated));
- radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+ assert(op == WAIT_REG_MEM_EQUAL ||
+ op == WAIT_REG_MEM_NOT_EQUAL ||
+ op == WAIT_REG_MEM_GREATER_OR_EQUAL);
+
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
+ radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, ref); /* reference value */
radeon_emit(cs, mask); /* mask */
radeon_emit(cs, 4); /* poll interval */
}
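radv_cp_wait_mem() generalizes the old EQUAL-only helper; per the assert, three comparison operators are permitted. Illustrative calls (not ones added by this patch; va, fence_value and threshold are hypothetical):

    /* Stall the CP until the dword at va equals the fence value... */
    radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, fence_value, 0xffffffff);
    /* ...or until it becomes non-zero, or reaches a threshold. */
    radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL, va, 0, 0xffffffff);
    radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, va, threshold, 0xffffffff);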
static void
-si_emit_acquire_mem(struct radeon_winsys_cs *cs,
+si_emit_acquire_mem(struct radeon_cmdbuf *cs,
bool is_mec,
- bool predicated,
bool is_gfx9,
unsigned cp_coher_cntl)
{
if (is_mec || is_gfx9) {
uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, predicated) |
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
PKT3_SHADER_TYPE_S(is_mec));
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, hi_val); /* CP_COHER_SIZE_HI */
radeon_emit(cs, 0); /* CP_COHER_BASE */
radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
} else {
/* ACQUIRE_MEM is only required on a compute ring. */
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, predicated));
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
radeon_emit(cs, 0); /* CP_COHER_BASE */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
}
void
-si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
+si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
enum chip_class chip_class,
uint32_t *flush_cnt,
uint64_t flush_va,
bool is_mec,
- enum radv_cmd_flush_bits flush_bits)
+ enum radv_cmd_flush_bits flush_bits,
+ uint64_t gfx9_eop_bug_va)
{
unsigned cp_coher_cntl = 0;
uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB);
/* Necessary for DCC */
if (chip_class >= VI) {
si_cs_emit_write_event_eop(cs,
- false,
chip_class,
is_mec,
V_028A90_FLUSH_AND_INV_CB_DATA_TS,
- 0, 0, 0, 0, 0);
+ 0,
+ EOP_DATA_SEL_DISCARD,
+ 0, 0, 0,
+ gfx9_eop_bug_va);
}
}
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
if (chip_class >= GFX9 && flush_cb_db) {
unsigned cb_db_event, tc_flags;
-#if 0
- /* This breaks a bunch of:
- dEQP-VK.renderpass.dedicated_allocation.formats.d32_sfloat_s8_uint.input*.
- use the big hammer always.
- */
/* Set the CB/DB flush event. */
- switch (flush_cb_db) {
- case RADV_CMD_FLAG_FLUSH_AND_INV_CB:
- cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
- break;
- case RADV_CMD_FLAG_FLUSH_AND_INV_DB:
- cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
- break;
- default:
- /* both CB & DB */
- cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
- }
-#else
cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
-#endif
+
/* These are the only allowed combinations. If you need to
* do multiple operations at once, do them separately.
* All operations that invalidate L2 also seem to invalidate
* metadata. Volatile (VOL) and WC flushes are not listed here.
assert(flush_cnt);
uint32_t old_fence = (*flush_cnt)++;
- si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags, 1,
- flush_va, old_fence, *flush_cnt);
- si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
+ si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
+ EOP_DATA_SEL_VALUE_32BIT,
+ flush_va, old_fence, *flush_cnt,
+ gfx9_eop_bug_va);
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
+ *flush_cnt, 0xffffffff);
}
/* VGT state sync */
if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
+ /* VGT streamout state sync */
+ if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
+
/* Make sure ME is idle (it executes most packets) before continuing.
* This prevents read-after-write hazards between PFP and ME.
*/
if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
(chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
- si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
+ si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
*
* WB doesn't work without NC.
*/
- si_emit_acquire_mem(cs, is_mec, false,
+ si_emit_acquire_mem(cs, is_mec,
chip_class >= GFX9,
cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
}
if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
si_emit_acquire_mem(cs, is_mec,
- false, chip_class >= GFX9,
+ chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
* Therefore, it should be last. Done in PFP.
*/
if (cp_coher_cntl)
- si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
+ si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);
+
+ if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
+ EVENT_INDEX(0));
+ } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
+ EVENT_INDEX(0));
+ }
}
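With the two new flush bits, callers can route PIPELINESTAT_START/STOP events through the ordinary flush path instead of emitting the packets directly. A sketch of the intended usage; the names come from this patch, but the actual call sites live elsewhere in the series:

    /* Ask for pipeline statistics to stop counting at the next flush... */
    cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
    /* ...and the matching EVENT_WRITE above is emitted when the
     * accumulated flush bits are processed. */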
void
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
- RADV_CMD_FLAG_VGT_FLUSH);
+ RADV_CMD_FLAG_VGT_FLUSH |
+ RADV_CMD_FLAG_START_PIPELINE_STATS |
+ RADV_CMD_FLAG_STOP_PIPELINE_STATS);
if (!cmd_buffer->state.flush_bits)
return;
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
- cmd_buffer->state.flush_bits);
+ cmd_buffer->state.flush_bits,
+ cmd_buffer->gfx9_eop_bug_va);
if (unlikely(cmd_buffer->device->trace_bo))
radv_cmd_buffer_trace_emit(cmd_buffer);
cmd_buffer->state.flush_bits = 0;
+
+ /* If the driver used a compute shader for resetting a query pool, it
+ * should be finished at this point.
+ */
+ cmd_buffer->pending_reset_query = false;
}
/* sets the CP predication state using a boolean stored at va */
void
-si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
+si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
+ bool draw_visible, uint64_t va)
{
uint32_t op = 0;
- if (va)
- op = PRED_OP(PREDICATION_OP_BOOL64) | PREDICATION_DRAW_VISIBLE;
+ if (va) {
+ op = PRED_OP(PREDICATION_OP_BOOL64);
+
+ /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
+ * zero, all rendering commands are discarded. Otherwise, they
+ * are discarded if the value is non-zero.
+ */
+ op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
+ PREDICATION_DRAW_NOT_VISIBLE;
+ }
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
radeon_emit(cmd_buffer->cs, op);
uint64_t dst_va, uint64_t src_va,
unsigned size, unsigned flags)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t header = 0, command = 0;
- assert(size);
assert(size <= cp_dma_max_byte_count(cmd_buffer));
radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
!(flags & CP_DMA_CLEAR) &&
src_va == dst_va)
- header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
+ header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
else if (flags & CP_DMA_USE_L2)
- header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
+ header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
if (flags & CP_DMA_CLEAR)
header |= S_411_SRC_SEL(V_411_DATA);
* indices. If we wanted to execute CP DMA in PFP, this packet
* should precede it.
*/
- if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
- radeon_emit(cs, 0);
+ if (flags & CP_DMA_SYNC) {
+ if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+ radeon_emit(cs, 0);
+ }
+
+ /* CP will see the sync flag and wait for all DMAs to complete. */
+ cmd_buffer->state.dma_is_busy = false;
}
if (unlikely(cmd_buffer->device->trace_bo))
uint64_t main_src_va, main_dest_va;
uint64_t skipped_size = 0, realign_size = 0;
+ /* Assume that we are not going to sync after the last DMA operation. */
+ cmd_buffer->state.dma_is_busy = true;
if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
size + skipped_size + realign_size,
&dma_flags);
+ dma_flags &= ~CP_DMA_SYNC;
+
si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
byte_count, dma_flags);
assert(va % 4 == 0 && size % 4 == 0);
+ /* Assume that we are not going to sync after the last DMA operation. */
+ cmd_buffer->state.dma_is_busy = true;
+
while (size) {
unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
unsigned dma_flags = CP_DMA_CLEAR;
}
}
+void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->device->physical_device->rad_info.chip_class < CIK)
+ return;
+
+ if (!cmd_buffer->state.dma_is_busy)
+ return;
+
+ /* Issue a dummy DMA that copies zero bytes.
+ *
+ * The DMA engine will see that there's no work to do and skip this
+ * DMA request; however, the CP will see the sync flag and still wait
+ * for all DMAs to complete.
+ */
+ si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);
+
+ cmd_buffer->state.dma_is_busy = false;
+}
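si_cp_dma_wait_for_idle() pairs with the new dma_is_busy tracking: the buffer-copy and clear paths above set the flag and strip CP_DMA_SYNC from intermediate packets, and a caller that must order later work against outstanding CP DMA issues the zero-byte synchronizing DMA. Illustrative usage:

    /* e.g. before emitting packets that read what a prior CP DMA wrote */
    si_cp_dma_wait_for_idle(cmd_buffer);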
+
/* For MSAA sample positions. */
#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
(((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) | \
return max_dist[log_samples];
}
-void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
+void radv_cayman_emit_msaa_sample_locs(struct radeon_cmdbuf *cs, int nr_samples)
{
switch (nr_samples) {
default: