radv: reset pending_reset_query when flushing caches
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index 2cfa7f4c2c386f376ac633b783c18b41467e4cd9..2f57584bf82106ac9e375bc0e3af0f3d12f2d74f 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -79,7 +79,7 @@ si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
                radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
 }
 
-static void
+void
 si_emit_compute(struct radv_physical_device *physical_device,
                 struct radeon_cmdbuf *cs)
 {
@@ -117,13 +117,6 @@ si_emit_compute(struct radv_physical_device *physical_device,
        }
 }
 
-void
-si_init_compute(struct radv_cmd_buffer *cmd_buffer)
-{
-       struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
-       si_emit_compute(physical_device, cmd_buffer->cs);
-}
-
 /* 12.4 fixed-point */
 static unsigned radv_pack_float_12p4(float x)
 {
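
The helper declared at the end of this hunk converts a float to 12.4 fixed point (12 integer bits, 4 fractional bits). A minimal standalone sketch of that conversion; the clamp bounds are assumed from the 16-bit packed format rather than copied from the file:

    #include <stdio.h>

    /* 12.4 fixed point: scale by 2^4 and clamp to the 16-bit range.
     * Values >= 4096 would overflow the 12 integer bits, so they
     * saturate at 0xffff; negatives clamp to 0. */
    static unsigned pack_float_12p4(float x)
    {
        return x <= 0    ? 0 :
               x >= 4096 ? 0xffff :
               (unsigned)(x * 16);
    }

    int main(void)
    {
        printf("%u\n", pack_float_12p4(1.5f));    /* 24 (1.5 * 16) */
        printf("%u\n", pack_float_12p4(5000.0f)); /* 65535, saturated */
        return 0;
    }
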
@@ -141,7 +134,7 @@ si_set_raster_config(struct radv_physical_device *physical_device,
 
        ac_get_raster_config(&physical_device->rad_info,
                             &raster_config,
-                            &raster_config_1);
+                            &raster_config_1, NULL);
 
        /* Always use the default config when all backends are enabled
         * (or when we failed to determine the enabled backends).
@@ -159,9 +152,9 @@ si_set_raster_config(struct radv_physical_device *physical_device,
        }
 }
 
-static void
-si_emit_config(struct radv_physical_device *physical_device,
-              struct radeon_cmdbuf *cs)
+void
+si_emit_graphics(struct radv_physical_device *physical_device,
+                struct radeon_cmdbuf *cs)
 {
        int i;
 
@@ -285,8 +278,7 @@ si_emit_config(struct radv_physical_device *physical_device,
                radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
                                  S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));
 
-               if (physical_device->rad_info.num_good_compute_units /
-                   (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
+               if (physical_device->rad_info.num_good_cu_per_sh <= 4) {
                        /* Too few available compute units per SH. Disallowing
                         * VS to run on CU0 could hurt us more than late VS
                         * allocation would help.
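
This hunk replaces an inline division with a precomputed rad_info field. A sketch of the assumed equivalence; the struct below is an illustrative stand-in, not the real radeon_info:

    /* num_good_cu_per_sh caches CUs-per-shader-array so callers avoid
     * redoing the division. Field names follow the hunk. */
    struct cu_info {
        unsigned num_good_compute_units; /* CUs left after harvesting */
        unsigned max_se;                 /* shader engines */
        unsigned max_sh_per_se;          /* shader arrays per engine */
        unsigned num_good_cu_per_sh;     /* cached quotient */
    };

    static void derive_cu_per_sh(struct cu_info *info)
    {
        info->num_good_cu_per_sh = info->num_good_compute_units /
                                   (info->max_se * info->max_sh_per_se);
    }
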
@@ -313,9 +305,6 @@ si_emit_config(struct radv_physical_device *physical_device,
 
        if (physical_device->rad_info.chip_class >= VI) {
                uint32_t vgt_tess_distribution;
-               radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
-                                      S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
-                                      S_028424_OVERWRITE_COMBINER_WATERMARK(4));
 
                vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
                        S_028B50_ACCUM_TRI(11) |
@@ -340,9 +329,11 @@ si_emit_config(struct radv_physical_device *physical_device,
                switch (physical_device->rad_info.family) {
                case CHIP_VEGA10:
                case CHIP_VEGA12:
+               case CHIP_VEGA20:
                        pc_lines = 4096;
                        break;
                case CHIP_RAVEN:
+               case CHIP_RAVEN2:
                        pc_lines = 1024;
                        break;
                default:
@@ -387,13 +378,6 @@ si_emit_config(struct radv_physical_device *physical_device,
        si_emit_compute(physical_device, cs);
 }
 
-void si_init_config(struct radv_cmd_buffer *cmd_buffer)
-{
-       struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
-
-       si_emit_config(physical_device, cmd_buffer->cs);
-}
-
 void
 cik_create_gfx_config(struct radv_device *device)
 {
@@ -401,7 +385,7 @@ cik_create_gfx_config(struct radv_device *device)
        if (!cs)
                return;
 
-       si_emit_config(device->physical_device, cs);
+       si_emit_graphics(device->physical_device, cs);
 
        while (cs->cdw & 7) {
                if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
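
The loop below this hunk pads the generated IB to an 8-dword boundary with NOP packets. A standalone sketch of that padding; the NOP encodings are assumptions here:

    #include <stdint.h>

    /* Pad a command stream to a multiple of 8 dwords. 0x80000000 is
     * assumed to be the 1-dword type-2 NOP used on families that need
     * it, and 0xffff1000 a type-3 NOP dword otherwise. */
    static void pad_ib(uint32_t *buf, unsigned *cdw, int pad_with_type2)
    {
        while (*cdw & 7)
            buf[(*cdw)++] = pad_with_type2 ? 0x80000000u : 0xffff1000u;
    }
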
@@ -529,16 +513,16 @@ si_write_scissors(struct radeon_cmdbuf *cs, int first,
                VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
 
                get_viewport_xform(viewports + i, scale, translate);
-               scale[0] = abs(scale[0]);
-               scale[1] = abs(scale[1]);
+               scale[0] = fabsf(scale[0]);
+               scale[1] = fabsf(scale[1]);
 
                if (scale[0] < 0.5)
                        scale[0] = 0.5;
                if (scale[1] < 0.5)
                        scale[1] = 0.5;
 
-               guardband_x = MIN2(guardband_x, (max_range - abs(translate[0])) / scale[0]);
-               guardband_y = MIN2(guardband_y, (max_range - abs(translate[1])) / scale[1]);
+               guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
+               guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
 
                radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
                            S_028250_TL_Y(scissor.offset.y) |
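
The fabsf() change in this hunk fixes a genuine bug: C's abs() takes an int, so a float argument is silently converted and its fraction discarded before the absolute value is taken. A minimal demonstration:

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        float translate = -0.75f;
        /* abs() converts the float to int first, losing the fraction
         * (most compilers only warn via -Wabsolute-value). */
        printf("abs:   %d\n", abs(translate));   /* prints 0 */
        printf("fabsf: %f\n", fabsf(translate)); /* prints 0.750000 */
        return 0;
    }
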
@@ -696,7 +680,7 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
                 * counters) must immediately precede every timestamp event to
                 * prevent a GPU hang on GFX9.
                 */
-               if (chip_class == GFX9) {
+               if (chip_class == GFX9 && !is_mec) {
                        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                        radeon_emit(cs, gfx9_eop_bug_va);
@@ -737,12 +721,15 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 }
 
 void
-si_emit_wait_fence(struct radeon_cmdbuf *cs,
-                  uint64_t va, uint32_t ref,
-                  uint32_t mask)
+radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
+                uint32_t ref, uint32_t mask)
 {
+       assert(op == WAIT_REG_MEM_EQUAL ||
+              op == WAIT_REG_MEM_NOT_EQUAL ||
+              op == WAIT_REG_MEM_GREATER_OR_EQUAL);
+
        radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
-       radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+       radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        radeon_emit(cs, ref); /* reference value */
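
The rename generalizes the old equality-only fence wait into a helper taking a comparison op. A hedged usage sketch mirroring the si_cs_emit_cache_flush call further down in this diff; the op encoding is an assumption standing in for the driver's sid.h value:

    #include <stdint.h>

    struct radeon_cmdbuf; /* opaque winsys type */

    #define WAIT_REG_MEM_EQUAL 3 /* assumed encoding */

    void radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op,
                          uint64_t va, uint32_t ref, uint32_t mask);

    /* Stall the CP until the 32-bit fence word at va equals expected. */
    static void wait_for_fence(struct radeon_cmdbuf *cs, uint64_t va,
                               uint32_t expected)
    {
        radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, expected, 0xffffffff);
    }
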
@@ -887,7 +874,8 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
                                           EOP_DATA_SEL_VALUE_32BIT,
                                           flush_va, old_fence, *flush_cnt,
                                           gfx9_eop_bug_va);
-               si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
+               radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
+                                *flush_cnt, 0xffffffff);
        }
 
        /* VGT state sync */
@@ -896,6 +884,12 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
        }
 
+       /* VGT streamout state sync */
+       if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+       }
+
        /* Make sure ME is idle (it executes most packets) before continuing.
         * This prevents read-after-write hazards between PFP and ME.
         */
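
The context comment above describes the PFP/ME split: the prefetch parser (PFP) runs ahead of the micro engine (ME) that executes most packets. A hedged sketch of the PFP_SYNC_ME emission the surrounding code uses to close that gap; PKT3() and radeon_emit() are the file's own helpers, and the single zero payload dword is an assumption from the PM4 packet format:

    /* Stall the PFP until the ME has drained, so the PFP cannot fetch
     * data that a still-queued ME packet is about to write. */
    static void emit_pfp_sync_me(struct radeon_cmdbuf *cs)
    {
        radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
        radeon_emit(cs, 0); /* dummy payload dword */
    }
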
@@ -998,26 +992,29 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
                radv_cmd_buffer_trace_emit(cmd_buffer);
 
        cmd_buffer->state.flush_bits = 0;
+
+       /* If the driver used a compute shader for resetting a query pool, it
+        * should be finished at this point.
+        */
+       cmd_buffer->pending_reset_query = false;
 }
 
 /* sets the CP predication state using a boolean stored at va */
 void
 si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
-                             bool inverted, uint64_t va)
+                             bool draw_visible, uint64_t va)
 {
        uint32_t op = 0;
 
        if (va) {
                op = PRED_OP(PREDICATION_OP_BOOL64);
 
-               /* By default, our internal rendering commands are discarded
-                * only if the predicate is non-zero (ie. DRAW_VISIBLE). But
-                * VK_EXT_conditional_rendering also allows to discard commands
-                * when the predicate is zero, which means we have to use a
-                * different flag.
+               /* PREDICATION_DRAW_VISIBLE discards all rendering commands
+                * when the 32-bit value is zero; PREDICATION_DRAW_NOT_VISIBLE
+                * discards them when the value is non-zero.
                 */
-               op |= inverted ? PREDICATION_DRAW_VISIBLE :
-                                PREDICATION_DRAW_NOT_VISIBLE;
+               op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
+                                    PREDICATION_DRAW_NOT_VISIBLE;
        }
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
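
The parameter rename from `inverted` to `draw_visible` makes call sites read with the hardware's polarity. A standalone sketch of the op computation above; the register encodings below are stand-ins, not the real sid.h values:

    #include <stdbool.h>
    #include <stdint.h>

    #define PRED_OP(x)                   ((x) << 16)
    #define PREDICATION_OP_BOOL64        0x3       /* stand-in value */
    #define PREDICATION_DRAW_VISIBLE     (1u << 8) /* stand-in value */
    #define PREDICATION_DRAW_NOT_VISIBLE (0u << 8) /* stand-in value */

    /* va == 0 disables predication; otherwise draw_visible selects
     * whether commands are discarded on a zero or non-zero predicate. */
    static uint32_t predication_op(bool draw_visible, uint64_t va)
    {
        uint32_t op = 0;

        if (va) {
            op = PRED_OP(PREDICATION_OP_BOOL64);
            op |= draw_visible ? PREDICATION_DRAW_VISIBLE
                               : PREDICATION_DRAW_NOT_VISIBLE;
        }
        return op;
    }
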
@@ -1230,6 +1227,8 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                                  size + skipped_size + realign_size,
                                  &dma_flags);
 
+               dma_flags &= ~CP_DMA_SYNC;
+
                si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
                               byte_count, dma_flags);
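
The added `dma_flags &= ~CP_DMA_SYNC;` keeps the large main chunk of a split copy asynchronous: when an unaligned copy is broken into a main chunk plus a small realignment tail, only the final CP DMA packet should carry the sync bit, so the GPU waits exactly once. A sketch of that pattern; the flag value is assumed:

    #include <stdint.h>

    #define CP_DMA_SYNC (1u << 0) /* assumed flag value */

    /* Only the last packet of a split copy synchronizes; every earlier
     * chunk is emitted with the sync bit cleared. */
    static uint32_t chunk_flags(uint32_t dma_flags, int is_last_chunk)
    {
        return is_last_chunk ? dma_flags : (dma_flags & ~CP_DMA_SYNC);
    }
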