#include "tu_private.h"
-#include "registers/adreno_pm4.xml.h"
-#include "registers/adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+#include "adreno_common.xml.h"
#include "vk_format.h"
return VK_SUCCESS;
}
-static void
-tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- const struct tu_render_pass *pass)
-{
- const uint32_t tile_align_w = pass->tile_align_w;
- const uint32_t max_tile_width = 1024;
-
- /* note: don't offset the tiling config by render_area.offset,
- * because binning pass can't deal with it
- * this means we might end up with more tiles than necessary,
- * but load/store/etc are still scissored to the render_area
- */
- tiling->tile0.offset = (VkOffset2D) {};
-
- const uint32_t ra_width =
- tiling->render_area.extent.width +
- (tiling->render_area.offset.x - tiling->tile0.offset.x);
- const uint32_t ra_height =
- tiling->render_area.extent.height +
- (tiling->render_area.offset.y - tiling->tile0.offset.y);
-
- /* start from 1 tile */
- tiling->tile_count = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->tile0.extent = (VkExtent2D) {
- .width = util_align_npot(ra_width, tile_align_w),
- .height = align(ra_height, TILE_ALIGN_H),
- };
-
- if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
- /* start with 2x2 tiles */
- tiling->tile_count.width = 2;
- tiling->tile_count.height = 2;
- tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
- tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
- }
-
- /* do not exceed max tile width */
- while (tiling->tile0.extent.width > max_tile_width) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- }
-
- /* will force to sysmem, don't bother trying to have a valid tile config
- * TODO: just skip all GMEM stuff when sysmem is forced?
- */
- if (!pass->gmem_pixels)
- return;
-
- /* do not exceed gmem size */
- while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
- if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- } else {
- /* if this assert fails then layout is impossible.. */
- assert(tiling->tile0.extent.height > TILE_ALIGN_H);
- tiling->tile_count.height++;
- tiling->tile0.extent.height =
- align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
-
- /* start from 1 tile per pipe */
- tiling->pipe0 = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->pipe_count = tiling->tile_count;
-
- while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
- if (tiling->pipe0.width < tiling->pipe0.height) {
- tiling->pipe0.width += 1;
- tiling->pipe_count.width =
- DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
- } else {
- tiling->pipe0.height += 1;
- tiling->pipe_count.height =
- DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
- const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
- const VkExtent2D last_pipe = {
- .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
- .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
- };
-
- assert(used_pipe_count <= max_pipe_count);
- assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
-
- for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
- for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
- const uint32_t pipe_x = tiling->pipe0.width * x;
- const uint32_t pipe_y = tiling->pipe0.height * y;
- const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
- ? last_pipe.width
- : tiling->pipe0.width;
- const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
- ? last_pipe.height
- : tiling->pipe0.height;
- const uint32_t n = tiling->pipe_count.width * y + x;
-
- tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
- A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
- A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
- A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
- tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
- }
- }
-
- memset(tiling->pipe_config + used_pipe_count, 0,
- sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
-}
-
-static void
-tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- uint32_t tx,
- uint32_t ty,
- struct tu_tile *tile)
-{
- /* find the pipe and the slot for tile (tx, ty) */
- const uint32_t px = tx / tiling->pipe0.width;
- const uint32_t py = ty / tiling->pipe0.height;
- const uint32_t sx = tx - tiling->pipe0.width * px;
- const uint32_t sy = ty - tiling->pipe0.height * py;
- /* last pipe has different width */
- const uint32_t pipe_width =
- MIN2(tiling->pipe0.width,
- tiling->tile_count.width - px * tiling->pipe0.width);
-
- assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
- assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
- assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
-
- /* convert to 1D indices */
- tile->pipe = tiling->pipe_count.width * py + px;
- tile->slot = pipe_width * sy + sx;
-
- /* get the blit area for the tile */
- tile->begin = (VkOffset2D) {
- .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
- .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
- };
- tile->end.x =
- (tx == tiling->tile_count.width - 1)
- ? tiling->render_area.offset.x + tiling->render_area.extent.width
- : tile->begin.x + tiling->tile0.extent.width;
- tile->end.y =
- (ty == tiling->tile_count.height - 1)
- ? tiling->render_area.offset.y + tiling->render_area.extent.height
- : tile->begin.y + tiling->tile0.extent.height;
-}
-
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
if (need_seqno) {
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
+ tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
tu_cs_emit(cs, 0);
}
}
tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
- if (flushes & TU_CMD_FLAG_WFI)
+ if (flushes & TU_CMD_FLAG_WAIT_MEM_WRITES)
+ tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+ if (flushes & TU_CMD_FLAG_WAIT_FOR_IDLE)
tu_cs_emit_wfi(cs);
+ if (flushes & TU_CMD_FLAG_WAIT_FOR_ME)
+ tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
}
/* "Normal" cache flushes, that don't require any special handling */
flushes |=
TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
- TU_CMD_FLAG_WFI;
+ TU_CMD_FLAG_WAIT_FOR_IDLE;
cmd_buffer->state.cache.pending_flush_bits &= ~(
TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
- TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+ TU_CMD_FLAG_WAIT_FOR_IDLE);
}
tu6_emit_flushes(cmd_buffer, cs, flushes);
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
- const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
+ const VkRect2D *render_area = &cmd->state.render_area;
+
+ /* Avoid assertion failures with an empty render area at (0, 0), where the
+ * subtraction below would wrap around. Empty render areas should be forced
+ * to the sysmem path by use_sysmem_rendering(). It's not even clear whether
+ * an empty scissor here works, and the blob seems to force sysmem as well,
+ * since it programs something wrong (non-empty) for the scissor.
+ */
+ if (render_area->extent.width == 0 ||
+ render_area->extent.height == 0)
+ return;
+
uint32_t x1 = render_area->offset.x;
uint32_t y1 = render_area->offset.y;
uint32_t x2 = x1 + render_area->extent.width - 1;
A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
tu_cs_emit_regs(cs,
- A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
- A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
+ A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
+ A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
void
break;
}
+ /* We need to reload the descriptors every time the descriptor sets
+ * change. However, the commands we send only depend on the pipeline,
+ * because the whole point is to cache descriptors which are used by the
+ * pipeline. There's a problem here, in that the firmware has an
+ * "optimization" which skips executing groups that are set to the same
+ * value as the last draw. This means that if the descriptor sets change
+ * but not the pipeline, we'd try to re-execute the same buffer, which
+ * the firmware would ignore, and we wouldn't pre-load the new
+ * descriptors. Set the DIRTY bit to avoid this optimization.
+ */
+ if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
+ enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
+
tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
enable_mask |
CP_SET_DRAW_STATE__0_GROUP_ID(id) |
tu_cs_emit_qw(cs, state.iova);
}
-/* note: get rid of this eventually */
-static void
-tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
-{
- tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
- .iova = entry.size ? entry.bo->iova + entry.offset : 0,
- .size = entry.size / 4,
- });
-}
-
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
/* XFB commands are emitted for BINNING || SYSMEM, which makes it incompatible
* with non-hw binning GMEM rendering. this is required because some of the
if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
return true;
- return (tiling->tile_count.width * tiling->tile_count.height) > 2;
+ return (fb->tile_count.width * fb->tile_count.height) > 2;
}
static bool
if (cmd->state.framebuffer->layers > 1)
return true;
+ /* Use sysmem for empty render areas */
+ if (cmd->state.render_area.extent.width == 0 ||
+ cmd->state.render_area.extent.height == 0)
+ return true;
+
if (cmd->has_tess)
return true;
- return cmd->state.tiling_config.force_sysmem;
+ return false;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
- const struct tu_tile *tile)
+ uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
- const uint32_t x1 = tile->begin.x;
- const uint32_t y1 = tile->begin.y;
- const uint32_t x2 = tile->end.x - 1;
- const uint32_t y2 = tile->end.y - 1;
+ const uint32_t x1 = fb->tile0.width * tx;
+ const uint32_t y1 = fb->tile0.height * ty;
+ const uint32_t x2 = x1 + fb->tile0.width - 1;
+ const uint32_t y2 = y1 + fb->tile0.height - 1;
tu6_emit_window_scissor(cs, x1, y1, x2, y2);
tu6_emit_window_offset(cs, x1, y1);
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
if (use_hw_binning(cmd)) {
tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
- tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
- tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
- CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
- tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
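+ /* CP_SET_BIN_DATA5_OFFSET takes per-pipe offsets into the VSC draw
+ * stream, draw stream size buffer and prim stream; the base addresses
+ * are programmed once in tu6_init_hw()
+ */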
+ tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
+ tu_cs_emit(cs, fb->pipe_sizes[pipe] |
+ CP_SET_BIN_DATA5_0_VSC_N(slot));
+ tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
+ tu_cs_emit(cs, pipe * 4);
+ tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x0);
struct tu_image_view *dst = fb->attachments[a].attachment;
struct tu_image_view *src = fb->attachments[gmem_a].attachment;
- tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
+ tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
}
static void
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_physical_device *phys_dev = cmd->device->physical_device;
+ struct tu_device *dev = cmd->device;
+ const struct tu_physical_device *phys_dev = dev->physical_device;
tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
- tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
+ tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
+ .vs_state = true,
+ .hs_state = true,
+ .ds_state = true,
+ .gs_state = true,
+ .fs_state = true,
+ .cs_state = true,
+ .gfx_ibo = true,
+ .cs_ibo = true,
+ .gfx_shared_const = true,
+ .cs_shared_const = true,
+ .gfx_bindless = 0x1f,
+ .cs_bindless = 0x1f));
+
+ tu_cs_emit_wfi(cs);
+
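+ /* the CACHE_INVALIDATE and WFI just emitted satisfy any pending
+ * invalidate/wait, so clear those bits from the pending set
+ */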
+ cmd->state.cache.pending_flush_bits &=
+ ~(TU_CMD_FLAG_WAIT_FOR_IDLE | TU_CMD_FLAG_CACHE_INVALIDATE);
tu_cs_emit_regs(cs,
A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
+ tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
/* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
- A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
+ tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
- A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
A6XX_RB_LRZ_CNTL(0));
tu_cs_emit_regs(cs,
- A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+ tu_cs_emit_regs(cs,
+ A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+
+ /* VSC buffers:
+ * Use the largest VSC pitches seen so far on this device. If there
+ * hasn't been an overflow, a scratch BO for these sizes will already
+ * have been allocated.
+ *
+ * If overflow is detected, the stream size is increased by 2x.
+ */
+ mtx_lock(&dev->vsc_pitch_mtx);
+
+ struct tu6_global *global = dev->global_bo.map;
+
+ uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
+ uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
+
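+ /* double the usable part of the pitch (everything but the VSC_PAD bytes
+ * of slack) when the recorded overflow reaches the current pitch
+ */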
+ if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
+ dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
+ dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
+ cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
+
+ mtx_unlock(&dev->vsc_pitch_mtx);
+
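+ /* single scratch BO layout: prim streams, then draw streams, then one
+ * 4-byte draw stream size entry per pipe
+ */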
+ struct tu_bo *vsc_bo;
+ uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
+ cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
+
+ tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
+
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
tu_cs_emit_regs(cs,
- A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
+ .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
+
+ tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
- .height = tiling->tile0.extent.height),
- A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
- .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
+ A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
+ .height = fb->tile0.height));
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
- .ny = tiling->tile_count.height));
+ A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
+ .ny = fb->tile_count.height));
tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
- for (unsigned i = 0; i < 32; i++)
- tu_cs_emit(cs, tiling->pipe_config[i]);
+ tu_cs_emit_array(cs, fb->pipe_config, 32);
tu_cs_emit_regs(cs,
- A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
- A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - 64));
+ A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit_regs(cs,
- A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
- A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - 64));
+ A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
+ fb->pipe_count.width * fb->pipe_count.height;
- /* Clear vsc_scratch: */
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, 0x0);
-
- /* Check for overflow, write vsc_scratch if detected: */
for (int i = 0; i < used_pipe_count; i++) {
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - 64));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - 64));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
}
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- uint32_t x1 = tiling->tile0.offset.x;
- uint32_t y1 = tiling->tile0.offset.y;
- uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
- uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
- tu6_emit_window_scissor(cs, x1, y1, x2, y2);
+ tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
tu_cs_emit(cs, 0x0);
}
-static void
+static struct tu_draw_state
tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
const struct tu_subpass *subpass,
- struct tu_cs_entry *ib,
bool gmem)
{
/* note: we can probably emit input attachments just once for the whole
*/
if (!subpass->input_count)
- return;
+ return (struct tu_draw_state) {};
struct tu_cs_memory texture;
VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
dst[2] =
A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
+ A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
dst[3] = 0;
dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
}
struct tu_cs cs;
- tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);
+ struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);
tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
- *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ assert(cs.cur == cs.end); /* validate draw state size */
+
+ return ds;
}
static void
{
struct tu_cs *cs = &cmd->draw_cs;
- tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
- tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);
-
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
+ tu_emit_input_attachments(cmd, subpass, true));
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
+ tu_emit_input_attachments(cmd, subpass, false));
}
static void
}
static void
-tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
- const struct VkRect2D *renderArea)
+tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
/* enable stream-out, with sysmem there is only one pass: */
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x1);
tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
if (use_hw_binning(cmd)) {
/* enable stream-out during binning pass: */
- tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
tu6_emit_binning_pass(cmd, cs);
/* and disable stream-out for draw pass: */
- tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
tu_cs_emit_regs(cs,
tu_cs_emit(cs, 0x1);
} else {
/* no binning pass, so enable stream-out for draw pass:: */
- tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
- 0x6000000);
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
}
tu_cs_sanity_check(cs);
}
static void
-tu6_render_tile(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_tile *tile)
+tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- tu6_emit_tile_select(cmd, cs, tile);
-
tu_cs_emit_call(cs, &cmd->draw_cs);
if (use_hw_binning(cmd)) {
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- if (use_hw_binning(cmd))
- cmd->use_vsc_data = true;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu6_tile_render_begin(cmd, &cmd->cs);
- for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
- for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
- struct tu_tile tile;
- tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
- tu6_render_tile(cmd, &cmd->cs, &tile);
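+ /* walk the tiles pipe by pipe, so that the (pipe, slot) passed to
+ * tu6_emit_tile_select matches the layout written by the binning pass
+ */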
+ uint32_t pipe = 0;
+ for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
+ for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
+ uint32_t tx1 = px * fb->pipe0.width;
+ uint32_t ty1 = py * fb->pipe0.height;
+ uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
+ uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
+ uint32_t slot = 0;
+ for (uint32_t ty = ty1; ty < ty2; ty++) {
+ for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
+ tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
+ tu6_render_tile(cmd, &cmd->cs);
+ }
+ }
}
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
+ tu6_sysmem_render_begin(cmd, &cmd->cs);
tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
-static void
-tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
- const VkRect2D *render_area)
-{
- const struct tu_device *dev = cmd->device;
- struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tiling->render_area = *render_area;
- tiling->force_sysmem = false;
-
- tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
- tu_tiling_config_update_pipe_layout(tiling, dev);
- tu_tiling_config_update_pipes(tiling, dev);
-}
-
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
struct tu_cmd_pool *pool,
VkCommandBuffer *pCommandBuffer)
{
struct tu_cmd_buffer *cmd_buffer;
- cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
list_inithead(&cmd_buffer->upload.list);
- VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
- if (result != VK_SUCCESS)
- goto fail_scratch_bo;
-
- /* TODO: resize on overflow */
- cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
- cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
- cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
- cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
-
return VK_SUCCESS;
-
-fail_scratch_bo:
- list_del(&cmd_buffer->pool_link);
- return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
- tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
-
list_del(&cmd_buffer->pool_link);
tu_cs_finish(&cmd_buffer->cs);
tu_cs_finish(&cmd_buffer->sub_cs);
tu_bo_list_destroy(&cmd_buffer->bo_list);
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = tu_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
cmd->state.max_index_count = (buf->size - offset) >> index_shift;
cmd->state.index_size = index_size;
- cmd->state.index_shift = index_shift;
tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
}
assert(dyn_idx == dynamicOffsetCount);
- uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
+ uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
uint64_t addr[MAX_SETS + 1] = {};
- struct tu_cs cs;
+ struct tu_cs *cs, state_cs;
for (uint32_t i = 0; i < MAX_SETS; i++) {
struct tu_descriptor_set *set = descriptors_state->sets[i];
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
- hlsq_update_value = 0x7c000;
+ hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
- cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
+ cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
+ cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
+ cs = &state_cs;
} else {
assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
- hlsq_update_value = 0x3e00;
+ hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
- cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+ cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
+ cs = &cmd->cs;
}
- tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);
+ tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
+ tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+ tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
+ tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+ tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
- tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
- tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
- tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
- tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
- tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));
-
- struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+ assert(cs->cur == cs->end); /* validate draw state size */
tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
- tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
- cmd->state.desc_sets_ib = ib;
- } else {
- /* note: for compute we could emit directly, instead of a CP_INDIRECT
- * however, the blob uses draw states for compute
- */
- tu_cs_emit_ib(&cmd->cs, &ib);
+ tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
}
}
for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
/* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[i]));
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
}
0x40000 | /* ??? */
CP_MEM_TO_REG_0_UNK31 |
CP_MEM_TO_REG_0_CNT(1));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[idx]));
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
if (offset) {
tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
}
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
- MSM_SUBMIT_BO_WRITE);
-
- if (cmd_buffer->use_vsc_data) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- }
-
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
- MSM_SUBMIT_BO_READ);
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
- struct tu_cs_memory memory;
struct tu_cs cs;
- /* TODO: share this logic with tu_pipeline_static_state */
- tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
- tu_cs_init_external(&cs, memory.map, memory.map + size);
- tu_cs_begin(&cs);
- tu_cs_reserve_space(&cs, size);
-
assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
- cmd->state.dynamic_state[id].iova = memory.iova;
- cmd->state.dynamic_state[id].size = size;
+ cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);
tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
cmd->state.compute_pipeline = pipeline;
- cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
+ tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
return;
}
assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
cmd->state.pipeline = pipeline;
- cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
+ cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
struct tu_cs *cs = &cmd->draw_cs;
uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
uint32_t i;
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
-
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
for_each_bit(i, mask)
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
- /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
- if (pipeline->layout->dynamic_offset_count)
- cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
-
/* dynamic linewidth state depends pipeline state's gras_su_cntl
* so the dynamic state ib must be updated when pipeline changes
*/
{
enum tu_cmd_flush_bits flush_bits = 0;
+ if (src_mask & TU_ACCESS_HOST_WRITE) {
+ /* Host writes are always visible to CP, so only invalidate GPU caches */
+ cache->pending_flush_bits |= TU_CMD_FLAG_GPU_INVALIDATE;
+ }
+
if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
+ /* Invalidate CP and 2D engine (make it do WFI + WFM if necessary) as
+ * well.
+ */
cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
}
+ if (src_mask & TU_ACCESS_CP_WRITE) {
+ /* Flush the CP write queue. However, a WFI shouldn't be necessary, as
+ * WAIT_MEM_WRITES should cover it.
+ */
+ cache->pending_flush_bits |=
+ TU_CMD_FLAG_WAIT_MEM_WRITES |
+ TU_CMD_FLAG_GPU_INVALIDATE |
+ TU_CMD_FLAG_WAIT_FOR_ME;
+ }
+
#define SRC_FLUSH(domain, flush, invalidate) \
if (src_mask & TU_ACCESS_##domain##_WRITE) { \
cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
#undef SRC_INCOHERENT_FLUSH
- if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
+ /* Treat host & sysmem write accesses the same, since the kernel implicitly
+ * drains the queue before signalling completion to the host.
+ */
+ if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
+ TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
}
#undef DST_INCOHERENT_FLUSH
if (dst_mask & TU_ACCESS_WFI_READ) {
- flush_bits |= TU_CMD_FLAG_WFI;
+ flush_bits |= cache->pending_flush_bits &
+ (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
+ }
+
+ if (dst_mask & TU_ACCESS_WFM_READ) {
+ flush_bits |= cache->pending_flush_bits &
+ (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
}
cache->flush_bits |= flush_bits;
enum tu_cmd_access_mask mask = 0;
/* If the GPU writes a buffer that is then read by an indirect draw
- * command, we theoretically need a WFI + WAIT_FOR_ME combination to
- * wait for the writes to complete. The WAIT_FOR_ME is performed as part
- * of the draw by the firmware, so we just need to execute a WFI.
+ * command, we theoretically need to emit a WFI to wait for any cache
+ * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
+ * complete. Waiting for the WFI to complete is performed as part of the
+ * draw by the firmware, so we just need to execute the WFI.
+ *
+ * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
+ * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
*/
if (flags &
(VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
VK_ACCESS_MEMORY_READ_BIT)) {
mask |= TU_ACCESS_WFI_READ;
}
if (flags &
(VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
- VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
- VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
VK_ACCESS_MEMORY_READ_BIT)) {
mask |= TU_ACCESS_SYSMEM_READ;
}
+ if (flags &
+ (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_CP_WRITE;
+ }
+
+ if (flags &
+ (VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_HOST_READ;
+ }
+
if (flags &
(VK_ACCESS_HOST_WRITE_BIT |
- VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
VK_ACCESS_MEMORY_WRITE_BIT)) {
- mask |= TU_ACCESS_SYSMEM_WRITE;
+ mask |= TU_ACCESS_HOST_WRITE;
}
if (flags &
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
+ VK_OBJECT_TYPE_COMMAND_POOL);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
tu_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
}
VkResult
cmd->state.pass = pass;
cmd->state.subpass = pass->subpasses;
cmd->state.framebuffer = fb;
+ cmd->state.render_area = pRenderPassBegin->renderArea;
- tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
tu_cmd_prepare_tile_store_ib(cmd);
/* Note: because this is external, any flushes will happen before draw_cs
}
}
-static struct tu_cs_entry
+static struct tu_draw_state
tu6_emit_consts(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
- return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
}
-static struct tu_cs_entry
+static struct tu_draw_state
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline)
{
cmd->vertex_bindings_set = pipeline->vi.bindings_used;
- return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
}
static uint64_t
{
/* TODO: For indirect draws, we can't compute the BO size ahead of time.
* Still not sure what to do here, so just allocate a reasonably large
- * BO and hope for the best for now.
- * (maxTessellationControlPerVertexOutputComponents * 2048 vertices +
- * maxTessellationControlPerPatchOutputComponents * 512 patches) */
- if (!draw_count) {
- return ((128 * 2048) + (128 * 512)) * 4;
- }
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
- /* For each patch, adreno lays out the tess param BO in memory as:
- * (v_input[0][0])...(v_input[i][j])(p_input[0])...(p_input[k]).
- * where i = # vertices per patch, j = # per-vertex outputs, and
- * k = # per-patch outputs.*/
+ /* The tess param BO is pipeline->tess.param_stride bytes per patch,
+ * which includes both the per-vertex and per-patch outputs;
+ * build_primitive_map in ir3 calculates this stride.
+ */
uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
uint32_t num_patches = draw_count / verts_per_patch;
- return draw_count * pipeline->tess.per_vertex_output_size +
- pipeline->tess.per_patch_output_size * num_patches;
+ return num_patches * pipeline->tess.param_stride;
}
static uint64_t
{
/* TODO: For indirect draws, we can't compute the BO size ahead of time.
* Still not sure what to do here, so just allocate a reasonably large
- * BO and hope for the best for now.
- * (quad factor stride * 512 patches) */
- if (!draw_count) {
- return (28 * 512) * 4;
- }
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
/* Each distinct patch gets its own tess factor output. */
uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
uint32_t draw_count,
const struct tu_pipeline *pipeline,
- struct tu_cs_entry *entry)
+ struct tu_draw_state *state,
+ uint64_t *factor_iova)
{
struct tu_cs cs;
- VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 20, &cs);
+ VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 16, &cs);
if (result != VK_SUCCESS)
return result;
tu_cs_emit_qw(&cs, tess_param_iova);
tu_cs_emit_qw(&cs, tess_factor_iova);
- tu_cs_emit_pkt4(&cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
- tu_cs_emit_qw(&cs, tess_factor_iova);
-
- /* TODO: Without this WFI here, the hardware seems unable to read these
- * addresses we just emitted. Freedreno emits these consts as part of
- * IB1 instead of in a draw state which might make this WFI unnecessary,
- * but it requires a bit more indirection (SS6_INDIRECT for consts). */
- tu_cs_emit_wfi(&cs);
+ *factor_iova = tess_factor_iova;
}
- *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
return VK_SUCCESS;
}
pipeline->tess.upper_left_domain_origin));
if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
- cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
+ cmd->state.shader_const[MESA_SHADER_VERTEX] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
- cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL] =
+ cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
- cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL] =
+ cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
- cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
+ cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
- cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
+ cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
}
- if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
- /* We need to reload the descriptors every time the descriptor sets
- * change. However, the commands we send only depend on the pipeline
- * because the whole point is to cache descriptors which are used by the
- * pipeline. There's a problem here, in that the firmware has an
- * "optimization" which skips executing groups that are set to the same
- * value as the last draw. This means that if the descriptor sets change
- * but not the pipeline, we'd try to re-execute the same buffer which
- * the firmware would ignore and we wouldn't pre-load the new
- * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
- * the descriptor sets change, which we emulate here by copying the
- * pre-prepared buffer.
- */
- const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
- if (load_entry->size > 0) {
- struct tu_cs load_cs;
- result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
- if (result != VK_SUCCESS)
- return result;
- tu_cs_emit_array(&load_cs,
- (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
- load_entry->size / 4);
- cmd->state.desc_sets_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
- } else {
- cmd->state.desc_sets_load_ib.size = 0;
- }
- }
-
if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
- cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);
+ cmd->state.vertex_buffers = tu6_emit_vertex_buffers(cmd, pipeline);
bool has_tess =
pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
- struct tu_cs_entry tess_consts = {};
+ struct tu_draw_state tess_consts = {};
if (has_tess) {
+ uint64_t tess_factor_iova = 0;
+
cmd->has_tess = true;
- result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts);
+ result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts, &tess_factor_iova);
if (result != VK_SUCCESS)
return result;
+
+ /* This sequence matches what the blob does before every tess draw:
+ * PC_TESSFACTOR_ADDR_LO is a non-context register and needs a WFI
+ * before writing to it.
+ */
+ tu_cs_emit_wfi(cs);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
+ tu_cs_emit_qw(cs, tess_factor_iova);
+
+ tu_cs_emit_pkt7(cs, CP_SET_SUBDRAW_SIZE, 1);
+ tu_cs_emit(cs, draw_count);
}
/* for the first draw in a renderpass, re-emit all the draw states
if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
uint32_t draw_state_count =
has_tess +
((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
- ((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
+ ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
1; /* vs_params */
/* We may need to re-emit tess consts if the current draw call is
* sufficiently larger than the last draw call. */
if (has_tess)
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
}
- if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS)
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
+ if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
}
* bits to preserve instead. The only things not emitted here are
* compute-related state.
*/
- cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+ cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
return VK_SUCCESS;
}
tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
tu_cs_emit(cs, instanceCount);
tu_cs_emit(cs, indexCount);
- tu_cs_emit(cs, 0x0); /* XXX */
- tu_cs_emit_qw(cs, cmd->state.index_va + (firstIndex << cmd->state.index_shift));
- tu_cs_emit(cs, indexCount << cmd->state.index_shift);
+ tu_cs_emit(cs, firstIndex);
+ tu_cs_emit_qw(cs, cmd->state.index_va);
+ tu_cs_emit(cs, cmd->state.max_index_count);
+}
+
+/* Various firmware bugs/inconsistencies mean that some indirect draw opcodes
+ * do not wait for WFI's to complete before executing. Add a WAIT_FOR_ME, if
+ * one is pending, before these opcodes. This may result in a few extra
+ * WAIT_FOR_ME's with these opcodes, but the alternative would be to add
+ * unnecessary WAIT_FOR_ME's before draw opcodes that don't need them.
+ */
+static void
+draw_wfm(struct tu_cmd_buffer *cmd)
+{
+ cmd->state.renderpass_cache.flush_bits |=
+ cmd->state.renderpass_cache.pending_flush_bits & TU_CMD_FLAG_WAIT_FOR_ME;
+ cmd->state.renderpass_cache.pending_flush_bits &= ~TU_CMD_FLAG_WAIT_FOR_ME;
}
void
cmd->state.vs_params = (struct tu_draw_state) {};
- tu6_draw_common(cmd, cs, false, 0);
-
- /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
- * doesn't wait for WFIs to be completed and leads to GPU fault/hang
- * TODO: this could be worked around in a more performant way,
- * or there may exist newer firmware that has been fixed
+ /* The latest known a630_sqe.fw fails to wait for WFI before reading the
+ * indirect buffer when using CP_DRAW_INDIRECT_MULTI, so we have to fall
+ * back to CP_WAIT_FOR_ME, except on a650 which has fixed firmware.
+ *
+ * TODO: A newer a630_sqe.fw may be released in the future which fixes
+ * this; if so, we should detect it and avoid this workaround.
+ */
if (cmd->device->physical_device->gpu_id != 650)
- tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
+ draw_wfm(cmd);
+
+ tu6_draw_common(cmd, cs, false, 0);
tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
tu_cs_emit(cs, stride);
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
void
cmd->state.vs_params = (struct tu_draw_state) {};
- tu6_draw_common(cmd, cs, true, 0);
-
- /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
- * doesn't wait for WFIs to be completed and leads to GPU fault/hang
- * TODO: this could be worked around in a more performant way,
- * or there may exist newer firmware that has been fixed
- */
if (cmd->device->physical_device->gpu_id != 650)
- tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
+ draw_wfm(cmd);
+
+ tu6_draw_common(cmd, cs, true, 0);
tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
tu_cs_emit(cs, stride);
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+}
+
+void
+tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+ TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ cmd->state.vs_params = (struct tu_draw_state) {};
+
+ /* It turns out that the firmware we have for a650 only partially fixed the
+ * problem with CP_DRAW_INDIRECT_MULTI not waiting for WFI's to complete
+ * before reading indirect parameters. It waits for WFI's before reading
+ * the draw parameters, but after reading the indirect count :(.
+ */
+ draw_wfm(cmd);
+
+ tu6_draw_common(cmd, cs, false, 0);
+
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 8);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+ tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
+ A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+ tu_cs_emit(cs, drawCount);
+ tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+ tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
+ tu_cs_emit(cs, stride);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+}
+
+void
+tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+ TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ cmd->state.vs_params = (struct tu_draw_state) {};
+
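+ /* same as tu_CmdDrawIndirectCount: even the a650 firmware reads the
+ * indirect count before waiting for WFI, so don't skip draw_wfm() here
+ */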
+ draw_wfm(cmd);
+
+ tu6_draw_common(cmd, cs, true, 0);
+
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 11);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+ tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED) |
+ A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+ tu_cs_emit(cs, drawCount);
+ tu_cs_emit_qw(cs, cmd->state.index_va);
+ tu_cs_emit(cs, cmd->state.max_index_count);
+ tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+ tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
+ tu_cs_emit(cs, stride);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
struct tu_cs *cs = &cmd->draw_cs;
+ /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
+ * Plus, for the common case where the counter buffer is written by
+ * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
+ * complete, which means we need a WAIT_FOR_ME anyway.
+ */
+ draw_wfm(cmd);
+
cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
tu6_draw_common(cmd, cs, false, 0);
*/
tu_emit_cache_flush(cmd, cs);
- if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
- tu_cs_emit_ib(cs, &pipeline->program.state_ib);
-
- struct tu_cs_entry ib;
-
- ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
- if (ib.size)
- tu_cs_emit_ib(cs, &ib);
+ /* note: no reason to have this in a separate IB */
+ tu_cs_emit_state_ib(cs,
+ tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));
tu_emit_compute_driver_params(cs, pipeline, info);
- if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
- pipeline->load_state.state_ib.size > 0) {
- tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
- }
+ if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
+ tu_cs_emit_state_ib(cs, pipeline->load_state);
- cmd->state.dirty &=
- ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+ cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));