#include "tu_cs.h"
-#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
-
void
tu_bo_list_init(struct tu_bo_list *list)
{
return VK_SUCCESS;
}
-static void
-tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- const struct tu_render_pass *pass)
-{
- const uint32_t tile_align_w = pass->tile_align_w;
- const uint32_t max_tile_width = 1024;
-
- /* note: don't offset the tiling config by render_area.offset,
- * because binning pass can't deal with it
- * this means we might end up with more tiles than necessary,
- * but load/store/etc are still scissored to the render_area
- */
- tiling->tile0.offset = (VkOffset2D) {};
-
- const uint32_t ra_width =
- tiling->render_area.extent.width +
- (tiling->render_area.offset.x - tiling->tile0.offset.x);
- const uint32_t ra_height =
- tiling->render_area.extent.height +
- (tiling->render_area.offset.y - tiling->tile0.offset.y);
-
- /* start from 1 tile */
- tiling->tile_count = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->tile0.extent = (VkExtent2D) {
- .width = util_align_npot(ra_width, tile_align_w),
- .height = align(ra_height, TILE_ALIGN_H),
- };
-
- if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
- /* start with 2x2 tiles */
- tiling->tile_count.width = 2;
- tiling->tile_count.height = 2;
- tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
- tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
- }
-
- /* do not exceed max tile width */
- while (tiling->tile0.extent.width > max_tile_width) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- }
-
- /* will force to sysmem, don't bother trying to have a valid tile config
- * TODO: just skip all GMEM stuff when sysmem is forced?
- */
- if (!pass->gmem_pixels)
- return;
-
- /* do not exceed gmem size */
- while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
- if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- } else {
- /* if this assert fails then layout is impossible.. */
- assert(tiling->tile0.extent.height > TILE_ALIGN_H);
- tiling->tile_count.height++;
- tiling->tile0.extent.height =
- align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
-
- /* start from 1 tile per pipe */
- tiling->pipe0 = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->pipe_count = tiling->tile_count;
-
- while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
- if (tiling->pipe0.width < tiling->pipe0.height) {
- tiling->pipe0.width += 1;
- tiling->pipe_count.width =
- DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
- } else {
- tiling->pipe0.height += 1;
- tiling->pipe_count.height =
- DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
- const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
- const VkExtent2D last_pipe = {
- .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
- .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
- };
-
- assert(used_pipe_count <= max_pipe_count);
- assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
-
- for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
- for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
- const uint32_t pipe_x = tiling->pipe0.width * x;
- const uint32_t pipe_y = tiling->pipe0.height * y;
- const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
- ? last_pipe.width
- : tiling->pipe0.width;
- const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
- ? last_pipe.height
- : tiling->pipe0.height;
- const uint32_t n = tiling->pipe_count.width * y + x;
-
- tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
- A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
- A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
- A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
- tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
- }
- }
-
- memset(tiling->pipe_config + used_pipe_count, 0,
- sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
-}
-
-static void
-tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- uint32_t tx,
- uint32_t ty,
- struct tu_tile *tile)
-{
- /* find the pipe and the slot for tile (tx, ty) */
- const uint32_t px = tx / tiling->pipe0.width;
- const uint32_t py = ty / tiling->pipe0.height;
- const uint32_t sx = tx - tiling->pipe0.width * px;
- const uint32_t sy = ty - tiling->pipe0.height * py;
- /* last pipe has different width */
- const uint32_t pipe_width =
- MIN2(tiling->pipe0.width,
- tiling->tile_count.width - px * tiling->pipe0.width);
-
- assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
- assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
- assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
-
- /* convert to 1D indices */
- tile->pipe = tiling->pipe_count.width * py + px;
- tile->slot = pipe_width * sy + sx;
-
- /* get the blit area for the tile */
- tile->begin = (VkOffset2D) {
- .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
- .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
- };
- tile->end.x =
- (tx == tiling->tile_count.width - 1)
- ? tiling->render_area.offset.x + tiling->render_area.extent.width
- : tile->begin.x + tiling->tile0.extent.width;
- tile->end.y =
- (ty == tiling->tile_count.height - 1)
- ? tiling->render_area.offset.y + tiling->render_area.extent.height
- : tile->begin.y + tiling->tile0.extent.height;
-}
-
-enum a3xx_msaa_samples
-tu_msaa_samples(uint32_t samples)
-{
- switch (samples) {
- case 1:
- return MSAA_ONE;
- case 2:
- return MSAA_TWO;
- case 4:
- return MSAA_FOUR;
- case 8:
- return MSAA_EIGHT;
- default:
- assert(!"invalid sample count");
- return MSAA_ONE;
- }
-}
-
-static enum a4xx_index_size
-tu6_index_size(VkIndexType type)
-{
- switch (type) {
- case VK_INDEX_TYPE_UINT16:
- return INDEX4_SIZE_16_BIT;
- case VK_INDEX_TYPE_UINT32:
- return INDEX4_SIZE_32_BIT;
- default:
- unreachable("invalid VkIndexType");
- return INDEX4_SIZE_8_BIT;
- }
-}
-
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
if (need_seqno) {
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
+ tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
tu_cs_emit(cs, 0);
}
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
- const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
+ const VkRect2D *render_area = &cmd->state.render_area;
uint32_t x1 = render_area->offset.x;
uint32_t y1 = render_area->offset.y;
uint32_t x2 = x1 + render_area->extent.width - 1;
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
+ /* XFB commands are emitted for BINNING || SYSMEM, which makes them
+ * incompatible with non-hw-binning GMEM rendering. This is required
+ * because some of the XFB commands need to be executed only once.
+ */
+ if (cmd->state.xfb_used)
+ return true;
if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
return false;
if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
return true;
- return (tiling->tile_count.width * tiling->tile_count.height) > 2;
+ return (fb->tile_count.width * fb->tile_count.height) > 2;
}
static bool
if (cmd->state.framebuffer->layers > 1)
return true;
- return cmd->state.tiling_config.force_sysmem;
+ if (cmd->has_tess)
+ return true;
+
+ return false;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
- const struct tu_tile *tile)
+ uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
- const uint32_t x1 = tile->begin.x;
- const uint32_t y1 = tile->begin.y;
- const uint32_t x2 = tile->end.x - 1;
- const uint32_t y2 = tile->end.y - 1;
+ const uint32_t x1 = fb->tile0.width * tx;
+ const uint32_t y1 = fb->tile0.height * ty;
+ const uint32_t x2 = x1 + fb->tile0.width - 1;
+ const uint32_t y2 = y1 + fb->tile0.height - 1;
tu6_emit_window_scissor(cs, x1, y1, x2, y2);
tu6_emit_window_offset(cs, x1, y1);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
- tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
- tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
- A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
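+ /* note: CP_SET_BIN_DATA5_OFFSET takes offsets relative to the
+ * VSC_*_STRM_ADDRESS bases programmed in tu6_init_hw(), so the
+ * per-pipe iovas no longer need to be emitted here
+ */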
+ tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
+ tu_cs_emit(cs, fb->pipe_sizes[pipe] |
+ CP_SET_BIN_DATA5_0_VSC_N(slot));
+ tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
+ tu_cs_emit(cs, pipe * 4);
+ tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
- tu_cs_reserve(cs, 3 + 11);
- tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
- tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
- tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
-
- /* if (no overflow) */ {
- tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
- tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
- CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
- tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
-
- tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
- tu_cs_emit(cs, 0x0);
-
- /* use a NOP packet to skip over the 'else' side: */
- tu_cs_emit_pkt7(cs, CP_NOP, 2);
- } /* else */ {
- tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
- tu_cs_emit(cs, 0x1);
- }
+ tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
+ tu_cs_emit(cs, 0x0);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
struct tu_image_view *dst = fb->attachments[a].attachment;
struct tu_image_view *src = fb->attachments[gmem_a].attachment;
- tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
+ tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
}
static void
}
}
-static void
-tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
-{
- tu_cs_emit_regs(cs,
- A6XX_PC_RESTART_INDEX(restart_index));
-}
-
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_physical_device *phys_dev = cmd->device->physical_device;
+ struct tu_device *dev = cmd->device;
+ const struct tu_physical_device *phys_dev = dev->physical_device;
tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
+ /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
- /* Set not to use streamout by default, */
- tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
- tu_cs_emit(cs, 0);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
- tu_cs_emit(cs, 0);
-
tu_cs_emit_regs(cs,
A6XX_SP_HS_CTRL_REG0(0));
A6XX_RB_LRZ_CNTL(0));
tu_cs_emit_regs(cs,
- A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+ tu_cs_emit_regs(cs,
+ A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+
+ /* VSC buffers:
+ * use the vsc pitches from the largest values used so far with this
+ * device. if there hasn't been overflow, there will already be a
+ * scratch bo allocated for these sizes.
+ *
+ * if overflow is detected, the stream size is increased by 2x.
+ */
+ mtx_lock(&dev->vsc_pitch_mtx);
+
+ struct tu6_global *global = dev->global_bo.map;
+
+ uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
+ uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
+
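+ /* double the usable size while keeping VSC_PAD bytes of padding:
+ * new_pitch = (old_pitch - VSC_PAD) * 2 + VSC_PAD
+ */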
+ if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
+ dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
+ dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
+ cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
+
+ mtx_unlock(&dev->vsc_pitch_mtx);
+
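+ /* scratch bo layout: prim streams, then draw streams, then one
+ * draw stream size counter (4 bytes) per pipe
+ */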
+ struct tu_bo *vsc_bo;
+ uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
+ cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
+
+ tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
+
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
tu_cs_emit_regs(cs,
- A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
+ .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
+
+ tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
- .height = tiling->tile0.extent.height),
- A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
- .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
+ A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
+ .height = fb->tile0.height));
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
- .ny = tiling->tile_count.height));
+ A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
+ .ny = fb->tile_count.height));
tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
- for (unsigned i = 0; i < 32; i++)
- tu_cs_emit(cs, tiling->pipe_config[i]);
+ tu_cs_emit_array(cs, fb->pipe_config, 32);
tu_cs_emit_regs(cs,
- A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
- A6XX_VSC_PRIM_STRM_ARRAY_PITCH(cmd->vsc_prim_strm.size));
+ A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit_regs(cs,
- A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
- A6XX_VSC_DRAW_STRM_ARRAY_PITCH(cmd->vsc_draw_strm.size));
+ A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
-
- /* Clear vsc_scratch: */
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, 0x0);
+ fb->pipe_count.width * fb->pipe_count.height;
- /* Check for overflow, write vsc_scratch if detected: */
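+ /* Check each pipe's draw/prim stream for overflow: on overflow, write
+ * the current pitch to the global bo, which tu6_init_hw() reads to
+ * double the pitch for subsequent command buffers.
+ */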
for (int i = 0; i < used_pipe_count; i++) {
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
}
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
-
- tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
-
- tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
- tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
- CP_MEM_TO_REG_0_CNT(1 - 1));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-
- /*
- * This is a bit awkward, we really want a way to invert the
- * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
- * execute cmds to use hwbinning when a bit is *not* set. This
- * dance is to invert OVERFLOW_FLAG_REG
- *
- * A CP_NOP packet is used to skip executing the 'else' clause
- * if (b0 set)..
- */
-
- /* b0 will be set if VSC_DRAW_STRM or VSC_PRIM_STRM overflow: */
- tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
- tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
- A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
- tu_cs_reserve(cs, 3 + 7);
- tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
- tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
- tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
-
- /* if (b0 set) */ {
- /*
- * On overflow, mirror the value to control->vsc_overflow
- * which CPU is checking to detect overflow (see
- * check_vsc_overflow())
- */
- tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
- tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
- CP_REG_TO_MEM_0_CNT(0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));
-
- tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
- tu_cs_emit(cs, 0x0);
-
- tu_cs_emit_pkt7(cs, CP_NOP, 2); /* skip 'else' when 'if' is taken */
- } /* else */ {
- tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
- tu_cs_emit(cs, 0x1);
- }
}
static void
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- uint32_t x1 = tiling->tile0.offset.x;
- uint32_t y1 = tiling->tile0.offset.y;
- uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
- uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
- tu6_emit_window_scissor(cs, x1, y1, x2, y2);
+ tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
if (!subpass->input_count)
return;
- struct ts_cs_memory texture;
+ struct tu_cs_memory texture;
VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
A6XX_TEX_CONST_DWORDS, &texture);
assert(result == VK_SUCCESS);
dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
dst[2] =
A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
+ A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
dst[3] = 0;
dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
}
static void
-tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
- const struct VkRect2D *renderArea)
+tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
if (use_hw_binning(cmd)) {
/* enable stream-out during binning pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
/* and disable stream-out for draw pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
tu_cs_emit_regs(cs,
/* no binning pass, so enable stream-out for draw pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
- 0x6000000);
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
}
tu_cs_sanity_check(cs);
}
static void
-tu6_render_tile(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_tile *tile)
+tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- tu6_emit_tile_select(cmd, cs, tile);
-
tu_cs_emit_call(cs, &cmd->draw_cs);
if (use_hw_binning(cmd)) {
- tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
- tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
- A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
- tu_cs_reserve(cs, 3 + 2);
- tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
- tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
- tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
-
- /* if (no overflow) */ {
- tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
- }
+ tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
}
tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu6_tile_render_begin(cmd, &cmd->cs);
- for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
- for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
- struct tu_tile tile;
- tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
- tu6_render_tile(cmd, &cmd->cs, &tile);
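+ /* walk the tiles in pipe order so that (pipe, slot) match the VSC
+ * pipe layout programmed by update_vsc_pipe()
+ */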
+ uint32_t pipe = 0;
+ for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
+ for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
+ uint32_t tx1 = px * fb->pipe0.width;
+ uint32_t ty1 = py * fb->pipe0.height;
+ uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
+ uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
+ uint32_t slot = 0;
+ for (uint32_t ty = ty1; ty < ty2; ty++) {
+ for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
+ tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
+ tu6_render_tile(cmd, &cmd->cs);
+ }
+ }
}
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
+ tu6_sysmem_render_begin(cmd, &cmd->cs);
tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
-static void
-tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
- const VkRect2D *render_area)
-{
- const struct tu_device *dev = cmd->device;
- struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tiling->render_area = *render_area;
- tiling->force_sysmem = false;
-
- tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
- tu_tiling_config_update_pipe_layout(tiling, dev);
- tu_tiling_config_update_pipes(tiling, dev);
-}
-
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
struct tu_cmd_pool *pool,
list_inithead(&cmd_buffer->upload.list);
- VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
- if (result != VK_SUCCESS)
- goto fail_scratch_bo;
-
- /* TODO: resize on overflow */
- cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
- cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
- cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
- cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
-
return VK_SUCCESS;
-
-fail_scratch_bo:
- list_del(&cmd_buffer->pool_link);
- return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
- tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
-
list_del(&cmd_buffer->pool_link);
tu_cs_finish(&cmd_buffer->cs);
}
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
+ cmd_buffer->state.index_size = 0xff; /* dirty restart index */
+
tu_cache_init(&cmd_buffer->state.cache);
tu_cache_init(&cmd_buffer->state.renderpass_cache);
cmd_buffer->usage_flags = pBeginInfo->flags;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_buffer, buf, buffer);
- /* initialize/update the restart index */
- if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
- struct tu_cs *draw_cs = &cmd->draw_cs;
- tu6_emit_restart_index(
- draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
- tu_cs_sanity_check(draw_cs);
+ uint32_t index_size, index_shift, restart_index;
+
+ switch (indexType) {
+ case VK_INDEX_TYPE_UINT16:
+ index_size = INDEX4_SIZE_16_BIT;
+ index_shift = 1;
+ restart_index = 0xffff;
+ break;
+ case VK_INDEX_TYPE_UINT32:
+ index_size = INDEX4_SIZE_32_BIT;
+ index_shift = 2;
+ restart_index = 0xffffffff;
+ break;
+ case VK_INDEX_TYPE_UINT8_EXT:
+ index_size = INDEX4_SIZE_8_BIT;
+ index_shift = 0;
+ restart_index = 0xff;
+ break;
+ default:
+ unreachable("invalid VkIndexType");
}
- /* track the BO */
- if (cmd->state.index_buffer != buf)
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+ /* initialize/update the restart index */
+ if (cmd->state.index_size != index_size)
+ tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
+
+ assert(buf->size >= offset);
- cmd->state.index_buffer = buf;
- cmd->state.index_offset = offset;
- cmd->state.index_type = indexType;
+ cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
+ cmd->state.max_index_count = (buf->size - offset) >> index_shift;
+ cmd->state.index_size = index_size;
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
void
if (layout->dynamic_offset_count) {
/* allocate and fill out dynamic descriptor set */
- struct ts_cs_memory dynamic_desc_set;
+ struct tu_cs_memory dynamic_desc_set;
VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
assert(result == VK_SUCCESS);
const VkDeviceSize *pSizes)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
- assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ /* using COND_REG_EXEC for xfb commands matches the blob behavior.
+ * presumably there isn't any benefit to using a draw state when the
+ * condition is (SYSMEM | BINNING)
+ */
+ tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+ CP_COND_REG_EXEC_0_SYSMEM |
+ CP_COND_REG_EXEC_0_BINNING);
for (uint32_t i = 0; i < bindingCount; i++) {
- uint32_t idx = firstBinding + i;
TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
+ uint64_t iova = buf->bo->iova + pOffsets[i];
+ uint32_t size = buf->bo->size - pOffsets[i];
+ uint32_t idx = i + firstBinding;
+
+ if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
+ size = pSizes[i];
- if (pOffsets[i] != 0)
- cmd->state.streamout_reset |= 1 << idx;
+ /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
+ uint32_t offset = iova & 0x1f;
+ iova &= ~(uint64_t) 0x1f;
- cmd->state.streamout_buf.buffers[idx] = buf;
- cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
- cmd->state.streamout_buf.sizes[idx] = pSizes[i];
+ tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
+ tu_cs_emit_qw(cs, iova);
+ tu_cs_emit(cs, size + offset);
+
+ cmd->state.streamout_offset[idx] = offset;
- cmd->state.streamout_enabled |= 1 << idx;
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
}
- cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
+ tu_cond_exec_end(cs);
}
-void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
- uint32_t firstCounterBuffer,
- uint32_t counterBufferCount,
- const VkBuffer *pCounterBuffers,
- const VkDeviceSize *pCounterBufferOffsets)
+void
+tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
{
- assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
- /* TODO do something with counter buffer? */
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+ CP_COND_REG_EXEC_0_SYSMEM |
+ CP_COND_REG_EXEC_0_BINNING);
+
+ /* TODO: only update offset for active buffers */
+ for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));
+
+ for (uint32_t i = 0; i < counterBufferCount; i++) {
+ uint32_t idx = firstCounterBuffer + i;
+ uint32_t offset = cmd->state.streamout_offset[idx];
+
+ if (!pCounterBuffers[i])
+ continue;
+
+ TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+
+ tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
+ tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
+ CP_MEM_TO_REG_0_UNK31 |
+ CP_MEM_TO_REG_0_CNT(1));
+ tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
+
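+ /* add back the BUFFER_BASE alignment adjustment applied at bind time */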
+ if (offset) {
+ tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
+ tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
+ CP_REG_RMW_0_SRC1_ADD);
+ tu_cs_emit(cs, 0xffffffff);
+ tu_cs_emit(cs, offset);
+ }
+ }
+
+ tu_cond_exec_end(cs);
}
void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets)
{
- assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
- /* TODO do something with counter buffer? */
-
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
- cmd->state.streamout_enabled = 0;
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+ CP_COND_REG_EXEC_0_SYSMEM |
+ CP_COND_REG_EXEC_0_BINNING);
+
+ /* TODO: only flush buffers that need to be flushed */
+ for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+ /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
+ tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
+ tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
+ }
+
+ for (uint32_t i = 0; i < counterBufferCount; i++) {
+ uint32_t idx = firstCounterBuffer + i;
+ uint32_t offset = cmd->state.streamout_offset[idx];
+
+ if (!pCounterBuffers[i])
+ continue;
+
+ TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
+
+ /* VPC_SO_FLUSH_BASE holds a dword count, but the counter buffer stores bytes (hence SHIFT_BY_2) */
+ tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
+ tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+ CP_MEM_TO_REG_0_SHIFT_BY_2 |
+ 0x40000 | /* ??? */
+ CP_MEM_TO_REG_0_UNK31 |
+ CP_MEM_TO_REG_0_CNT(1));
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
+
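+ /* the flushed counter includes the alignment adjustment added at bind
+ * time, so subtract it back out before writing to the counter buffer
+ */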
+ if (offset) {
+ tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
+ tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+ CP_REG_RMW_0_SRC1_ADD);
+ tu_cs_emit(cs, 0xffffffff);
+ tu_cs_emit(cs, -offset);
+ }
+
+ tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
+ tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+ CP_REG_TO_MEM_0_CNT(1));
+ tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
+ }
+
+ tu_cond_exec_end(cs);
+
+ cmd->state.xfb_used = true;
}
void
tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
}
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
- MSM_SUBMIT_BO_WRITE);
-
- if (cmd_buffer->use_vsc_data) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- }
-
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
- MSM_SUBMIT_BO_READ);
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
static struct tu_cs
tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
{
- struct ts_cs_memory memory;
+ struct tu_cs_memory memory;
struct tu_cs cs;
/* TODO: share this logic with tu_pipeline_static_state */
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
- tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
float minDepthBounds,
float maxDepthBounds)
{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
+
+ tu_cs_emit_regs(&cs,
+ A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
+ A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
}
static void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
{
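+ /* the packed register value keeps the front mask in the low byte and
+ * the back mask in the high byte (the RB_STENCILMASK_MASK/BFMASK fields)
+ */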
if (face & VK_STENCIL_FACE_FRONT_BIT)
- *value |= A6XX_RB_STENCILMASK_MASK(mask);
+ *value = (*value & 0xff00) | (mask & 0xff);
if (face & VK_STENCIL_FACE_BACK_BIT)
- *value |= A6XX_RB_STENCILMASK_BFMASK(mask);
+ *value = (*value & 0xff) | (mask & 0xff) << 8;
}
void
cmd->record_result = result;
break;
}
+
+ if (secondary->has_tess)
+ cmd->has_tess = true;
} else {
assert(tu_cs_is_empty(&secondary->draw_cs));
assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
tu_cs_add_entries(&cmd->cs, &secondary->cs);
}
+
+ cmd->state.index_size = secondary->state.index_size; /* for restart index update */
}
cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
cmd->state.pass = pass;
cmd->state.subpass = pass->subpasses;
cmd->state.framebuffer = fb;
+ cmd->state.render_area = pRenderPassBegin->renderArea;
- tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
tu_cmd_prepare_tile_store_ib(cmd);
/* Note: because this is external, any flushes will happen before draw_cs
tu_set_input_attachments(cmd, cmd->state.subpass);
- /* note: use_hw_binning only checks tiling config */
- if (use_hw_binning(cmd))
- cmd->use_vsc_data = true;
-
for (uint32_t i = 0; i < fb->attachment_count; ++i) {
const struct tu_image_view *iview = fb->attachments[i].attachment;
tu_bo_list_add(&cmd->bo_list, iview->image->bo,
tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
-struct tu_draw_info
-{
- /**
- * Number of vertices.
- */
- uint32_t count;
-
- /**
- * Index of the first vertex.
- */
- int32_t vertex_offset;
-
- /**
- * First instance id.
- */
- uint32_t first_instance;
-
- /**
- * Number of instances.
- */
- uint32_t instance_count;
-
- /**
- * First index (indexed draws only).
- */
- uint32_t first_index;
-
- /**
- * Whether it's an indexed draw.
- */
- bool indexed;
-
- /**
- * Indirect draw parameters resource.
- */
- struct tu_buffer *indirect;
- uint64_t indirect_offset;
- uint32_t stride;
-
- /**
- * Draw count parameters resource.
- */
- struct tu_buffer *count_buffer;
- uint64_t count_buffer_offset;
-
- /**
- * Stream output parameters resource.
- */
- struct tu_buffer *streamout_buffer;
- uint64_t streamout_buffer_offset;
-};
-
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
{
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
- const struct ir3_ubo_analysis_state *state = &link->ubo_state;
+ const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
if (link->push_consts.count > 0) {
unsigned num_units = link->push_consts.count;
/* Dig out the descriptor from the descriptor state and read the VA from
* it.
*/
- assert(state->range[i].bindless);
- uint32_t *base = state->range[i].bindless_base == MAX_SETS ?
+ assert(state->range[i].ubo.bindless);
+ uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
descriptors_state->dynamic_descriptors :
- descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
- unsigned block = state->range[i].block;
+ descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
+ unsigned block = state->range[i].ubo.block;
uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
assert(va);
return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
-static VkResult
-tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
- const struct tu_draw_info *draw,
- struct tu_cs_entry *entry)
-{
- /* TODO: fill out more than just base instance */
- const struct tu_program_descriptor_linkage *link =
- &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
- const struct ir3_const_state *const_state = &link->const_state;
- struct tu_cs cs;
-
- if (const_state->offsets.driver_param >= link->constlen) {
- *entry = (struct tu_cs_entry) {};
- return VK_SUCCESS;
- }
-
- VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
- if (result != VK_SUCCESS)
- return result;
-
- tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
- CP_LOAD_STATE6_0_NUM_UNIT(1));
- tu_cs_emit(&cs, 0);
- tu_cs_emit(&cs, 0);
-
- STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
-
- tu_cs_emit(&cs, 0);
- tu_cs_emit(&cs, 0);
- tu_cs_emit(&cs, draw->first_instance);
- tu_cs_emit(&cs, 0);
-
- *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
- return VK_SUCCESS;
-}
-
static struct tu_cs_entry
tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline)
return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
-static void
-tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+static uint64_t
+get_tess_param_bo_size(const struct tu_pipeline *pipeline,
+ uint32_t draw_count)
{
- struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;
+ /* TODO: For indirect draws, we can't compute the BO size ahead of time.
+ * Still not sure what to do here, so just allocate a reasonably large
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
- for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
- struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
- if (!buf)
- continue;
+ /* the tess param BO is pipeline->tess.param_stride bytes per patch,
+ * which includes both the per-vertex outputs and the per-patch outputs.
+ * build_primitive_map in ir3 calculates this stride.
+ */
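+ /* the patch primtype encodes the number of control points:
+ * primtype == DI_PT_PATCHES0 + verts_per_patch
+ */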
+ uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
+ uint32_t num_patches = draw_count / verts_per_patch;
+ return num_patches * pipeline->tess.param_stride;
+}
+
+static uint64_t
+get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
+ uint32_t draw_count)
+{
+ /* TODO: For indirect draws, we can't compute the BO size ahead of time.
+ * Still not sure what to do here, so just allocate a reasonably large
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
+
+ /* Each distinct patch gets its own tess factor output. */
+ uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
+ uint32_t num_patches = draw_count / verts_per_patch;
+ uint32_t factor_stride;
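+ /* the strides presumably cover the outer+inner factors plus one dword
+ * of padding: 2+1 (isolines), 3+1+1 (tris), 4+2+1 (quads) dwords
+ */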
+ switch (pipeline->tess.patch_type) {
+ case IR3_TESS_ISOLINES:
+ factor_stride = 12;
+ break;
+ case IR3_TESS_TRIANGLES:
+ factor_stride = 20;
+ break;
+ case IR3_TESS_QUADS:
+ factor_stride = 28;
+ break;
+ default:
+ unreachable("bad tessmode");
+ }
+ return factor_stride * num_patches;
+}
- uint32_t offset;
- offset = cmd->state.streamout_buf.offsets[i];
+static VkResult
+tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
+ uint32_t draw_count,
+ const struct tu_pipeline *pipeline,
+ struct tu_cs_entry *entry)
+{
+ struct tu_cs cs;
+ VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 20, &cs);
+ if (result != VK_SUCCESS)
+ return result;
- tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
- .bo_offset = buf->bo_offset));
- tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
+ uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
+ uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
+ uint64_t tess_bo_size = tess_factor_size + tess_param_size;
+ if (tess_bo_size > 0) {
+ struct tu_bo *tess_bo;
+ result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
+ if (result != VK_SUCCESS)
+ return result;
- if (cmd->state.streamout_reset & (1 << i)) {
- tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
- cmd->state.streamout_reset &= ~(1 << i);
- } else {
- tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
- tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
- CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
- CP_MEM_TO_REG_0_CNT(0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
- ctrl_offset(flush_base[i].offset));
- }
+ tu_bo_list_add(&cmd->bo_list, tess_bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+ uint64_t tess_factor_iova = tess_bo->iova;
+ uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;
- tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
- .bo_offset =
- ctrl_offset(flush_base[i])));
- }
-
- if (cmd->state.streamout_enabled) {
- tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
- tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
- tu_cs_emit(cs, tf->vpc_so_buf_cntl);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
- tu_cs_emit(cs, tf->ncomp[0]);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
- tu_cs_emit(cs, tf->ncomp[1]);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
- tu_cs_emit(cs, tf->ncomp[2]);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
- tu_cs_emit(cs, tf->ncomp[3]);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
- tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
- for (unsigned i = 0; i < tf->prog_count; i++) {
- tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
- tu_cs_emit(cs, tf->prog[i]);
- }
- } else {
- tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
- tu_cs_emit(cs, 0);
- tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
- tu_cs_emit(cs, 0);
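+ /* the param and factor BO addresses are passed to the HS and DS as a
+ * single vec4 of driver params (NUM_UNIT(1)) at the regid ir3 reserved
+ * for them
+ */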
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+ tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+ tu_cs_emit_qw(&cs, tess_param_iova);
+ tu_cs_emit_qw(&cs, tess_factor_iova);
+
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+ tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+ tu_cs_emit_qw(&cs, tess_param_iova);
+ tu_cs_emit_qw(&cs, tess_factor_iova);
+
+ tu_cs_emit_pkt4(&cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
+ tu_cs_emit_qw(&cs, tess_factor_iova);
+
+ /* TODO: Without this WFI here, the hardware seems unable to read these
+ * addresses we just emitted. Freedreno emits these consts as part of
+ * IB1 instead of in a draw state, which might make this WFI unnecessary,
+ * but it requires a bit more indirection (SS6_INDIRECT for consts). */
+ tu_cs_emit_wfi(&cs);
}
+ *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return VK_SUCCESS;
}
static VkResult
-tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_draw_info *draw)
+tu6_draw_common(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ bool indexed,
+ /* note: draw_count is 0 for indirect */
+ uint32_t draw_count)
{
const struct tu_pipeline *pipeline = cmd->state.pipeline;
VkResult result;
struct tu_descriptor_state *descriptors_state =
&cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
+ tu_emit_cache_flush_renderpass(cmd, cs);
+
/* TODO lrz */
- tu_cs_emit_regs(cs,
- A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
- pipeline->ia.primitive_restart && draw->indexed));
+ tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
+ .primitive_restart =
+ pipeline->ia.primitive_restart && indexed,
+ .tess_upper_left_domain_origin =
+ pipeline->tess.upper_left_domain_origin));
if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
+ cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL] =
+ tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
+ cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL] =
+ tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
}
- if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
- tu6_emit_streamout(cmd, cs);
-
if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
/* We need to reload the descriptors every time the descriptor sets
* change. However, the commands we send only depend on the pipeline
if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);
- struct tu_cs_entry vs_params;
- result = tu6_emit_vs_params(cmd, draw, &vs_params);
- if (result != VK_SUCCESS)
- return result;
+ bool has_tess =
+ pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
+ struct tu_cs_entry tess_consts = {};
+ if (has_tess) {
+ cmd->has_tess = true;
+ result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts);
+ if (result != VK_SUCCESS)
+ return result;
+ }
/* for the first draw in a renderpass, re-emit all the draw states
*
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
* note we eventually don't want to have to emit anything here
*/
uint32_t draw_state_count =
- ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 3 : 0) +
+ has_tess +
+ ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
1; /* vs_params */
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
+ /* We may need to re-emit tess consts if the current draw call is
+ * sufficiently larger than the last draw call. */
+ if (has_tess)
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
+ tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
}
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
- tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
}
tu_cs_sanity_check(cs);
- /* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
- for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
- const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
- if (buf) {
- tu_bo_list_add(&cmd->bo_list, buf->bo,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- }
- }
- }
-
/* There are too many graphics dirty bits to list here, so just list the
* bits to preserve instead. The only things not emitted here are
* compute-related state.
return VK_SUCCESS;
}
-static void
-tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_draw_info *draw)
+static uint32_t
+tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
{
- const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
- bool has_gs = cmd->state.pipeline->active_stages &
- VK_SHADER_STAGE_GEOMETRY_BIT;
-
- tu_cs_emit_regs(cs,
- A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
- A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
-
- if (draw->indexed) {
- const enum a4xx_index_size index_size =
- tu6_index_size(cmd->state.index_type);
- const uint32_t index_bytes =
- (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
- const struct tu_buffer *index_buf = cmd->state.index_buffer;
- unsigned max_indicies =
- (index_buf->size - cmd->state.index_offset) / index_bytes;
-
- const uint32_t cp_draw_indx =
- CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
- CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
- COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
- tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
- tu_cs_emit(cs, cp_draw_indx);
- tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
- tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
- tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
- } else {
- const uint32_t cp_draw_indx =
- CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
- COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
- tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
- tu_cs_emit(cs, cp_draw_indx);
- tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
+ const struct tu_pipeline *pipeline = cmd->state.pipeline;
+ uint32_t initiator =
+ CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
+ CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
+ CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
+ CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
+
+ if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
+ initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;
+
+ switch (pipeline->tess.patch_type) {
+ case IR3_TESS_TRIANGLES:
+ initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
+ CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+ break;
+ case IR3_TESS_ISOLINES:
+ initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
+ CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+ break;
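+ /* note: without TESS_ENABLE the patch_type value presumably doesn't matter */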
+ case IR3_TESS_NONE:
+ initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
+ break;
+ case IR3_TESS_QUADS:
+ initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
+ CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+ break;
}
-
- tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
+ return initiator;
}
-static void
-tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_draw_info *draw)
+
+static uint32_t
+vs_params_offset(struct tu_cmd_buffer *cmd)
{
+ const struct tu_program_descriptor_linkage *link =
+ &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+ const struct ir3_const_state *const_state = &link->const_state;
- const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
- bool has_gs = cmd->state.pipeline->active_stages &
- VK_SHADER_STAGE_GEOMETRY_BIT;
+ if (const_state->offsets.driver_param >= link->constlen)
+ return 0;
- tu_cs_emit_regs(cs,
- A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
- A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
-
- /* TODO hw binning */
- if (draw->indexed) {
- const enum a4xx_index_size index_size =
- tu6_index_size(cmd->state.index_type);
- const uint32_t index_bytes =
- (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
- const struct tu_buffer *buf = cmd->state.index_buffer;
- const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
- index_bytes * draw->first_index;
- const uint32_t size = index_bytes * draw->count;
-
- const uint32_t cp_draw_indx =
- CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
- CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
- COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
- tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
- tu_cs_emit(cs, cp_draw_indx);
- tu_cs_emit(cs, draw->instance_count);
- tu_cs_emit(cs, draw->count);
- tu_cs_emit(cs, 0x0); /* XXX */
- tu_cs_emit_qw(cs, buf->bo->iova + offset);
- tu_cs_emit(cs, size);
- } else {
- const uint32_t cp_draw_indx =
- CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
- CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
- COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
+ /* this layout is required by CP_DRAW_INDIRECT_MULTI */
+ STATIC_ASSERT(IR3_DP_DRAWID == 0);
+ STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
+ STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
- tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
- tu_cs_emit(cs, cp_draw_indx);
- tu_cs_emit(cs, draw->instance_count);
- tu_cs_emit(cs, draw->count);
- }
+ /* a DST_OFF of 0 means disabled for CP_DRAW_INDIRECT_MULTI,
+ * so the driver_param offset must be nonzero here
+ */
+ assert(const_state->offsets.driver_param != 0);
+
+ return const_state->offsets.driver_param;
}
-static void
-tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
+static struct tu_draw_state
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+ uint32_t vertex_offset,
+ uint32_t first_instance)
{
- struct tu_cs *cs = &cmd->draw_cs;
- VkResult result;
-
- tu_emit_cache_flush_renderpass(cmd, cs);
+ uint32_t offset = vs_params_offset(cmd);
- result = tu6_bind_draw_states(cmd, cs, draw);
+ struct tu_cs cs;
+ VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
- return;
+ return (struct tu_draw_state) {};
}
- if (draw->indirect)
- tu6_emit_draw_indirect(cmd, cs, draw);
- else
- tu6_emit_draw_direct(cmd, cs, draw);
+ /* TODO: don't make a new draw state when it doesn't change */
- if (cmd->state.streamout_enabled) {
- for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
- if (cmd->state.streamout_enabled & (1 << i))
- tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
- }
+ tu_cs_emit_regs(&cs,
+ A6XX_VFD_INDEX_OFFSET(vertex_offset),
+ A6XX_VFD_INSTANCE_START_OFFSET(first_instance));
+
+ if (offset) {
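+ /* load one vec4 of driver params into the VS const file: draw id
+ * (always 0 for direct draws), vertex offset, first instance, pad
+ */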
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, vertex_offset);
+ tu_cs_emit(&cs, first_instance);
+ tu_cs_emit(&cs, 0);
}
- tu_cs_sanity_check(cs);
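+ /* package the sub-stream as a draw state: GPU address plus size in dwords */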
+ struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
uint32_t vertexCount,
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance)
{
- TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- struct tu_draw_info info = {};
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);
- info.count = vertexCount;
- info.instance_count = instanceCount;
- info.first_instance = firstInstance;
- info.vertex_offset = firstVertex;
+ tu6_draw_common(cmd, cs, false, vertexCount);
- tu_draw(cmd_buffer, &info);
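+ /* non-indexed draw: DI_SRC_SEL_AUTO_INDEX makes the CP generate the indices */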
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+ tu_cs_emit(cs, instanceCount);
+ tu_cs_emit(cs, vertexCount);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
uint32_t indexCount,
uint32_t instanceCount,
uint32_t firstIndex,
int32_t vertexOffset,
uint32_t firstInstance)
{
- TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- struct tu_draw_info info = {};
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
- info.indexed = true;
- info.count = indexCount;
- info.instance_count = instanceCount;
- info.first_index = firstIndex;
- info.vertex_offset = vertexOffset;
- info.first_instance = firstInstance;
+ cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);
- tu_draw(cmd_buffer, &info);
+ tu6_draw_common(cmd, cs, true, indexCount);
+
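+ /* index_va/max_index_count come from cmd->state (presumably set at
+ * vkCmdBindIndexBuffer time); firstIndex is applied by the CP instead
+ * of being folded into the index buffer address as before
+ */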
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+ tu_cs_emit(cs, instanceCount);
+ tu_cs_emit(cs, indexCount);
+ tu_cs_emit(cs, firstIndex);
+ tu_cs_emit_qw(cs, cmd->state.index_va);
+ tu_cs_emit(cs, cmd->state.max_index_count);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
- struct tu_draw_info info = {};
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+ struct tu_cs *cs = &cmd->draw_cs;
- info.count = drawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.stride = stride;
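+ /* no vs_params draw state here: CP_DRAW_INDIRECT_MULTI writes the
+ * driver params itself, at the offset given by DST_OFF below
+ */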
+ cmd->state.vs_params = (struct tu_draw_state) {};
+
+ tu6_draw_common(cmd, cs, false, 0);
+
+ /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
+ * doesn't wait for WFIs to complete, leading to GPU faults/hangs
+ * TODO: this could be worked around more efficiently, or newer
+ * firmware may have already fixed it
+ */
+ if (cmd->device->physical_device->gpu_id != 650)
+ tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
- tu_draw(cmd_buffer, &info);
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+ tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
+ A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+ tu_cs_emit(cs, drawCount);
+ tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+ tu_cs_emit(cs, stride);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
- struct tu_draw_info info = {};
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+ struct tu_cs *cs = &cmd->draw_cs;
- info.indexed = true;
- info.count = drawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.stride = stride;
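+ /* as in tu_CmdDrawIndirect, the CP writes the driver params (see DST_OFF) */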
+ cmd->state.vs_params = (struct tu_draw_state) {};
+
+ tu6_draw_common(cmd, cs, true, 0);
+
+ /* workaround for a firmware bug with CP_DRAW_INDIRECT_MULTI, where it
+ * doesn't wait for WFIs to complete, leading to GPU faults/hangs
+ * TODO: this could be worked around more efficiently, or newer
+ * firmware may have already fixed it
+ */
+ if (cmd->device->physical_device->gpu_id != 650)
+ tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
- tu_draw(cmd_buffer, &info);
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+ tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
+ A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+ tu_cs_emit(cs, drawCount);
+ tu_cs_emit_qw(cs, cmd->state.index_va);
+ tu_cs_emit(cs, cmd->state.max_index_count);
+ tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+ tu_cs_emit(cs, stride);
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}

void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
uint32_t instanceCount,
uint32_t firstInstance,
VkBuffer _counterBuffer,
VkDeviceSize counterBufferOffset,
uint32_t counterOffset,
uint32_t vertexStride)
{
- TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
+ struct tu_cs *cs = &cmd->draw_cs;
+
+ cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
- struct tu_draw_info info = {};
+ tu6_draw_common(cmd, cs, false, 0);
- info.instance_count = instanceCount;
- info.first_instance = firstInstance;
- info.streamout_buffer = buffer;
- info.streamout_buffer_offset = counterBufferOffset;
- info.stride = vertexStride;
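+ /* CP_DRAW_AUTO derives the vertex count from the streamout counter
+ * buffer: (counter value - counterOffset) / vertexStride
+ */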
+ tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
+ tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
+ tu_cs_emit(cs, instanceCount);
+ tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
+ tu_cs_emit(cs, counterOffset);
+ tu_cs_emit(cs, vertexStride);
- tu_draw(cmd_buffer, &info);
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
struct tu_dispatch_info