return VK_SUCCESS;
}
-static void
-tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- const struct tu_render_pass *pass)
-{
- const uint32_t tile_align_w = pass->tile_align_w;
- const uint32_t max_tile_width = 1024;
-
- /* note: don't offset the tiling config by render_area.offset,
- * because binning pass can't deal with it
- * this means we might end up with more tiles than necessary,
- * but load/store/etc are still scissored to the render_area
- */
- tiling->tile0.offset = (VkOffset2D) {};
-
- const uint32_t ra_width =
- tiling->render_area.extent.width +
- (tiling->render_area.offset.x - tiling->tile0.offset.x);
- const uint32_t ra_height =
- tiling->render_area.extent.height +
- (tiling->render_area.offset.y - tiling->tile0.offset.y);
-
- /* start from 1 tile */
- tiling->tile_count = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->tile0.extent = (VkExtent2D) {
- .width = util_align_npot(ra_width, tile_align_w),
- .height = align(ra_height, TILE_ALIGN_H),
- };
-
- if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
- /* start with 2x2 tiles */
- tiling->tile_count.width = 2;
- tiling->tile_count.height = 2;
- tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
- tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
- }
-
- /* do not exceed max tile width */
- while (tiling->tile0.extent.width > max_tile_width) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- }
-
- /* will force to sysmem, don't bother trying to have a valid tile config
- * TODO: just skip all GMEM stuff when sysmem is forced?
- */
- if (!pass->gmem_pixels)
- return;
-
- /* do not exceed gmem size */
- while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
- if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
- tiling->tile_count.width++;
- tiling->tile0.extent.width =
- util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
- } else {
- /* if this assert fails then layout is impossible.. */
- assert(tiling->tile0.extent.height > TILE_ALIGN_H);
- tiling->tile_count.height++;
- tiling->tile0.extent.height =
- align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
-
- /* start from 1 tile per pipe */
- tiling->pipe0 = (VkExtent2D) {
- .width = 1,
- .height = 1,
- };
- tiling->pipe_count = tiling->tile_count;
-
- while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
- if (tiling->pipe0.width < tiling->pipe0.height) {
- tiling->pipe0.width += 1;
- tiling->pipe_count.width =
- DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
- } else {
- tiling->pipe0.height += 1;
- tiling->pipe_count.height =
- DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
- }
- }
-}
-
-static void
-tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t max_pipe_count = 32; /* A6xx */
- const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
- const VkExtent2D last_pipe = {
- .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
- .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
- };
-
- assert(used_pipe_count <= max_pipe_count);
- assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
-
- for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
- for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
- const uint32_t pipe_x = tiling->pipe0.width * x;
- const uint32_t pipe_y = tiling->pipe0.height * y;
- const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
- ? last_pipe.width
- : tiling->pipe0.width;
- const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
- ? last_pipe.height
- : tiling->pipe0.height;
- const uint32_t n = tiling->pipe_count.width * y + x;
-
- tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
- A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
- A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
- A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
- tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
- }
- }
-
- memset(tiling->pipe_config + used_pipe_count, 0,
- sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
-}
-
-static void
-tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
- const struct tu_device *dev,
- uint32_t tx,
- uint32_t ty,
- struct tu_tile *tile)
-{
- /* find the pipe and the slot for tile (tx, ty) */
- const uint32_t px = tx / tiling->pipe0.width;
- const uint32_t py = ty / tiling->pipe0.height;
- const uint32_t sx = tx - tiling->pipe0.width * px;
- const uint32_t sy = ty - tiling->pipe0.height * py;
- /* last pipe has different width */
- const uint32_t pipe_width =
- MIN2(tiling->pipe0.width,
- tiling->tile_count.width - px * tiling->pipe0.width);
-
- assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
- assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
- assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
-
- /* convert to 1D indices */
- tile->pipe = tiling->pipe_count.width * py + px;
- tile->slot = pipe_width * sy + sx;
-
- /* get the blit area for the tile */
- tile->begin = (VkOffset2D) {
- .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
- .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
- };
- tile->end.x =
- (tx == tiling->tile_count.width - 1)
- ? tiling->render_area.offset.x + tiling->render_area.extent.width
- : tile->begin.x + tiling->tile0.extent.width;
- tile->end.y =
- (ty == tiling->tile_count.height - 1)
- ? tiling->render_area.offset.y + tiling->render_area.extent.height
- : tile->begin.y + tiling->tile0.extent.height;
-}
-
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
if (need_seqno) {
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
+ tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
tu_cs_emit(cs, 0);
}
}
static void
tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
{
- const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
+ const VkRect2D *render_area = &cmd->state.render_area;
uint32_t x1 = render_area->offset.x;
uint32_t y1 = render_area->offset.y;
uint32_t x2 = x1 + render_area->extent.width - 1;
static bool
use_hw_binning(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
/* XFB commands are emitted for BINNING || SYSMEM, which makes it incompatible
* with non-hw binning GMEM rendering. this is required because some of the
if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
return true;
- return (tiling->tile_count.width * tiling->tile_count.height) > 2;
+ return (fb->tile_count.width * fb->tile_count.height) > 2;
}
static bool
if (cmd->has_tess)
return true;
- return cmd->state.tiling_config.force_sysmem;
+ return false;
}
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
- const struct tu_tile *tile)
+ uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
- const uint32_t x1 = tile->begin.x;
- const uint32_t y1 = tile->begin.y;
- const uint32_t x2 = tile->end.x - 1;
- const uint32_t y2 = tile->end.y - 1;
+ const uint32_t x1 = fb->tile0.width * tx;
+ const uint32_t y1 = fb->tile0.height * ty;
+ const uint32_t x2 = x1 + fb->tile0.width - 1;
+ const uint32_t y2 = y1 + fb->tile0.height - 1;
tu6_emit_window_scissor(cs, x1, y1, x2, y2);
tu6_emit_window_offset(cs, x1, y1);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
- tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
- tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
- CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
- tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
- tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
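+ /* note: CP_SET_BIN_DATA5_OFFSET takes offsets relative to the VSC stream
+ * base addresses programmed in tu6_init_hw(), so no per-cmdbuf iovas are
+ * needed here
+ */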
+ tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
+ tu_cs_emit(cs, fb->pipe_sizes[pipe] |
+ CP_SET_BIN_DATA5_0_VSC_N(slot));
+ tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
+ tu_cs_emit(cs, pipe * 4);
+ tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x0);
struct tu_image_view *dst = fb->attachments[a].attachment;
struct tu_image_view *src = fb->attachments[gmem_a].attachment;
- tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
+ tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
}
static void
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_physical_device *phys_dev = cmd->device->physical_device;
+ struct tu_device *dev = cmd->device;
+ const struct tu_physical_device *phys_dev = dev->physical_device;
tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
A6XX_RB_LRZ_CNTL(0));
tu_cs_emit_regs(cs,
- A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+ tu_cs_emit_regs(cs,
+ A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+ .bo_offset = gb_offset(border_color)));
+
+ /* VSC buffers:
+ * use the vsc pitches from the largest values used so far with this
+ * device; if there hasn't been overflow, there will already be a
+ * scratch bo allocated for these sizes
+ *
+ * if overflow is detected, the stream pitch is increased by 2x
+ */
+ mtx_lock(&dev->vsc_pitch_mtx);
+
+ struct tu6_global *global = dev->global_bo.map;
+
+ uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
+ uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
+
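+ /* on overflow, double the usable part of the pitch; the VSC_PAD bytes
+ * subtracted for the *_STRM_LIMIT registers in update_vsc_pipe() stay
+ * constant
+ */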
+ if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
+ dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
+ dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
+
+ cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
+ cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
+
+ mtx_unlock(&dev->vsc_pitch_mtx);
+
+ struct tu_bo *vsc_bo;
+ uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
+ cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
+
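+ /* single scratch BO holding, in order: the prim streams
+ * (vsc_prim_strm_pitch * MAX_VSC_PIPES bytes), the draw streams
+ * (vsc_draw_strm_pitch * MAX_VSC_PIPES bytes), and one 32-bit draw
+ * stream size counter per pipe, matching the address setup below
+ */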
+ tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
+
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
tu_cs_emit_regs(cs,
- A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
+ tu_cs_emit_regs(cs,
+ A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
+ .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
+
+ tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
tu_cs_sanity_check(cs);
}
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
- .height = tiling->tile0.extent.height),
- A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
- .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
+ A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
+ .height = fb->tile0.height));
tu_cs_emit_regs(cs,
- A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
- .ny = tiling->tile_count.height));
+ A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
+ .ny = fb->tile_count.height));
tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
- for (unsigned i = 0; i < 32; i++)
- tu_cs_emit(cs, tiling->pipe_config[i]);
+ tu_cs_emit_array(cs, fb->pipe_config, 32);
tu_cs_emit_regs(cs,
- A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
- A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - 64));
+ A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit_regs(cs,
- A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
- A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - 64));
+ A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
}
static void
emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
const uint32_t used_pipe_count =
- tiling->pipe_count.width * tiling->pipe_count.height;
-
- /* Clear vsc_scratch: */
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, 0x0);
+ fb->pipe_count.width * fb->pipe_count.height;
- /* Check for overflow, write vsc_scratch if detected: */
for (int i = 0; i < used_pipe_count; i++) {
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - 64));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - 64));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
+ tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
}
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- uint32_t x1 = tiling->tile0.offset.x;
- uint32_t y1 = tiling->tile0.offset.y;
- uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
- uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
- tu6_emit_window_scissor(cs, x1, y1, x2, y2);
+ tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
dst[2] =
A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
+ A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
dst[3] = 0;
dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
}
static void
-tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
- const struct VkRect2D *renderArea)
+tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
if (use_hw_binning(cmd)) {
/* enable stream-out during binning pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
/* and disable stream-out for draw pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
tu_cs_emit_regs(cs,
/* no binning pass, so enable stream-out for draw pass: */
tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
- tu6_emit_bin_size(cs,
- tiling->tile0.extent.width,
- tiling->tile0.extent.height,
- 0x6000000);
+ tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
}
tu_cs_sanity_check(cs);
}
static void
-tu6_render_tile(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_tile *tile)
+tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- tu6_emit_tile_select(cmd, cs, tile);
-
tu_cs_emit_call(cs, &cmd->draw_cs);
if (use_hw_binning(cmd)) {
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- if (use_hw_binning(cmd))
- cmd->use_vsc_data = true;
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
tu6_tile_render_begin(cmd, &cmd->cs);
- for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
- for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
- struct tu_tile tile;
- tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
- tu6_render_tile(cmd, &cmd->cs, &tile);
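+ /* walk the tiles in pipe/slot order so that the (pipe, slot) passed to
+ * tu6_emit_tile_select matches the VSC pipe configuration
+ */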
+ uint32_t pipe = 0;
+ for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
+ for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
+ uint32_t tx1 = px * fb->pipe0.width;
+ uint32_t ty1 = py * fb->pipe0.height;
+ uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
+ uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
+ uint32_t slot = 0;
+ for (uint32_t ty = ty1; ty < ty2; ty++) {
+ for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
+ tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
+ tu6_render_tile(cmd, &cmd->cs);
+ }
+ }
}
}
static void
tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
+ tu6_sysmem_render_begin(cmd, &cmd->cs);
tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
-static void
-tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
- const VkRect2D *render_area)
-{
- const struct tu_device *dev = cmd->device;
- struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
- tiling->render_area = *render_area;
- tiling->force_sysmem = false;
-
- tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
- tu_tiling_config_update_pipe_layout(tiling, dev);
- tu_tiling_config_update_pipes(tiling, dev);
-}
-
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
struct tu_cmd_pool *pool,
list_inithead(&cmd_buffer->upload.list);
- VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
- if (result != VK_SUCCESS)
- goto fail_scratch_bo;
-
- /* TODO: resize on overflow */
- cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
- cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
- cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
- cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
-
return VK_SUCCESS;
-
-fail_scratch_bo:
- list_del(&cmd_buffer->pool_link);
- return result;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
- tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
-
list_del(&cmd_buffer->pool_link);
tu_cs_finish(&cmd_buffer->cs);
cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
cmd->state.max_index_count = (buf->size - offset) >> index_shift;
cmd->state.index_size = index_size;
- cmd->state.index_shift = index_shift;
tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
/* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[i]));
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
}
0x40000 | /* ??? */
CP_MEM_TO_REG_0_UNK31 |
CP_MEM_TO_REG_0_CNT(1));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(flush_base[idx]));
+ tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
if (offset) {
tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
}
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
- MSM_SUBMIT_BO_WRITE);
-
- if (cmd_buffer->use_vsc_data) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- }
-
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
- MSM_SUBMIT_BO_READ);
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
cmd->state.pass = pass;
cmd->state.subpass = pass->subpasses;
cmd->state.framebuffer = fb;
+ cmd->state.render_area = pRenderPassBegin->renderArea;
- tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
tu_cmd_prepare_tile_store_ib(cmd);
/* Note: because this is external, any flushes will happen before draw_cs
{
/* TODO: For indirect draws, we can't compute the BO size ahead of time.
* Still not sure what to do here, so just allocate a reasonably large
- * BO and hope for the best for now.
- * (maxTessellationControlPerVertexOutputComponents * 2048 vertices +
- * maxTessellationControlPerPatchOutputComponents * 512 patches) */
- if (!draw_count) {
- return ((128 * 2048) + (128 * 512)) * 4;
- }
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
- /* For each patch, adreno lays out the tess param BO in memory as:
- * (v_input[0][0])...(v_input[i][j])(p_input[0])...(p_input[k]).
- * where i = # vertices per patch, j = # per-vertex outputs, and
- * k = # per-patch outputs.*/
+ /* the tess param BO is pipeline->tess.param_stride bytes per patch,
+ * which includes both the per-vertex and per-patch outputs;
+ * build_primitive_map in ir3 calculates this stride
+ */
uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
uint32_t num_patches = draw_count / verts_per_patch;
- return draw_count * pipeline->tess.per_vertex_output_size +
- pipeline->tess.per_patch_output_size * num_patches;
+ return num_patches * pipeline->tess.param_stride;
}
static uint64_t
{
/* TODO: For indirect draws, we can't compute the BO size ahead of time.
* Still not sure what to do here, so just allocate a reasonably large
- * BO and hope for the best for now.
- * (quad factor stride * 512 patches) */
- if (!draw_count) {
- return (28 * 512) * 4;
- }
+ * BO and hope for the best for now. */
+ if (!draw_count)
+ draw_count = 2048;
/* Each distinct patch gets its own tess factor output. */
uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
tu_cs_emit(cs, instanceCount);
tu_cs_emit(cs, indexCount);
- tu_cs_emit(cs, 0x0); /* XXX */
- tu_cs_emit_qw(cs, cmd->state.index_va + (firstIndex << cmd->state.index_shift));
- tu_cs_emit(cs, indexCount << cmd->state.index_shift);
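+ /* pass firstIndex and the full bound index buffer range; the CP applies
+ * the firstIndex offset itself instead of us pre-offsetting index_va
+ */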
+ tu_cs_emit(cs, firstIndex);
+ tu_cs_emit_qw(cs, cmd->state.index_va);
+ tu_cs_emit(cs, cmd->state.max_index_count);
}
void