#include "vk_format.h"
#include "tu_cs.h"
-#include "tu_blit.h"
#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
return VK_SUCCESS;
}
-static bool
-is_linear_mipmapped(const struct tu_image_view *iview)
-{
- return iview->image->layout.tile_mode == TILE6_LINEAR &&
- iview->base_mip != iview->image->level_count - 1;
-}
-
-static bool
-force_sysmem(const struct tu_cmd_buffer *cmd,
- const struct VkRect2D *render_area)
-{
- const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_physical_device *device = cmd->device->physical_device;
- bool has_linear_mipmapped_store = false;
- const struct tu_render_pass *pass = cmd->state.pass;
-
- /* Iterate over all the places we call tu6_emit_store_attachment() */
- for (unsigned i = 0; i < pass->subpass_count; i++) {
- const struct tu_subpass *subpass = &pass->subpasses[i];
- if (subpass->resolve_attachments) {
- for (unsigned i = 0; i < subpass->color_count; i++) {
- uint32_t a = subpass->resolve_attachments[i].attachment;
- if (a != VK_ATTACHMENT_UNUSED &&
- cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
- const struct tu_image_view *iview = fb->attachments[a].attachment;
- if (is_linear_mipmapped(iview)) {
- has_linear_mipmapped_store = true;
- break;
- }
- }
- }
- }
- }
-
- for (unsigned i = 0; i < pass->attachment_count; i++) {
- if (pass->attachments[i].gmem_offset >= 0 &&
- cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
- const struct tu_image_view *iview = fb->attachments[i].attachment;
- if (is_linear_mipmapped(iview)) {
- has_linear_mipmapped_store = true;
- break;
- }
- }
- }
-
- /* Linear textures cannot have any padding between mipmap levels and their
- * height isn't padded, while at the same time the GMEM->MEM resolve does
- * not have per-pixel granularity, so if the image height isn't aligned to
- * the resolve granularity and the render area is tall enough, we may wind
- * up writing past the bottom of the image into the next miplevel or even
- * past the end of the image. For the last miplevel, the layout code should
- * insert enough padding so that the overdraw writes to the padding. To
- * work around this, we force-enable sysmem rendering.
- */
- const uint32_t y2 = render_area->offset.y + render_area->extent.height;
- const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
-
- return has_linear_mipmapped_store && aligned_y2 > fb->height;
-}
-
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
const struct tu_device *dev,
uint32_t pixels)
{
- const uint32_t tile_align_w = dev->physical_device->tile_align_w;
- const uint32_t tile_align_h = dev->physical_device->tile_align_h;
- const uint32_t max_tile_width = 1024; /* A6xx */
+ const uint32_t tile_align_w = 64; /* note: 32 when no input attachments */
+ const uint32_t tile_align_h = 16;
+ const uint32_t max_tile_width = 1024;
/* note: don't offset the tiling config by render_area.offset,
* because binning pass can't deal with it
align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
}
+ /* No GMEM space for attachments means rendering will be forced to sysmem,
+ * so don't bother trying to build a valid tile config.
+ * TODO: just skip all GMEM stuff when sysmem is forced?
+ */
+ if (!pixels)
+ return;
+
/* do not exceed gmem size */
while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
};
tiling->pipe_count = tiling->tile_count;
- /* do not exceed max pipe count vertically */
- while (tiling->pipe_count.height > max_pipe_count) {
- tiling->pipe0.height += 2;
- tiling->pipe_count.height =
- (tiling->tile_count.height + tiling->pipe0.height - 1) /
- tiling->pipe0.height;
- }
-
- /* do not exceed max pipe count */
- while (tiling->pipe_count.width * tiling->pipe_count.height >
- max_pipe_count) {
- tiling->pipe0.width += 1;
- tiling->pipe_count.width =
- (tiling->tile_count.width + tiling->pipe0.width - 1) /
- tiling->pipe0.width;
+ while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
+ if (tiling->pipe0.width < tiling->pipe0.height) {
+ tiling->pipe0.width += 1;
+ tiling->pipe_count.width =
+ DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
+ } else {
+ tiling->pipe0.height += 1;
+ tiling->pipe_count.height =
+ DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
+ }
}
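/* Worked example of the loop above (hypothetical values, assuming pipe0
 * starts at 1x1 and max_pipe_count is 16): for a tile_count of 12x10 the
 * loop grows the smaller of the two pipe0 dimensions (height on a tie),
 * so pipe_count shrinks 12x10 -> 12x5 -> 6x5 -> 6x4 -> 4x4, at which
 * point 16 pipes fit.
 */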
}
const uint32_t py = ty / tiling->pipe0.height;
const uint32_t sx = tx - tiling->pipe0.width * px;
const uint32_t sy = ty - tiling->pipe0.height * py;
+ /* last pipe has different width */
+ const uint32_t pipe_width =
+ MIN2(tiling->pipe0.width,
+ tiling->tile_count.width - px * tiling->pipe0.width);
assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
/* convert to 1D indices */
tile->pipe = tiling->pipe_count.width * py + px;
- tile->slot = tiling->pipe0.width * sy + sx;
+ tile->slot = pipe_width * sy + sx;
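/* Example (hypothetical sizes): with pipe0 covering 4x4 tiles and
 * tile_count.width = 10, the last pipe column only spans 2 tiles, so
 * slots in that pipe are numbered with a stride of 2 instead of 4.
 */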
/* get the blit area for the tile */
tile->begin = (VkOffset2D) {
}
}
-#define tu_image_view_ubwc_pitches(iview) \
- .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip), \
- .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
-
static void
tu6_emit_zs(struct tu_cmd_buffer *cmd,
const struct tu_subpass *subpass,
tu_cs_emit_regs(cs,
A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
- A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
+ A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(
+ fdl_layer_stride(&iview->image->layout, iview->base_mip)),
A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
continue;
const struct tu_image_view *iview = fb->attachments[a].attachment;
- const enum a6xx_tile_mode tile_mode =
- tu6_get_image_tile_mode(iview->image, iview->base_mip);
mrt_comp[i] = 0xf;
if (vk_format_is_srgb(iview->vk_format))
srgb_cntl |= (1 << i);
- const struct tu_native_format format =
- tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
+ struct tu_native_format format =
+ tu6_format_image(iview->image, iview->vk_format, iview->base_mip);
tu_cs_emit_regs(cs,
A6XX_RB_MRT_BUF_INFO(i,
- .color_tile_mode = tile_mode,
+ .color_tile_mode = format.tile_mode,
.color_format = format.fmt,
.color_swap = format.swap),
A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
- A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
+ A6XX_RB_MRT_ARRAY_PITCH(i,
+ fdl_layer_stride(&iview->image->layout, iview->base_mip)),
A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
.rt5 = mrt_comp[5],
.rt6 = mrt_comp[6],
.rt7 = mrt_comp[7]));
+
+ tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
-static void
-tu6_emit_msaa(struct tu_cmd_buffer *cmd,
- const struct tu_subpass *subpass,
- struct tu_cs *cs)
+void
+tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
{
- const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
+ const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
bool msaa_disable = samples == MSAA_ONE;
tu_cs_emit_regs(cs,
uint32_t x2 = x1 + render_area->extent.width - 1;
uint32_t y2 = y1 + render_area->extent.height - 1;
- /* TODO: alignment requirement seems to be less than tile_align_w/h */
if (align) {
- x1 = x1 & ~cmd->device->physical_device->tile_align_w;
- y1 = y1 & ~cmd->device->physical_device->tile_align_h;
- x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
- y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
+ x1 = x1 & ~(GMEM_ALIGN_W - 1);
+ y1 = y1 & ~(GMEM_ALIGN_H - 1);
+ x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
+ y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
}
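/* Example: with a hypothetical GMEM_ALIGN_W of 32, a render area spanning
 * x = 100..299 becomes x1 = 96 and x2 = 319, so the blit scissor always
 * covers whole aligned blocks.
 */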
tu_cs_emit_regs(cs,
A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
}
-static void
-tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- const struct tu_image_view *iview,
- uint32_t gmem_offset,
- bool resolve)
-{
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));
-
- const struct tu_native_format format =
- tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
-
- enum a6xx_tile_mode tile_mode =
- tu6_get_image_tile_mode(iview->image, iview->base_mip);
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_DST_INFO(
- .tile_mode = tile_mode,
- .samples = tu_msaa_samples(iview->image->samples),
- .color_format = format.fmt,
- .color_swap = format.swap,
- .flags = iview->image->layout.ubwc_layer_size != 0),
- A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
- A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
- A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));
-
- if (iview->image->layout.ubwc_layer_size) {
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
- A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
- }
-
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
-}
-
-static void
-tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
- tu6_emit_event_write(cmd, cs, BLIT, false);
-}
-
-static void
-tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
+void
+tu6_emit_window_scissor(struct tu_cs *cs,
uint32_t x1,
uint32_t y1,
uint32_t x2,
A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
-static void
-tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- uint32_t x1,
- uint32_t y1)
+void
+tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
{
tu_cs_emit_regs(cs,
A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
return true;
+ /* can't fit attachments into gmem */
+ if (!cmd->state.pass->gmem_pixels)
+ return true;
+
+ if (cmd->state.framebuffer->layers > 1)
+ return true;
+
return cmd->state.tiling_config.force_sysmem;
}
const uint32_t y1 = tile->begin.y;
const uint32_t x2 = tile->end.x - 1;
const uint32_t y2 = tile->end.y - 1;
- tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
- tu6_emit_window_offset(cmd, cs, x1, y1);
+ tu6_emit_window_scissor(cs, x1, y1, x2, y2);
+ tu6_emit_window_offset(cs, x1, y1);
tu_cs_emit_regs(cs,
- A6XX_VPC_SO_OVERRIDE(.so_disable = true));
+ A6XX_VPC_SO_OVERRIDE(.so_disable = false));
if (use_hw_binning(cmd)) {
tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
}
}
-static void
-tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
-{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
- const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_image_view *iview = fb->attachments[a].attachment;
- const struct tu_render_pass_attachment *attachment =
- &cmd->state.pass->attachments[a];
-
- if (attachment->gmem_offset < 0)
- return;
-
- const uint32_t x1 = tiling->render_area.offset.x;
- const uint32_t y1 = tiling->render_area.offset.y;
- const uint32_t x2 = x1 + tiling->render_area.extent.width;
- const uint32_t y2 = y1 + tiling->render_area.extent.height;
- const uint32_t tile_x2 =
- tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
- const uint32_t tile_y2 =
- tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
- bool need_load =
- x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
- y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);
-
- if (need_load)
- tu_finishme("improve handling of unaligned render area");
-
- if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
- need_load = true;
-
- if (vk_format_has_stencil(iview->vk_format) &&
- attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
- need_load = true;
-
- if (need_load) {
- tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
- tu6_emit_blit(cmd, cs);
- }
-}
-
-static void
-tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
- uint32_t a,
- const VkRenderPassBeginInfo *info)
-{
- const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_image_view *iview = fb->attachments[a].attachment;
- const struct tu_render_pass_attachment *attachment =
- &cmd->state.pass->attachments[a];
- unsigned clear_mask = 0;
-
- /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
- if (attachment->gmem_offset < 0)
- return;
-
- if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
- clear_mask = 0xf;
-
- if (vk_format_has_stencil(iview->vk_format)) {
- clear_mask &= 0x1;
- if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
- clear_mask |= 0x2;
- }
- if (!clear_mask)
- return;
-
- tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
- &info->pClearValues[a]);
-}
-
-static void
-tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- uint32_t a,
- uint32_t gmem_a,
- bool resolve)
-{
- tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
-
- tu6_emit_blit_info(cmd, cs,
- cmd->state.framebuffer->attachments[a].attachment,
- cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
- tu6_emit_blit(cmd, cs);
-
- tu_cond_exec_end(cs);
-}
-
static void
tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
uint32_t gmem_a)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_image_view *dst = fb->attachments[a].attachment;
- const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
-
- tu_blit(cmd, cs, &(struct tu_blit) {
- .dst = sysmem_attachment_surf(dst, dst->base_layer,
- &cmd->state.tiling_config.render_area),
- .src = sysmem_attachment_surf(src, src->base_layer,
- &cmd->state.tiling_config.render_area),
- .layers = fb->layers,
- });
-}
-
-
-/* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
-static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- uint32_t a,
- uint32_t gmem_a)
-{
- if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
- return;
-
- tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
-
- tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
- tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
- tu_cond_exec_end(cs);
-}
-
-static void
-tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs,
- uint32_t a,
- uint32_t gmem_a)
-{
- if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
- return;
+ struct tu_image_view *dst = fb->attachments[a].attachment;
+ struct tu_image_view *src = fb->attachments[gmem_a].attachment;
- tu6_emit_blit_info(cmd, cs,
- cmd->state.framebuffer->attachments[a].attachment,
- cmd->state.pass->attachments[gmem_a].gmem_offset, true);
- tu6_emit_blit(cmd, cs);
+ tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
static void
for (uint32_t a = 0; a < pass->attachment_count; ++a) {
if (pass->attachments[a].gmem_offset >= 0)
- tu6_emit_store_attachment(cmd, cs, a, a);
+ tu_store_gmem_attachment(cmd, cs, a, a);
}
if (subpass->resolve_attachments) {
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
if (a != VK_ATTACHMENT_UNUSED)
- tu6_emit_store_attachment(cmd, cs, a,
- subpass->color_attachments[i].attachment);
+ tu_store_gmem_attachment(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
}
}
}
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
+ const struct tu_physical_device *phys_dev = cmd->device->physical_device;
+
tu6_emit_cache_flush(cmd, cs);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
+ tu_cs_emit_regs(cs,
+ A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_BASE(0),
- A6XX_VPC_SO_BUFFER_SIZE(0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_FLUSH_BASE(0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUF_CNTL(0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_OFFSET(0, 0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_BASE(1, 0),
- A6XX_VPC_SO_BUFFER_SIZE(1, 0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_OFFSET(1, 0),
- A6XX_VPC_SO_FLUSH_BASE(1, 0),
- A6XX_VPC_SO_BUFFER_BASE(2, 0),
- A6XX_VPC_SO_BUFFER_SIZE(2, 0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_OFFSET(2, 0),
- A6XX_VPC_SO_FLUSH_BASE(2, 0),
- A6XX_VPC_SO_BUFFER_BASE(3, 0),
- A6XX_VPC_SO_BUFFER_SIZE(3, 0));
-
- tu_cs_emit_regs(cs,
- A6XX_VPC_SO_BUFFER_OFFSET(3, 0),
- A6XX_VPC_SO_FLUSH_BASE(3, 0));
+ /* Disable streamout by default: */
+ tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
+ tu_cs_emit(cs, 0);
tu_cs_emit_regs(cs,
A6XX_SP_HS_CTRL_REG0(0));
tu_cs_emit_regs(cs,
A6XX_RB_LRZ_CNTL(0));
+ tu_cs_emit_regs(cs,
+ A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+ tu_cs_emit_regs(cs,
+ A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+
tu_cs_sanity_check(cs);
}
{
unsigned seqno;
- seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
+ seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
/* Clear vsc_scratch: */
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
tu_cs_emit(cs, 0x0);
/* Check for overflow, write vsc_scratch if detected: */
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
}
tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
CP_MEM_TO_REG_0_CNT(1 - 1));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_SCRATCH);
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
/*
* This is a bit awkward, we really want a way to invert the
tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
CP_REG_TO_MEM_0_CNT(0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));
tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
tu_cs_emit(cs, 0x0);
uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
- tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
+ tu6_emit_window_scissor(cs, x1, y1, x2, y2);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
- tu_cs_emit_wfi(cs);
-
- tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
-
cmd->wait_for_idle = false;
}
-static void
-tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
- uint32_t a,
- const VkRenderPassBeginInfo *info)
-{
- const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_image_view *iview = fb->attachments[a].attachment;
- const struct tu_render_pass_attachment *attachment =
- &cmd->state.pass->attachments[a];
- unsigned clear_mask = 0;
-
- /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
- if (attachment->gmem_offset < 0)
- return;
-
- if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- clear_mask = 0xf;
- }
-
- if (vk_format_has_stencil(iview->vk_format)) {
- clear_mask &= 0x1;
- if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
- clear_mask |= 0x2;
- if (clear_mask != 0x3)
- tu_finishme("depth/stencil only load op");
- }
-
- if (!clear_mask)
- return;
-
- tu_clear_sysmem_attachment(cmd, cs, a,
- &info->pClearValues[a], &(struct VkClearRect) {
- .rect = info->renderArea,
- .baseArrayLayer = iview->base_layer,
- .layerCount = iview->layer_count,
- });
-}
-
static void
tu_emit_load_clear(struct tu_cmd_buffer *cmd,
const VkRenderPassBeginInfo *info)
tu6_emit_blit_scissor(cmd, cs, true);
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
- tu6_emit_load_attachment(cmd, cs, i);
+ tu_load_gmem_attachment(cmd, cs, i);
tu6_emit_blit_scissor(cmd, cs, false);
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
- tu6_emit_clear_attachment(cmd, cs, i, info);
+ tu_clear_gmem_attachment(cmd, cs, i, info);
tu_cond_exec_end(cs);
- /* invalidate because reading input attachments will cache GMEM and
- * the cache isn''t updated when GMEM is written
- * TODO: is there a no-cache bit for textures?
- */
- if (cmd->state.subpass->input_count)
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
- tu_emit_sysmem_clear_attachment(cmd, cs, i, info);
+ tu_clear_sysmem_attachment(cmd, cs, i, info);
tu_cond_exec_end(cs);
}
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
const struct VkRect2D *renderArea)
{
+ const struct tu_physical_device *phys_dev = cmd->device->physical_device;
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- if (fb->width > 0 && fb->height > 0) {
- tu6_emit_window_scissor(cmd, cs,
- 0, 0, fb->width - 1, fb->height - 1);
- } else {
- tu6_emit_window_scissor(cmd, cs, 0, 0, 0, 0);
- }
- tu6_emit_window_offset(cmd, cs, 0, 0);
+ assert(fb->width > 0 && fb->height > 0);
+ tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
+ tu6_emit_window_offset(cs, 0, 0);
tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
tu6_emit_wfi(cmd, cs);
tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(0x10000000));
+ A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
/* enable stream-out, with sysmem there is only one pass: */
tu_cs_emit_regs(cs,
/* Do any resolves of the last subpass. These are handled in the
* tile_store_ib in the gmem path.
*/
-
const struct tu_subpass *subpass = cmd->state.subpass;
if (subpass->resolve_attachments) {
for (unsigned i = 0; i < subpass->color_count; i++) {
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x0);
- /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
- tu6_emit_wfi(cmd, cs);
+ /* TODO: flushing with barriers instead of blindly always flushing */
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
+ tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
+
+ tu_cs_emit_wfi(cs);
tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
+ A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_gmem, .gmem = 1));
const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
if (use_hw_binning(cmd)) {
+ /* enable stream-out during binning pass: */
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+
tu6_emit_bin_size(cs,
tiling->tile0.extent.width,
tiling->tile0.extent.height,
tu6_emit_binning_pass(cmd, cs);
+ /* and disable stream-out for draw pass: */
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = true));
+
tu6_emit_bin_size(cs,
tiling->tile0.extent.width,
tiling->tile0.extent.height,
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x1);
} else {
+ /* no binning pass, so enable stream-out for draw pass: */
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+
tu6_emit_bin_size(cs,
tiling->tile0.extent.width,
tiling->tile0.extent.height,
static void
tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
{
- const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
+ const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
struct tu_cs sub_cs;
VkResult result =
struct tu_tiling_config *tiling = &cmd->state.tiling_config;
tiling->render_area = *render_area;
- tiling->force_sysmem = force_sysmem(cmd, render_area);
+ tiling->force_sysmem = false;
tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
tu_tiling_config_update_pipe_layout(tiling, dev);
descriptors_state->sets[idx] = set;
descriptors_state->valid |= (1u << idx);
+ /* Note: the actual input attachment indices come from the shader
+ * itself, so we can't generate the patched versions of these until
+ * draw time when both the pipeline and descriptors are bound and
+ * we're inside the render pass.
+ */
+ unsigned dst_idx = layout->set[idx].input_attachment_start;
+ memcpy(&descriptors_state->input_attachments[dst_idx * A6XX_TEX_CONST_DWORDS],
+ set->dynamic_descriptors,
+ set->layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
+
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
- unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
+ /* Dynamic buffers come after input attachments in the descriptor set
+ * itself, but due to how the Vulkan descriptor set binding works, we
+ * have to put input attachments and dynamic buffers in separate
+ * buffers in the descriptor_state and then combine them at draw
+ * time. Binding a descriptor set only invalidates the descriptor
+ * sets after it, but if we try to tightly pack the descriptors after
+ * the input attachments then we could corrupt dynamic buffers in the
+ * descriptor set before it, or we'd have to move all the dynamic
+ * buffers over. We just put them into separate buffers to make
+ * binding as well as the later patching of input attachments easy.
+ */
+ unsigned src_idx = j + set->layout->input_attachment_count;
+ unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
assert(dyn_idx < dynamicOffsetCount);
- descriptors_state->dynamic_buffers[idx] =
- set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
+ uint32_t *dst =
+ &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
+ uint32_t *src =
+ &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
+ uint32_t offset = pDynamicOffsets[dyn_idx];
+
+ /* Patch the storage/uniform descriptors right away. */
+ if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
+ /* Note: we can assume here that the addition won't roll over and
+ * change the SIZE field.
+ */
+ uint64_t va = src[0] | ((uint64_t)src[1] << 32);
+ va += offset;
+ dst[0] = va;
+ dst[1] = va >> 32;
+ } else {
+ memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
+ /* Note: A6XX_IBO_5_DEPTH is always 0 */
+ uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
+ va += offset;
+ dst[4] = va;
+ dst[5] = va >> 32;
+ }
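/* Illustrative values: a dynamic UBO descriptor with src[0] = 0x1000 and
 * src[1] = 0x5 encodes va = 0x500001000; a dynamic offset of 0x100
 * patches it to dst[0] = 0x1100, dst[1] = 0x5.
 */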
}
}
- cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
+ if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
+ cmd_buffer->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+ else
+ cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
+}
+
+void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets,
+ const VkDeviceSize *pSizes)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);
+
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ uint32_t idx = firstBinding + i;
+ TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
+
+ if (pOffsets[i] != 0)
+ cmd->state.streamout_reset |= 1 << idx;
+
+ cmd->state.streamout_buf.buffers[idx] = buf;
+ cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
+ cmd->state.streamout_buf.sizes[idx] = pSizes[i];
+
+ cmd->state.streamout_enabled |= 1 << idx;
+ }
+
+ cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
+}
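/* Example usage from the application side (illustrative only, hypothetical
 * handles and sizes):
 *
 *    VkDeviceSize offset = 0, size = xfb_buffer_size;
 *    vkCmdBindTransformFeedbackBuffersEXT(cmd_buf, 0, 1, &xfb_buffer,
 *                                         &offset, &size);
 *    vkCmdBeginTransformFeedbackEXT(cmd_buf, 0, 0, NULL, NULL);
 *    ...draws whose vertex output is captured into xfb_buffer...
 *    vkCmdEndTransformFeedbackEXT(cmd_buf, 0, 0, NULL, NULL);
 */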
+
+void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
+ /* TODO: do something with the counter buffer? */
+}
+
+void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
+ /* TODO: do something with the counter buffer? */
+
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ cmd->state.streamout_enabled = 0;
}
void
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
+ MSM_SUBMIT_BO_READ);
+
for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
const VkViewport *pViewports)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
- struct tu_cs *draw_cs = &cmd->draw_cs;
assert(firstViewport == 0 && viewportCount == 1);
- tu6_emit_viewport(draw_cs, pViewports);
-
- tu_cs_sanity_check(draw_cs);
+ cmd->state.dynamic.viewport.viewports[0] = pViewports[0];
+ cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void
const VkRect2D *pScissors)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
- struct tu_cs *draw_cs = &cmd->draw_cs;
assert(firstScissor == 0 && scissorCount == 1);
- tu6_emit_scissor(draw_cs, pScissors);
-
- tu_cs_sanity_check(draw_cs);
+ cmd->state.dynamic.scissor.scissors[0] = pScissors[0];
+ cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void
break;
}
- result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- break;
- }
+ if (secondary->usage_flags &
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ assert(tu_cs_is_empty(&secondary->cs));
- result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
- &secondary->draw_epilogue_cs);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- break;
+ result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
+ result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
+ &secondary->draw_epilogue_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+ } else {
+ assert(tu_cs_is_empty(&secondary->draw_cs));
+ assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
+
+ for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
+ tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
+
+ tu_cs_emit_call(&cmd->cs, &secondary->cs);
}
}
cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
- tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
+ tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
/* note: use_hw_binning only checks tiling config */
tu_bo_list_add(&cmd->bo_list, iview->image->bo,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+
+ /* Flag input attachment descriptors for re-emission if necessary */
+ cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}
void
struct tu_cs *cs = &cmd->draw_cs;
const struct tu_subpass *subpass = cmd->state.subpass++;
- /* TODO:
- * if msaa samples change between subpasses,
- * attachment store is broken for some attachments
- */
+
+ tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
+
if (subpass->resolve_attachments) {
- tu6_emit_blit_scissor(cmd, cs, true);
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
- if (a != VK_ATTACHMENT_UNUSED) {
- tu6_emit_resolve(cmd, cs, a,
- subpass->color_attachments[i].attachment);
- }
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ tu_store_gmem_attachment(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
+
+ if (pass->attachments[a].gmem_offset < 0)
+ continue;
+
+ /* TODO:
+ * check if the resolved attachment is needed by later subpasses,
+ * if it is, we should do a GMEM->GMEM resolve instead of GMEM->MEM->GMEM.
+ */
+ tu_finishme("missing GMEM->GMEM resolve path\n");
+ tu_emit_load_gmem_attachment(cmd, cs, a);
}
}
- /* invalidate because reading input attachments will cache GMEM and
- * the cache isn''t updated when GMEM is written
- * TODO: is there a no-cache bit for textures?
- */
- if (cmd->state.subpass->input_count)
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+ tu_cond_exec_end(cs);
- /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
- tu6_emit_zs(cmd, cmd->state.subpass, cs);
- tu6_emit_mrt(cmd, cmd->state.subpass, cs);
- tu6_emit_msaa(cmd, cmd->state.subpass, cs);
- tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
+ tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
- /* Emit flushes so that input attachments will read the correct value. This
- * is for sysmem only, although it shouldn't do much harm on gmem.
+ /* Emit flushes so that input attachments will read the correct value.
+ * TODO: use subpass dependencies to flush or not
*/
tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
- /* TODO:
- * since we don't know how to do GMEM->GMEM resolve,
- * resolve attachments are resolved to memory then loaded to GMEM again if needed
- */
if (subpass->resolve_attachments) {
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
- if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
- tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
- tu6_emit_predicated_blit(cmd, cs, a, a, false);
- }
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ tu6_emit_sysmem_resolve(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
}
+
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
}
+
+ tu_cond_exec_end(cs);
+
+ /* If subpass->input_count > 0, a texture cache invalidate is likely to be needed */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
+ /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
+ tu6_emit_zs(cmd, cmd->state.subpass, cs);
+ tu6_emit_mrt(cmd, cmd->state.subpass, cs);
+ tu6_emit_msaa(cs, cmd->state.subpass->samples);
+ tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
+
+ /* Flag input attachment descriptors for re-emission if necessary */
+ cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}
void
*/
struct tu_buffer *count_buffer;
uint64_t count_buffer_offset;
+
+ /**
+ * Stream output parameters resource.
+ */
+ struct tu_buffer *streamout_buffer;
+ uint64_t streamout_buffer_offset;
};
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+#define ENABLE_NON_GMEM (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_SYSMEM)
enum tu_draw_state_group_id
{
TU_DRAW_STATE_DS,
TU_DRAW_STATE_BLEND,
TU_DRAW_STATE_VS_CONST,
+ TU_DRAW_STATE_GS_CONST,
TU_DRAW_STATE_FS_CONST,
- TU_DRAW_STATE_VS_TEX,
- TU_DRAW_STATE_FS_TEX_SYSMEM,
- TU_DRAW_STATE_FS_TEX_GMEM,
- TU_DRAW_STATE_FS_IBO,
+ TU_DRAW_STATE_DESC_SETS,
+ TU_DRAW_STATE_DESC_SETS_GMEM,
+ TU_DRAW_STATE_DESC_SETS_LOAD,
TU_DRAW_STATE_VS_PARAMS,
TU_DRAW_STATE_COUNT,
struct tu_cs_entry ib;
};
-const static struct tu_sampler*
-sampler_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map, unsigned i,
- unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- if (layout->immutable_samplers_offset) {
- const struct tu_sampler *immutable_samplers =
- tu_immutable_samplers(set->layout, layout);
-
- return &immutable_samplers[array_index];
- }
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
- array_index *
- (A6XX_TEX_CONST_DWORDS +
- sizeof(struct tu_sampler) / 4)];
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-}
-
-static void
-write_tex_const(struct tu_cmd_buffer *cmd,
- uint32_t *dst,
- struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index, bool is_sysmem)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- array_index * A6XX_TEX_CONST_DWORDS],
- A6XX_TEX_CONST_DWORDS * 4);
- break;
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- array_index *
- (A6XX_TEX_CONST_DWORDS +
- sizeof(struct tu_sampler) / 4)],
- A6XX_TEX_CONST_DWORDS * 4);
- break;
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-
- if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
- uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
- array_index].attachment;
- const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
-
- assert(att->gmem_offset >= 0);
-
- dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
- dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
- dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
- dst[2] |=
- A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
- dst[3] = 0;
- dst[4] = 0x100000 + att->gmem_offset;
- dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
- for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
- dst[i] = 0;
-
- if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
- tu_finishme("patch input attachment pitch for secondary cmd buffer");
- }
-}
-
-static void
-write_image_ibo(struct tu_cmd_buffer *cmd,
- uint32_t *dst,
- struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
-
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
- A6XX_TEX_CONST_DWORDS * 4);
-}
-
-static uint64_t
-buffer_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
- array_index];
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
- set->mapped_ptr[layout->offset / 4 + array_index * 2];
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-}
-
-static inline uint32_t
-tu6_stage2opcode(gl_shader_stage type)
+static inline uint32_t
+tu6_stage2opcode(gl_shader_stage type)
{
switch (type) {
case MESA_SHADER_VERTEX:
switch (type) {
case MESA_SHADER_VERTEX:
return SB6_VS_SHADER;
+ case MESA_SHADER_GEOMETRY:
+ return SB6_GS_SHADER;
case MESA_SHADER_FRAGMENT:
return SB6_FS_SHADER;
case MESA_SHADER_COMPUTE:
&pipeline->program.link[type];
const struct ir3_ubo_analysis_state *state = &link->ubo_state;
- for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
- if (state->range[i].start < state->range[i].end) {
- uint32_t size = state->range[i].end - state->range[i].start;
- uint32_t offset = state->range[i].start;
-
- /* and even if the start of the const buffer is before
- * first_immediate, the end may not be:
- */
- size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
-
- if (size == 0)
- continue;
-
- /* things should be aligned to vec4: */
- debug_assert((state->range[i].offset % 16) == 0);
- debug_assert((size % 16) == 0);
- debug_assert((offset % 16) == 0);
-
- if (i == 0) {
- /* push constants */
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
- tu_cs_emit(cs, 0);
- tu_cs_emit(cs, 0);
- for (unsigned i = 0; i < size / 4; i++)
- tu_cs_emit(cs, push_constants[i + offset / 4]);
- continue;
- }
-
- /* Look through the UBO map to find our UBO index, and get the VA for
- * that UBO.
- */
- uint64_t va = 0;
- uint32_t ubo_idx = i - 1;
- uint32_t ubo_map_base = 0;
- for (int j = 0; j < link->ubo_map.num; j++) {
- if (ubo_idx >= ubo_map_base &&
- ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
- va = buffer_ptr(descriptors_state, &link->ubo_map, j,
- ubo_idx - ubo_map_base);
- break;
- }
- ubo_map_base += link->ubo_map.array_size[j];
- }
- assert(va);
-
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
- tu_cs_emit_qw(cs, va + offset);
- }
+ if (link->push_consts.count > 0) {
+ unsigned num_units = link->push_consts.count;
+ unsigned offset = link->push_consts.lo;
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(num_units));
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, 0);
+ for (unsigned i = 0; i < num_units * 4; i++)
+ tu_cs_emit(cs, push_constants[i + offset * 4]);
}
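/* Example (hypothetical link values): push_consts.lo = 4 and
 * push_consts.count = 2 emit 8 dwords taken from push_constants[16..23],
 * loaded at const offset 4 (in vec4 units) for this stage.
 */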
-}
-static void
-tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type)
-{
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
+ for (uint32_t i = 0; i < state->num_enabled; i++) {
+ uint32_t size = state->range[i].end - state->range[i].start;
+ uint32_t offset = state->range[i].start;
+
+ /* and even if the start of the const buffer is before
+ * first_immediate, the end may not be:
+ */
+ size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
- uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
- uint32_t anum = align(num, 2);
+ if (size == 0)
+ continue;
- if (!num)
- return;
+ /* things should be aligned to vec4: */
+ debug_assert((state->range[i].offset % 16) == 0);
+ debug_assert((size % 16) == 0);
+ debug_assert((offset % 16) == 0);
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
- tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
- tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
-
- unsigned emitted = 0;
- for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
- for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
- tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
- emitted++;
- }
- }
+ /* Dig out the descriptor from the descriptor state and read the VA from
+ * it.
+ */
+ assert(state->range[i].bindless);
+ uint32_t *base = state->range[i].bindless_base == MAX_SETS ?
+ descriptors_state->dynamic_descriptors :
+ descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
+ unsigned block = state->range[i].block;
+ /* If the block in the shader here is in the dynamic descriptor set, it
+ * is an index into the dynamic descriptor set which is combined from
+ * dynamic descriptors and input attachments on-the-fly, and we don't
+ * have access to it here. Instead we work backwards to get the index
+ * into dynamic_descriptors.
+ */
+ if (state->range[i].bindless_base == MAX_SETS)
+ block -= pipeline->layout->input_attachment_count;
+ uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
+ uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
+ assert(va);
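/* Example (hypothetical layout): with 2 input attachments in the pipeline
 * layout, a UBO the compiler assigned to block 3 of the dynamic set maps
 * to dynamic_descriptors[1 * A6XX_TEX_CONST_DWORDS], and its base address
 * is reassembled from the first two dwords of that descriptor.
 */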
- for (; emitted < anum; emitted++) {
- tu_cs_emit(cs, 0xffffffff);
- tu_cs_emit(cs, 0xffffffff);
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit_qw(cs, va + offset);
}
}
tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
- tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
}
static VkResult
-tu6_emit_textures(struct tu_cmd_buffer *cmd,
- const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type,
- struct tu_cs_entry *entry,
- bool *needs_border,
- bool is_sysmem)
+tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ VkPipelineBindPoint bind_point,
+ struct tu_cs_entry *entry,
+ bool gmem)
{
struct tu_cs *draw_state = &cmd->sub_cs;
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
+ struct tu_pipeline_layout *layout = pipeline->layout;
+ struct tu_descriptor_state *descriptors_state =
+ tu_get_descriptors_state(cmd, bind_point);
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const uint32_t *input_attachment_idx =
+ pipeline->program.input_attachment_idx;
+ uint32_t num_dynamic_descs = layout->dynamic_offset_count +
+ layout->input_attachment_count;
+ struct ts_cs_memory dynamic_desc_set;
VkResult result;
- if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
- *entry = (struct tu_cs_entry) {};
- return VK_SUCCESS;
- }
-
- /* allocate and fill texture state */
- struct ts_cs_memory tex_const;
- result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
- A6XX_TEX_CONST_DWORDS, &tex_const);
- if (result != VK_SUCCESS)
- return result;
-
- int tex_index = 0;
- for (unsigned i = 0; i < link->texture_map.num; i++) {
- for (int j = 0; j < link->texture_map.array_size[i]; j++) {
- write_tex_const(cmd,
- &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
- descriptors_state, &link->texture_map, i, j,
- is_sysmem);
- }
- }
-
- /* allocate and fill sampler state */
- struct ts_cs_memory tex_samp = { 0 };
- if (link->sampler_map.num_desc) {
- result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
- A6XX_TEX_SAMP_DWORDS, &tex_samp);
+ if (num_dynamic_descs > 0) {
+ /* allocate and fill out dynamic descriptor set */
+ result = tu_cs_alloc(draw_state, num_dynamic_descs,
+ A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
if (result != VK_SUCCESS)
return result;
- int sampler_index = 0;
- for (unsigned i = 0; i < link->sampler_map.num; i++) {
- for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
- const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
- &link->sampler_map,
- i, j);
- memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
- sampler->state, sizeof(sampler->state));
- *needs_border |= sampler->needs_border;
+ memcpy(dynamic_desc_set.map, descriptors_state->input_attachments,
+ layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
+
+ if (gmem) {
+ /* Patch input attachments to refer to GMEM instead */
+ for (unsigned i = 0; i < layout->input_attachment_count; i++) {
+ uint32_t *dst =
+ &dynamic_desc_set.map[A6XX_TEX_CONST_DWORDS * i];
+
+ /* The compiler has already laid out input_attachment_idx in the
+ * final order of input attachments, so there's no need to go
+ * through the pipeline layout finding input attachments.
+ */
+ unsigned attachment_idx = input_attachment_idx[i];
+
+ /* It's possible for the pipeline layout to include an input
+ * attachment which doesn't actually exist for the current
+ * subpass. Of course, this is only valid so long as the pipeline
+ * doesn't try to actually load that attachment. Just skip
+ * patching in that scenario to avoid out-of-bounds accesses.
+ */
+ if (attachment_idx >= cmd->state.subpass->input_count)
+ continue;
+
+ uint32_t a = cmd->state.subpass->input_attachments[attachment_idx].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ assert(att->gmem_offset >= 0);
+
+ dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+ dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+ dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
+ dst[2] |=
+ A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
+ dst[3] = 0;
+ dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
+ dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+ for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+ dst[i] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
}
}
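/* Example of the patch above (hypothetical attachment): an RGBA8 input
 * attachment (cpp = 4) with a tile0 width of 96 pixels gets its pitch
 * rewritten to 384 bytes and its base pointed at
 * gmem_base + att->gmem_offset instead of system memory.
 */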
- }
- unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
- enum a6xx_state_block sb;
+ memcpy(dynamic_desc_set.map + layout->input_attachment_count * A6XX_TEX_CONST_DWORDS,
+ descriptors_state->dynamic_descriptors,
+ layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
+ }
- switch (type) {
- case MESA_SHADER_VERTEX:
- sb = SB6_VS_TEX;
- tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
- break;
- case MESA_SHADER_FRAGMENT:
- sb = SB6_FS_TEX;
- tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
+ uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg;
+ uint32_t hlsq_update_value;
+ switch (bind_point) {
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
+ hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
+ hlsq_update_value = 0x7c000;
break;
- case MESA_SHADER_COMPUTE:
- sb = SB6_CS_TEX;
- tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
+ hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
+ hlsq_update_value = 0x3e00;
break;
default:
- unreachable("bad state block");
- }
-
- struct tu_cs cs;
- result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
- if (result != VK_SUCCESS)
- return result;
-
- if (link->sampler_map.num_desc) {
- /* output sampler state: */
- tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
- }
-
- /* emit texture state: */
- tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
- tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
- tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
- tu_cs_emit(&cs, link->texture_map.num_desc);
-
- *entry = tu_cs_end_sub_stream(draw_state, &cs);
- return VK_SUCCESS;
-}
-
-static VkResult
-tu6_emit_ibo(struct tu_cmd_buffer *cmd,
- const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type,
- struct tu_cs_entry *entry)
-{
- struct tu_cs *draw_state = &cmd->sub_cs;
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
- VkResult result;
-
- unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
-
- if (num_desc == 0) {
- *entry = (struct tu_cs_entry) {};
- return VK_SUCCESS;
- }
-
- struct ts_cs_memory ibo_const;
- result = tu_cs_alloc(draw_state, num_desc,
- A6XX_TEX_CONST_DWORDS, &ibo_const);
- if (result != VK_SUCCESS)
- return result;
-
- int ssbo_index = 0;
- for (unsigned i = 0; i < link->ssbo_map.num; i++) {
- for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
- uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
-
- uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
- /* We don't expose robustBufferAccess, so leave the size unlimited. */
- uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
-
- dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
- dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
- A6XX_IBO_1_HEIGHT(sz >> 15);
- dst[2] = A6XX_IBO_2_UNK4 |
- A6XX_IBO_2_UNK31 |
- A6XX_IBO_2_TYPE(A6XX_TEX_1D);
- dst[3] = 0;
- dst[4] = va;
- dst[5] = va >> 32;
- for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
- dst[i] = 0;
-
- ssbo_index++;
- }
+ unreachable("bad bind point");
}
- for (unsigned i = 0; i < link->image_map.num; i++) {
- for (int j = 0; j < link->image_map.array_size[i]; j++) {
- uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
-
- write_image_ibo(cmd, dst,
- descriptors_state, &link->image_map, i, j);
-
- ssbo_index++;
- }
+ /* Be careful here to *not* refer to the pipeline, so that if only the
+ * pipeline changes we don't have to emit this again (except if there are
+ * dynamic descriptors in the pipeline layout). This means always emitting
+ * all the valid descriptors, so we always have to put the dynamic
+ * descriptor set in the driver-only slot at the end.
+ */
+ uint32_t num_user_sets = util_last_bit(descriptors_state->valid);
+ uint32_t num_sets = num_user_sets;
+ if (num_dynamic_descs > 0) {
+ num_user_sets = MAX_SETS;
+ num_sets = num_user_sets + 1;
}
- assert(ssbo_index == num_desc);
+ unsigned regs[2] = { sp_bindless_base_reg, hlsq_bindless_base_reg };
struct tu_cs cs;
- result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
+ result = tu_cs_begin_sub_stream(draw_state, ARRAY_SIZE(regs) * (1 + num_sets * 2) + 2, &cs);
if (result != VK_SUCCESS)
return result;
- uint32_t opcode, ibo_addr_reg;
- enum a6xx_state_block sb;
- enum a6xx_state_type st;
+ if (num_sets > 0) {
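+      /* The same set base addresses are programmed into both the SP and the
+       * HLSQ copy of the bindless base registers.
+       */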
+ for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
+ tu_cs_emit_pkt4(&cs, regs[i], num_sets * 2);
+ for (unsigned j = 0; j < num_user_sets; j++) {
+ if (descriptors_state->valid & (1 << j)) {
+ /* magic | 3 copied from the blob */
+ tu_cs_emit_qw(&cs, descriptors_state->sets[j]->va | 3);
+ } else {
+ tu_cs_emit_qw(&cs, 0 | 3);
+ }
+ }
+ if (num_dynamic_descs > 0) {
+ tu_cs_emit_qw(&cs, dynamic_desc_set.iova | 3);
+ }
+ }
- switch (type) {
- case MESA_SHADER_FRAGMENT:
- opcode = CP_LOAD_STATE6;
- st = ST6_SHADER;
- sb = SB6_IBO;
- ibo_addr_reg = REG_A6XX_SP_IBO_LO;
- break;
- case MESA_SHADER_COMPUTE:
- opcode = CP_LOAD_STATE6_FRAG;
- st = ST6_IBO;
- sb = SB6_CS_SHADER;
- ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
- break;
- default:
- unreachable("unsupported stage for ibos");
+ tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(hlsq_update_value));
}
- /* emit texture state: */
- tu_cs_emit_pkt7(&cs, opcode, 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(st) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
- tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
- tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
*entry = tu_cs_end_sub_stream(draw_state, &cs);
return VK_SUCCESS;
}
-struct PACKED bcolor_entry {
- uint32_t fp32[4];
- uint16_t ui16[4];
- int16_t si16[4];
- uint16_t fp16[4];
- uint16_t rgb565;
- uint16_t rgb5a1;
- uint16_t rgba4;
- uint8_t __pad0[2];
- uint8_t ui8[4];
- int8_t si8[4];
- uint32_t rgb10a2;
- uint32_t z24; /* also s8? */
- uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
- uint8_t __pad1[56];
-} border_color[] = {
- [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
- [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
- [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
- .fp32[3] = 0x3f800000,
- .ui16[3] = 0xffff,
- .si16[3] = 0x7fff,
- .fp16[3] = 0x3c00,
- .rgb5a1 = 0x8000,
- .rgba4 = 0xf000,
- .ui8[3] = 0xff,
- .si8[3] = 0x7f,
- .rgb10a2 = 0xc0000000,
- .srgb[3] = 0x3c00,
- },
- [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
- .fp32[3] = 1,
- .fp16[3] = 1,
- },
- [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
- .fp32[0 ... 3] = 0x3f800000,
- .ui16[0 ... 3] = 0xffff,
- .si16[0 ... 3] = 0x7fff,
- .fp16[0 ... 3] = 0x3c00,
- .rgb565 = 0xffff,
- .rgb5a1 = 0xffff,
- .rgba4 = 0xffff,
- .ui8[0 ... 3] = 0xff,
- .si8[0 ... 3] = 0x7f,
- .rgb10a2 = 0xffffffff,
- .z24 = 0xffffff,
- .srgb[0 ... 3] = 0x3c00,
- },
- [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
- .fp32[0 ... 3] = 1,
- .fp16[0 ... 3] = 1,
- },
-};
-
-static VkResult
-tu6_emit_border_color(struct tu_cmd_buffer *cmd,
- struct tu_cs *cs)
+static void
+tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
+ struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;
- const struct tu_pipeline *pipeline = cmd->state.pipeline;
- struct tu_descriptor_state *descriptors_state =
- &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
- const struct tu_descriptor_map *vs_sampler =
- &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
- const struct tu_descriptor_map *fs_sampler =
- &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
- struct ts_cs_memory ptr;
-
- VkResult result = tu_cs_alloc(&cmd->sub_cs,
- vs_sampler->num_desc + fs_sampler->num_desc,
- 128 / 4,
- &ptr);
- if (result != VK_SUCCESS)
- return result;
+ for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+ struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
+ if (!buf)
+ continue;
+
+      uint32_t offset = cmd->state.streamout_buf.offsets[i];
- for (unsigned i = 0; i < vs_sampler->num; i++) {
- for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
- const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
- vs_sampler, i, j);
- memcpy(ptr.map, &border_color[sampler->border], 128);
- ptr.map += 128 / 4;
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
+ .bo_offset = buf->bo_offset));
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
+
+ if (cmd->state.streamout_reset & (1 << i)) {
+ offset *= tf->stride[i];
+
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
+ cmd->state.streamout_reset &= ~(1 << i);
+ } else {
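+         /* Not a reset: reload the append offset that the HW wrote back to
+          * the scratch BO (at FLUSH_BASE, set below) on the last FLUSH_SO
+          * event.
+          */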
+ tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
+ tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
+ CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
+ CP_MEM_TO_REG_0_CNT(0));
+ tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
+ ctrl_offset(flush_base[i].offset));
}
- }
- for (unsigned i = 0; i < fs_sampler->num; i++) {
- for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
- const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
- fs_sampler, i, j);
- memcpy(ptr.map, &border_color[sampler->border], 128);
- ptr.map += 128 / 4;
+ tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
+ .bo_offset =
+ ctrl_offset(flush_base[i])));
+ }
+
+ if (cmd->state.streamout_enabled) {
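+      /* CP_CONTEXT_REG_BUNCH takes (register, value) pairs: the buffer
+       * enable mask, the per-buffer output component counts, the global
+       * enable, and the streamout program.
+       */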
+ tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
+ tu_cs_emit(cs, tf->vpc_so_buf_cntl);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
+ tu_cs_emit(cs, tf->ncomp[0]);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
+ tu_cs_emit(cs, tf->ncomp[1]);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
+ tu_cs_emit(cs, tf->ncomp[2]);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
+ tu_cs_emit(cs, tf->ncomp[3]);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
+ tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
+ for (unsigned i = 0; i < tf->prog_count; i++) {
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
+ tu_cs_emit(cs, tf->prog[i]);
}
+ } else {
+ tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
+ tu_cs_emit(cs, 0);
}
-
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
- tu_cs_emit_qw(cs, ptr.iova);
- return VK_SUCCESS;
}
static VkResult
dynamic->stencil_reference.back);
}
+ if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
+ (pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
+ tu6_emit_viewport(cs, &cmd->state.dynamic.viewport.viewports[0]);
+ }
+
+ if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_SCISSOR) &&
+ (pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
+ tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
+ }
+
if (cmd->state.dirty &
(TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
for (uint32_t i = 0; i < pipeline->vi.count; i++) {
const uint32_t binding = pipeline->vi.bindings[i];
- const uint32_t stride = pipeline->vi.strides[i];
const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
const VkDeviceSize offset = buf->bo_offset +
- cmd->state.vb.offsets[binding] +
- pipeline->vi.offsets[i];
+ cmd->state.vb.offsets[binding];
const VkDeviceSize size =
- offset < buf->bo->size ? buf->bo->size - offset : 0;
+ offset < buf->size ? buf->size - offset : 0;
tu_cs_emit_regs(cs,
A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
- A6XX_VFD_FETCH_SIZE(i, size),
- A6XX_VFD_FETCH_STRIDE(i, stride));
+ A6XX_VFD_FETCH_SIZE(i, size));
}
}
.enable_mask = ENABLE_ALL,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
};
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_GS_CONST,
+ .enable_mask = ENABLE_ALL,
+ .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY)
+ };
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_CONST,
};
}
- if (cmd->state.dirty &
- (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
- bool needs_border = false;
- struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
-
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_VERTEX, &vs_tex, &needs_border,
- false);
- if (result != VK_SUCCESS)
- return result;
-
- /* TODO: we could emit just one texture descriptor draw state when there
- * are no input attachments, which is the most common case. We could
- * also split out the sampler state, which doesn't change even for input
- * attachments.
- */
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
- &needs_border, true);
- if (result != VK_SUCCESS)
- return result;
-
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_tex_gmem,
- &needs_border, false);
- if (result != VK_SUCCESS)
- return result;
+ if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
+ tu6_emit_streamout(cmd, cs);
- result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_ibo);
+   /* If there are any dynamic descriptors, then we may need to re-emit
+ * them after every pipeline change in case the number of input attachments
+ * changes. We also always need to re-emit after a pipeline change if there
+ * are any input attachments, because the input attachment index comes from
+ * the pipeline. Finally, it can also happen that the subpass changes
+ * without the pipeline changing, in which case the GMEM descriptors need
+ * to be patched differently.
+ *
+ * TODO: We could probably be clever and avoid re-emitting state on
+ * pipeline changes if the number of input attachments is always 0. We
+ * could also only re-emit dynamic state.
+ */
+ if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS ||
+ ((pipeline->layout->dynamic_offset_count +
+ pipeline->layout->input_attachment_count > 0) &&
+ cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) ||
+ (pipeline->layout->input_attachment_count > 0 &&
+ cmd->state.dirty & TU_CMD_DIRTY_INPUT_ATTACHMENTS)) {
+ struct tu_cs_entry desc_sets, desc_sets_gmem;
+ bool need_gmem_desc_set = pipeline->layout->input_attachment_count > 0;
+
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ &desc_sets, false);
if (result != VK_SUCCESS)
return result;
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_VS_TEX,
- .enable_mask = ENABLE_ALL,
- .ib = vs_tex,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_TEX_GMEM,
- .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
- .ib = fs_tex_gmem,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
- .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
- .ib = fs_tex_sysmem,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_IBO,
- .enable_mask = ENABLE_DRAW,
- .ib = fs_ibo,
+ .id = TU_DRAW_STATE_DESC_SETS,
+ .enable_mask = need_gmem_desc_set ? ENABLE_NON_GMEM : ENABLE_ALL,
+ .ib = desc_sets,
};
- if (needs_border) {
- result = tu6_emit_border_color(cmd, cs);
+ if (need_gmem_desc_set) {
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ &desc_sets_gmem, true);
+ if (result != VK_SUCCESS)
+ return result;
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_DESC_SETS_GMEM,
+ .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
+ .ib = desc_sets_gmem,
+ };
+ }
+
+ /* We need to reload the descriptors every time the descriptor sets
+ * change. However, the commands we send only depend on the pipeline
+ * because the whole point is to cache descriptors which are used by the
+ * pipeline. There's a problem here, in that the firmware has an
+ * "optimization" which skips executing groups that are set to the same
+ * value as the last draw. This means that if the descriptor sets change
+ * but not the pipeline, we'd try to re-execute the same buffer which
+ * the firmware would ignore and we wouldn't pre-load the new
+ * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
+ * the descriptor sets change, which we emulate here by copying the
+ * pre-prepared buffer.
+ */
+ const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
+ if (load_entry->size > 0) {
+ struct tu_cs load_cs;
+ result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
if (result != VK_SUCCESS)
return result;
+ tu_cs_emit_array(&load_cs,
+ (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
+ load_entry->size / 4);
+ struct tu_cs_entry load_copy = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_DESC_SETS_LOAD,
+ /* The blob seems to not enable this for binning, even when
+ * resources would actually be used in the binning shader.
+ * Presumably the overhead of prefetching the resources isn't
+ * worth it.
+ */
+ .enable_mask = ENABLE_DRAW,
+ .ib = load_copy,
+ };
}
}
unsigned i;
for_each_bit(i, descriptors_state->valid) {
struct tu_descriptor_set *set = descriptors_state->sets[i];
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
- if (set->descriptors[j]) {
- tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+ if (set->buffers[j]) {
+ tu_bo_list_add(&cmd->bo_list, set->buffers[j],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+ }
+ if (set->size > 0) {
+ tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
}
}
+ if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
+ for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+ const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
+ if (buf) {
+ tu_bo_list_add(&cmd->bo_list, buf->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+ }
+ }
+ }
+
+   /* There are too many graphics dirty bits to list here, so just list the
+    * bits to preserve instead. The only state not emitted here is
+    * compute-related state.
+    */
+ cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
/* Fragment shader state overwrites compute shader state, so flag the
* compute pipeline for re-emit.
*/
- cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
+ cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
return VK_SUCCESS;
}
+static void
+tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ const struct tu_draw_info *draw)
+{
+ const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
+ bool has_gs = cmd->state.pipeline->active_stages &
+ VK_SHADER_STAGE_GEOMETRY_BIT;
+
+ tu_cs_emit_regs(cs,
+ A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
+ A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
+
+ if (draw->indexed) {
+ const enum a4xx_index_size index_size =
+ tu6_index_size(cmd->state.index_type);
+ const uint32_t index_bytes =
+ (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
+ const struct tu_buffer *index_buf = cmd->state.index_buffer;
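+      /* The index count lives in the indirect buffer and isn't known on the
+       * CPU, so clamp the fetch to what remains of the bound index buffer.
+       */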
+      unsigned max_indices =
+ (index_buf->size - cmd->state.index_offset) / index_bytes;
+
+ const uint32_t cp_draw_indx =
+ CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
+ CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
+ CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
+ CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
+ COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
+
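+      /* CP_DRAW_INDX_INDIRECT makes the CP read the draw parameters (the
+       * VkDrawIndexedIndirectCommand) from the indirect buffer, so only the
+       * addresses are emitted here.
+       */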
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
+ tu_cs_emit(cs, cp_draw_indx);
+ tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
+      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indices));
+ tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
+ } else {
+ const uint32_t cp_draw_indx =
+ CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
+ CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
+ CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
+ COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
+
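+      /* Non-indexed: CP_DRAW_INDIRECT reads the VkDrawIndirectCommand from
+       * the indirect buffer.
+       */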
+ tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
+ tu_cs_emit(cs, cp_draw_indx);
+ tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
+ }
+
+ tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
+}
+
static void
tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
{
const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
+ bool has_gs = cmd->state.pipeline->active_stages &
+ VK_SHADER_STAGE_GEOMETRY_BIT;
tu_cs_emit_regs(cs,
A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
+ CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
+ COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
tu_cs_emit(cs, cp_draw_indx);
const uint32_t cp_draw_indx =
CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
- CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) | 0x2000;
+ CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
+ COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
tu_cs_emit(cs, cp_draw_indx);
return;
}
- if (draw->indirect) {
- tu_finishme("indirect draw");
- return;
- }
+ if (draw->indirect)
+ tu6_emit_draw_indirect(cmd, cs, draw);
+ else
+ tu6_emit_draw_direct(cmd, cs, draw);
- tu6_emit_draw_direct(cmd, cs, draw);
+ if (cmd->state.streamout_enabled) {
+ for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+ if (cmd->state.streamout_enabled & (1 << i))
+ tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
+ }
+ }
cmd->wait_for_idle = true;
tu_draw(cmd_buffer, &info);
}
+void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
+ uint32_t instanceCount,
+ uint32_t firstInstance,
+ VkBuffer _counterBuffer,
+ VkDeviceSize counterBufferOffset,
+ uint32_t counterOffset,
+ uint32_t vertexStride)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);
+
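+   /* Per VK_EXT_transform_feedback, the vertex count is derived from the
+    * byte count that a previous transform feedback pass wrote to the counter
+    * buffer, divided by vertexStride; only the instancing parameters are
+    * known on the CPU here.
+    */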
+ struct tu_draw_info info = {};
+
+ info.instance_count = instanceCount;
+ info.first_instance = firstInstance;
+ info.streamout_buffer = buffer;
+ info.streamout_buffer_offset = counterBufferOffset;
+ info.stride = vertexStride;
+
+ tu_draw(cmd_buffer, &info);
+}
+
struct tu_dispatch_info
{
/**
tu_emit_compute_driver_params(cs, pipeline, info);
- bool needs_border;
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_COMPUTE, &ib, &needs_border, false);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
- if (ib.size)
- tu_cs_emit_ib(cs, &ib);
-
- if (needs_border)
- tu_finishme("compute border color");
-
- result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
- if (ib.size)
- tu_cs_emit_ib(cs, &ib);
+ if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) {
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_COMPUTE, &ib,
+ false);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
- /* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
+ /* track BOs */
unsigned i;
for_each_bit(i, descriptors_state->valid) {
struct tu_descriptor_set *set = descriptors_state->sets[i];
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
- if (set->descriptors[j]) {
- tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+ if (set->buffers[j]) {
+ tu_bo_list_add(&cmd->bo_list, set->buffers[j],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+ }
+
+ if (set->size > 0) {
+ tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
}
}
+ if (ib.size)
+ tu_cs_emit_ib(cs, &ib);
+
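+   /* Pre-load the descriptors using the pipeline's prepared LOAD_STATE
+    * commands, mirroring the graphics path.
+    */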
+ if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)
+ tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
+
+ cmd->state.dirty &=
+ ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+
/* Compute shader state overwrites fragment shader state, so we flag the
* graphics pipeline for re-emit.
*/
- cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
+ cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
};
static void
-tu_barrier(struct tu_cmd_buffer *cmd_buffer,
+tu_barrier(struct tu_cmd_buffer *cmd,
uint32_t memoryBarrierCount,
const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers,
const struct tu_barrier_info *info)
{
+   /* The renderpass case only covers subpass self-dependencies, which means
+    * syncing the render output with the texture cache.
+    * Note: only the CACHE_INVALIDATE is needed in GMEM mode, and in sysmem
+    * mode we might not need the color/depth flushes either.
+    */
+ if (cmd->state.pass) {
+ tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
+ tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
+ tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
+ return;
+ }
}
void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- VkBool32 byRegion,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount,
const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,