return VK_SUCCESS;
}
+static bool
+is_linear_mipmapped(const struct tu_image_view *iview)
+{
+ return iview->image->layout.tile_mode == TILE6_LINEAR &&
+ iview->base_mip != iview->image->level_count - 1;
+}
+
+static bool
+force_sysmem(const struct tu_cmd_buffer *cmd,
+ const struct VkRect2D *render_area)
+{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+ const struct tu_physical_device *device = cmd->device->physical_device;
+ bool has_linear_mipmapped_store = false;
+ const struct tu_render_pass *pass = cmd->state.pass;
+
+ /* Iterate over all the places we call tu6_emit_store_attachment() */
+ for (unsigned i = 0; i < pass->subpass_count; i++) {
+ const struct tu_subpass *subpass = &pass->subpasses[i];
+ if (subpass->resolve_attachments) {
+ for (unsigned j = 0; j < subpass->color_count; j++) {
+ uint32_t a = subpass->resolve_attachments[j].attachment;
+ if (a != VK_ATTACHMENT_UNUSED &&
+ pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
+ const struct tu_image_view *iview = fb->attachments[a].attachment;
+ if (is_linear_mipmapped(iview)) {
+ has_linear_mipmapped_store = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < pass->attachment_count; i++) {
+ if (pass->attachments[i].gmem_offset >= 0 &&
+ pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
+ const struct tu_image_view *iview = fb->attachments[i].attachment;
+ if (is_linear_mipmapped(iview)) {
+ has_linear_mipmapped_store = true;
+ break;
+ }
+ }
+ }
+
+ /* Linear textures cannot have any padding between mipmap levels and their
+ * height isn't padded, while at the same time the GMEM->MEM resolve does
+ * not have per-pixel granularity, so if the image height isn't aligned to
+ * the resolve granularity and the render area is tall enough, we may wind
+ * up writing past the bottom of the image into the next miplevel or even
+ * past the end of the image. For the last miplevel the layout code inserts
+ * enough padding that the overdraw lands in the padding, but for earlier
+ * miplevels there is no such slack, so we work around the problem by
+ * force-enabling sysmem rendering.
+ */
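+ /* For example, assuming a typical tile_align_h of 16: a 250-pixel-tall
+ * linear miplevel with a full-height render area gives y2 = 250 and
+ * aligned_y2 = 256, so the resolve could write 6 rows past the end of the
+ * miplevel; aligned_y2 > fb->height then triggers the fallback below.
+ */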
+ const uint32_t y2 = render_area->offset.y + render_area->extent.height;
+ const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
+
+ return has_linear_mipmapped_store && aligned_y2 > fb->height;
+}
+
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
const struct tu_device *dev,
}
static void
-tu6_emit_bin_size(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t flags)
+tu6_emit_bin_size(struct tu_cs *cs,
+ uint32_t bin_w, uint32_t bin_h, uint32_t flags)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
- const uint32_t bin_w = tiling->tile0.extent.width;
- const uint32_t bin_h = tiling->tile0.extent.height;
-
tu_cs_emit_regs(cs,
A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
.binh = bin_h,
return (tiling->tile_count.width * tiling->tile_count.height) > 2;
}
+static bool
+use_sysmem_rendering(struct tu_cmd_buffer *cmd)
+{
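+ /* The TU_DEBUG_SYSMEM flag (set through the TU_DEBUG environment
+ * variable) forces the sysmem path for every render pass; otherwise we
+ * only use it when force_sysmem() found a case the gmem path can't
+ * handle.
+ */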
+ if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
+ return true;
+
+ return cmd->state.tiling_config.force_sysmem;
+}
+
static void
tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
const struct tu_tile *tile)
{
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x7));
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
tu6_emit_marker(cmd, cs);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM) | 0x10);
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
tu6_emit_marker(cmd, cs);
const uint32_t x1 = tile->begin.x;
if (!clear_mask)
return;
- const struct tu_native_format *format =
- tu6_get_native_format(iview->vk_format);
- assert(format && format->rb >= 0);
+ tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
+ &info->pClearValues[a]);
+}
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_DST_INFO(.color_format = format->rb));
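+/* Emit the GMEM blit wrapped in CP_COND_REG_EXEC so that it only executes
+ * when the CP is in GMEM rendering mode; this lets the same draw_cs be
+ * replayed by both the gmem and sysmem paths. The space constant below is
+ * assumed to be a worst-case reservation for the predicated packets.
+ */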
+static void
+tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ uint32_t a,
+ uint32_t gmem_a,
+ bool resolve)
+{
+ const uint32_t space = 14 + 6;
+ struct tu_cond_exec_state state;
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_INFO(.gmem = true,
- .clear_mask = clear_mask));
+ VkResult result = tu_cond_exec_start(cmd->device, cs, &state,
+ CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+ CP_COND_REG_EXEC_0_GMEM,
+ space);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_BASE_GMEM(attachment->gmem_offset));
+ tu6_emit_blit_info(cmd, cs,
+ cmd->state.framebuffer->attachments[a].attachment,
+ cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
+ tu6_emit_blit(cmd, cs);
- tu_cs_emit_regs(cs,
- A6XX_RB_UNKNOWN_88D0(0));
+ tu_cond_exec_end(cs, &state);
+}
- uint32_t clear_vals[4] = { 0 };
- tu_pack_clear_value(&info->pClearValues[a], iview->vk_format, clear_vals);
+static void
+tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ uint32_t a,
+ uint32_t gmem_a)
+{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+ const struct tu_image_view *dst = fb->attachments[a].attachment;
+ const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
+
+ tu_blit(cmd, cs, &(struct tu_blit) {
+ .dst = sysmem_attachment_surf(dst, dst->base_layer,
+ &cmd->state.tiling_config.render_area),
+ .src = sysmem_attachment_surf(src, src->base_layer,
+ &cmd->state.tiling_config.render_area),
+ .layers = fb->layers,
+ });
+}
- tu_cs_emit_regs(cs,
- A6XX_RB_BLIT_CLEAR_COLOR_DW0(clear_vals[0]),
- A6XX_RB_BLIT_CLEAR_COLOR_DW1(clear_vals[1]),
- A6XX_RB_BLIT_CLEAR_COLOR_DW2(clear_vals[2]),
- A6XX_RB_BLIT_CLEAR_COLOR_DW3(clear_vals[3]));
- tu6_emit_blit(cmd, cs);
+/* Emit an MSAA resolve operation, with both gmem and sysmem paths. */
+static void
+tu6_emit_resolve(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ uint32_t a,
+ uint32_t gmem_a)
+{
+ if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
+ return;
+
+ tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
+
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+ const uint32_t space = 25 + 66 * fb->layers + 17;
+ struct tu_cond_exec_state state;
+
+ VkResult result = tu_cond_exec_start(cmd->device, cs, &state,
+ CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+ CP_COND_REG_EXEC_0_SYSMEM,
+ space);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
+ tu_cond_exec_end(cs, &state);
}
static void
tu6_emit_marker(cmd, cs);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE) | 0x10);
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
tu6_emit_marker(cmd, cs);
tu6_emit_blit_scissor(cmd, cs, true);
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- struct tu_physical_device *phys_dev = cmd->device->physical_device;
-
VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
if (result != VK_SUCCESS) {
cmd->record_result = result;
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, phys_dev->magic.RB_CCU_CNTL_gmem);
+ tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
tu_cs_emit_wfi(cs);
tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(.unknown = 0x7c400004));
+ A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
cmd->wait_for_idle = false;
}
static void
-tu6_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
+ uint32_t a,
+ const VkRenderPassBeginInfo *info)
+{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+ const struct tu_image_view *iview = fb->attachments[a].attachment;
+ const struct tu_render_pass_attachment *attachment =
+ &cmd->state.pass->attachments[a];
+ unsigned clear_mask = 0;
+
+ /* note: gmem_offset < 0 means the attachment isn't used by any subpass and shouldn't be cleared anyway */
+ if (attachment->gmem_offset < 0)
+ return;
+
+ if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ clear_mask = 0xf;
+ }
+
+ if (vk_format_has_stencil(iview->vk_format)) {
+ clear_mask &= 0x1;
+ if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
+ clear_mask |= 0x2;
+ if (clear_mask != 0x3)
+ tu_finishme("depth/stencil only load op");
+ }
+
+ if (!clear_mask)
+ return;
+
+ tu_clear_sysmem_attachment(cmd, cs, a,
+ &info->pClearValues[a], &(struct VkClearRect) {
+ .rect = info->renderArea,
+ .baseArrayLayer = iview->base_layer,
+ .layerCount = iview->layer_count,
+ });
+}
+
+static void
+tu_cmd_prepare_sysmem_clear_ib(struct tu_cmd_buffer *cmd,
+ const VkRenderPassBeginInfo *info)
+{
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
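+ /* Assumed worst-case command-stream size of one sysmem clear; these
+ * constants mirror the reservation made in tu6_emit_resolve.
+ */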
+ const uint32_t blit_cmd_space = 25 + 66 * fb->layers + 17;
+ const uint32_t clear_space =
+ blit_cmd_space * cmd->state.pass->attachment_count + 5;
+
+ struct tu_cs sub_cs;
+
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
+ clear_space, &sub_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
+ tu_emit_sysmem_clear_attachment(cmd, &sub_cs, i, info);
+
+ /* TODO: We shouldn't need this flush, but without it we'd have an empty IB
+ * when nothing clears, which we currently can't handle.
+ */
+ tu_cs_reserve_space(cmd->device, &sub_cs, 5);
+ tu6_emit_event_write(cmd, &sub_cs, PC_CCU_FLUSH_COLOR_TS, true);
+
+ cmd->state.sysmem_clear_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
+}
+
+static void
+tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
+ const struct VkRect2D *renderArea)
+{
+ VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ const struct tu_framebuffer *fb = cmd->state.framebuffer;
+ if (fb->width > 0 && fb->height > 0) {
+ tu6_emit_window_scissor(cmd, cs,
+ 0, 0, fb->width - 1, fb->height - 1);
+ } else {
+ tu6_emit_window_scissor(cmd, cs, 0, 0, 0, 0);
+ }
+
+ tu6_emit_window_offset(cmd, cs, 0, 0);
+
+ tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
+
+ tu_cs_emit_ib(cs, &cmd->state.sysmem_clear_ib);
+
+ tu6_emit_lrz_flush(cmd, cs);
+
+ tu6_emit_marker(cmd, cs);
+ tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
+ tu6_emit_marker(cmd, cs);
+
+ tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
+ tu_cs_emit(cs, 0x0);
+
+ tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
+ tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
+ tu6_emit_wfi(cmd, cs);
+ tu_cs_emit_regs(cs,
+ A6XX_RB_CCU_CNTL(0x10000000));
+
+ /* enable stream-out; with sysmem rendering there is only a single pass: */
+ tu_cs_emit_regs(cs,
+ A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+
+ tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
+ tu_cs_emit(cs, 0x1);
+
+ tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
+ tu_cs_emit(cs, 0x0);
+
+ tu_cs_sanity_check(cs);
+}
+
+static void
+tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+{
+ /* Do any resolves of the last subpass. These are handled in the
+ * tile_store_ib in the gmem path.
+ */
+
+ const struct tu_subpass *subpass = cmd->state.subpass;
+ if (subpass->resolve_attachments) {
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ uint32_t a = subpass->resolve_attachments[i].attachment;
+ if (a != VK_ATTACHMENT_UNUSED)
+ tu6_emit_sysmem_resolve(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
+ }
+ }
+
+ const uint32_t space = 14 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
+ VkResult result = tu_cs_reserve_space(cmd->device, cs, space);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
+
+ tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
+ tu_cs_emit(cs, 0x0);
+
+ tu6_emit_lrz_flush(cmd, cs);
+
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+
+ tu_cs_sanity_check(cs);
+}
+
+static void
+tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
tu6_emit_wfi(cmd, cs);
tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(0x7c400004));
+ A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
if (use_hw_binning(cmd)) {
- tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
+ tu6_emit_bin_size(cs,
+ tiling->tile0.extent.width,
+ tiling->tile0.extent.height,
+ A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
tu6_emit_binning_pass(cmd, cs);
- tu6_emit_bin_size(cmd, cs, A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
+ tu6_emit_bin_size(cs,
+ tiling->tile0.extent.width,
+ tiling->tile0.extent.height,
+ A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
tu_cs_emit_regs(cs,
A6XX_VFD_MODE_CNTL(0));
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x1);
} else {
- tu6_emit_bin_size(cmd, cs, 0x6000000);
+ tu6_emit_bin_size(cs,
+ tiling->tile0.extent.width,
+ tiling->tile0.extent.height,
+ 0x6000000);
}
tu_cs_sanity_check(cs);
/* if (no overflow) */ {
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x5) | 0x10);
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
}
}
}
static void
-tu6_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
const uint32_t space = 16 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
VkResult result = tu_cs_reserve_space(cmd->device, cs, space);
{
const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
- tu6_render_begin(cmd, &cmd->cs);
+ tu6_tile_render_begin(cmd, &cmd->cs);
for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
}
}
- tu6_render_end(cmd, &cmd->cs);
+ tu6_tile_render_end(cmd, &cmd->cs);
+}
+
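+/* Sysmem (direct) rendering: execute the draw_cs once over the whole render
+ * area instead of once per tile, with no tile load/store IBs.
+ */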
+static void
+tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
+{
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+
+ tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
+
+ const uint32_t space = tu_cs_get_call_size(&cmd->draw_cs);
+ VkResult result = tu_cs_reserve_space(cmd->device, &cmd->cs, space);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
+ cmd->wait_for_idle = true;
+
+ tu6_sysmem_render_end(cmd, &cmd->cs);
}
static void
struct tu_tiling_config *tiling = &cmd->state.tiling_config;
tiling->render_area = *render_area;
+ tiling->force_sysmem = force_sysmem(cmd, render_area);
tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
tu_tiling_config_update_pipe_layout(tiling, dev);
cmd->state.framebuffer = fb;
tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
+ tu_cmd_prepare_sysmem_clear_ib(cmd, pRenderPassBegin);
tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
tu_cmd_prepare_tile_store_ib(cmd);
const struct tu_render_pass *pass = cmd->state.pass;
struct tu_cs *cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
const struct tu_subpass *subpass = cmd->state.subpass++;
/* TODO:
* if msaa samples change between subpasses,
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
if (a != VK_ATTACHMENT_UNUSED) {
- tu6_emit_store_attachment(cmd, cs, a,
- subpass->color_attachments[i].attachment);
+ tu6_emit_resolve(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
}
}
}
+ VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
/* invalidate because reading input attachments will cache GMEM and
* the cache isn't updated when GMEM is written
* TODO: is there a no-cache bit for textures?
tu6_emit_msaa(cmd, cmd->state.subpass, cs);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
+ /* Emit flushes so that input attachments will read the correct value. This
+ * is for sysmem only, although it shouldn't do much harm on gmem.
+ */
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+
/* TODO:
* since we don't know how to do GMEM->GMEM resolve,
* resolve attachments are resolved to memory then loaded to GMEM again if needed
if (subpass->resolve_attachments) {
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
- const struct tu_image_view *iview =
- cmd->state.framebuffer->attachments[a].attachment;
if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
- tu6_emit_blit_info(cmd, cs, iview, pass->attachments[a].gmem_offset, false);
- tu6_emit_blit(cmd, cs);
+ tu6_emit_predicated_blit(cmd, cs, a, a, false);
}
}
}
TU_DRAW_STATE_VS_CONST,
TU_DRAW_STATE_FS_CONST,
TU_DRAW_STATE_VS_TEX,
- TU_DRAW_STATE_FS_TEX,
+ TU_DRAW_STATE_FS_TEX_SYSMEM,
+ TU_DRAW_STATE_FS_TEX_GMEM,
TU_DRAW_STATE_FS_IBO,
TU_DRAW_STATE_VS_PARAMS,
uint32_t *dst,
struct tu_descriptor_state *descriptors_state,
const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index)
+ unsigned i, unsigned array_index, bool is_sysmem)
{
assert(descriptors_state->valid & (1 << map->set[i]));
break;
}
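+ /* For input attachments the gmem path must rewrite the texture descriptor
+ * to point at the attachment's location in GMEM; the sysmem path reads the
+ * image in system memory directly, so it skips this rewrite.
+ */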
- if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+ if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
array_index].attachment;
struct tu_descriptor_state *descriptors_state,
gl_shader_stage type,
struct tu_cs_entry *entry,
- bool *needs_border)
+ bool *needs_border,
+ bool is_sysmem)
{
struct tu_device *device = cmd->device;
struct tu_cs *draw_state = &cmd->sub_cs;
for (int j = 0; j < link->texture_map.array_size[i]; j++) {
write_tex_const(cmd,
&tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
- descriptors_state, &link->texture_map, i, j);
+ descriptors_state, &link->texture_map, i, j,
+ is_sysmem);
}
}
if (cmd->state.dirty &
(TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
bool needs_border = false;
- struct tu_cs_entry vs_tex, fs_tex, fs_ibo;
+ struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_VERTEX, &vs_tex, &needs_border);
+ MESA_SHADER_VERTEX, &vs_tex, &needs_border,
+ false);
if (result != VK_SUCCESS)
return result;
+ /* TODO: we could emit just one texture descriptor draw state when there
+ * are no input attachments, which is the most common case. We could
+ * also split out the sampler state, which doesn't change even for input
+ * attachments.
+ */
result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_tex, &needs_border);
+ MESA_SHADER_FRAGMENT, &fs_tex_sysmem,
+ &needs_border, true);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = tu6_emit_textures(cmd, pipeline, descriptors_state,
+ MESA_SHADER_FRAGMENT, &fs_tex_gmem,
+ &needs_border, false);
if (result != VK_SUCCESS)
return result;
};
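+ /* Both texture states are added to the draw state; the enable masks make
+ * the CP execute only the group matching the render mode selected with
+ * CP_SET_MARKER, so the gmem and sysmem paths each see the right input
+ * attachment descriptors.
+ */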
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_TEX,
- .enable_mask = ENABLE_DRAW,
- .ib = fs_tex,
+ .id = TU_DRAW_STATE_FS_TEX_GMEM,
+ .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
+ .ib = fs_tex_gmem,
+ };
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
+ .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
+ .ib = fs_tex_sysmem,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
bool needs_border;
result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_COMPUTE, &ib, &needs_border);
+ MESA_SHADER_COMPUTE, &ib, &needs_border, false);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
- tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(0x8));
+ tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
const uint32_t *local_size = pipeline->compute.local_size;
const uint32_t *num_groups = info->blocks;
tu_cs_end(&cmd_buffer->draw_cs);
tu_cs_end(&cmd_buffer->draw_epilogue_cs);
- tu_cmd_render_tiles(cmd_buffer);
+ if (use_sysmem_rendering(cmd_buffer))
+ tu_cmd_render_sysmem(cmd_buffer);
+ else
+ tu_cmd_render_tiles(cmd_buffer);
/* discard draw_cs and draw_epilogue_cs entries now that the tiles are
rendered */