static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
const struct tu_device *dev,
- uint32_t pixels)
+ const struct tu_render_pass *pass)
{
- const uint32_t tile_align_w = 64; /* note: 32 when no input attachments */
- const uint32_t tile_align_h = 16;
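+ /* the tile width alignment now comes from the render pass and may not be a
+ * power of two, hence util_align_npot() for the width math below
+ */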
+ const uint32_t tile_align_w = pass->tile_align_w;
const uint32_t max_tile_width = 1024;
/* note: don't offset the tiling config by render_area.offset,
.height = 1,
};
tiling->tile0.extent = (VkExtent2D) {
- .width = align(ra_width, tile_align_w),
- .height = align(ra_height, tile_align_h),
+ .width = util_align_npot(ra_width, tile_align_w),
+ .height = align(ra_height, TILE_ALIGN_H),
};
if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
/* start with 2x2 tiles */
tiling->tile_count.width = 2;
tiling->tile_count.height = 2;
- tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
- tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
+ tiling->tile0.extent.width = util_align_npot(DIV_ROUND_UP(ra_width, 2), tile_align_w);
+ tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), TILE_ALIGN_H);
}
/* do not exceed max tile width */
while (tiling->tile0.extent.width > max_tile_width) {
tiling->tile_count.width++;
tiling->tile0.extent.width =
- align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
+ util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
}
/* will force to sysmem, don't bother trying to have a valid tile config
* TODO: just skip all GMEM stuff when sysmem is forced?
*/
- if (!pixels)
+ if (!pass->gmem_pixels)
return;
/* do not exceed gmem size */
- while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
+ while (tiling->tile0.extent.width * tiling->tile0.extent.height > pass->gmem_pixels) {
if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
tiling->tile_count.width++;
tiling->tile0.extent.width =
- align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
+ util_align_npot(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
} else {
/* if this assert fails then layout is impossible.. */
- assert(tiling->tile0.extent.height > tile_align_h);
+ assert(tiling->tile0.extent.height > TILE_ALIGN_H);
tiling->tile_count.height++;
tiling->tile0.extent.height =
- align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
+ align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), TILE_ALIGN_H);
}
}
}
}
const struct tu_image_view *iview = fb->attachments[a].attachment;
- enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);
+ const struct tu_render_pass_attachment *attachment =
+ &cmd->state.pass->attachments[a];
+ enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);
- tu_cs_emit_regs(cs,
- A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
- A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
- A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
- A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
- A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
+ tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
+ tu_cs_image_ref(cs, iview, 0);
+ tu_cs_emit(cs, attachment->gmem_offset);
tu_cs_emit_regs(cs,
A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
- tu_cs_emit_regs(cs,
- A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
- A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
+ tu_cs_image_flag_ref(cs, iview, 0);
tu_cs_emit_regs(cs,
A6XX_GRAS_LRZ_BUFFER_BASE(0),
A6XX_GRAS_LRZ_BUFFER_PITCH(0),
A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
- tu_cs_emit_regs(cs,
- A6XX_RB_STENCIL_INFO(0));
-
- /* enable zs? */
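+ /* S8_UINT attachments are bound as separate stencil through RB_STENCIL_INFO */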
+ if (attachment->format == VK_FORMAT_S8_UINT) {
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
+ tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
+ tu_cs_image_ref(cs, iview, 0);
+ tu_cs_emit(cs, attachment->gmem_offset);
+ } else {
+ tu_cs_emit_regs(cs,
+ A6XX_RB_STENCIL_INFO(0));
+ }
}
static void
struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- unsigned char mrt_comp[MAX_RTS] = { 0 };
- unsigned srgb_cntl = 0;
for (uint32_t i = 0; i < subpass->color_count; ++i) {
uint32_t a = subpass->color_attachments[i].attachment;
const struct tu_image_view *iview = fb->attachments[a].attachment;
- mrt_comp[i] = 0xf;
-
- if (vk_format_is_srgb(iview->vk_format))
- srgb_cntl |= (1 << i);
-
- struct tu_native_format format =
- tu6_format_image(iview->image, iview->vk_format, iview->base_mip);
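+ /* RB_MRT_BUF_INFO and the buffer pitch/base dwords are pre-packed in the
+ * image view; emit them as one raw packet followed by the GMEM offset
+ */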
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
+ tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
+ tu_cs_image_ref(cs, iview, 0);
+ tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
tu_cs_emit_regs(cs,
- A6XX_RB_MRT_BUF_INFO(i,
- .color_tile_mode = format.tile_mode,
- .color_format = format.fmt,
- .color_swap = format.swap),
- A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
- A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
- A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
- A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
+ A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));
- tu_cs_emit_regs(cs,
- A6XX_SP_FS_MRT_REG(i,
- .color_format = format.fmt,
- .color_sint = vk_format_is_sint(iview->vk_format),
- .color_uint = vk_format_is_uint(iview->vk_format)));
-
- tu_cs_emit_regs(cs,
- A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
- A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
+ tu_cs_image_flag_ref(cs, iview, 0);
}
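+ /* the sRGB enable mask is now precomputed per subpass at render pass creation */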
tu_cs_emit_regs(cs,
- A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
-
+ A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
tu_cs_emit_regs(cs,
- A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
+ A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
- tu_cs_emit_regs(cs,
- A6XX_RB_RENDER_COMPONENTS(
- .rt0 = mrt_comp[0],
- .rt1 = mrt_comp[1],
- .rt2 = mrt_comp[2],
- .rt3 = mrt_comp[3],
- .rt4 = mrt_comp[4],
- .rt5 = mrt_comp[5],
- .rt6 = mrt_comp[6],
- .rt7 = mrt_comp[7]));
-
- tu_cs_emit_regs(cs,
- A6XX_SP_FS_RENDER_COMPONENTS(
- .rt0 = mrt_comp[0],
- .rt1 = mrt_comp[1],
- .rt2 = mrt_comp[2],
- .rt3 = mrt_comp[3],
- .rt4 = mrt_comp[4],
- .rt5 = mrt_comp[5],
- .rt6 = mrt_comp[6],
- .rt7 = mrt_comp[7]));
-
- // XXX: We probably can't hardcode LAYER_CNTL_TYPE.
- tu_cs_emit_regs(cs,
- A6XX_GRAS_LAYER_CNTL(.layered = fb->layers > 1,
- .type = LAYER_2D_ARRAY));
+ tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
void
continue;
const struct tu_image_view *iview = fb->attachments[a].attachment;
- if (iview->image->layout.ubwc_layer_size != 0)
+ if (iview->ubwc_enabled)
mrts_ubwc_enable |= 1 << i;
}
const uint32_t a = subpass->depth_stencil_attachment.attachment;
if (a != VK_ATTACHMENT_UNUSED) {
const struct tu_image_view *iview = fb->attachments[a].attachment;
- if (iview->image->layout.ubwc_layer_size != 0)
+ if (iview->ubwc_enabled)
cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
}
tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
- tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
- tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
- tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
+ tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
+ tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
+ tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x0);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
-
- tu_cs_emit_regs(cs,
- A6XX_RB_UNKNOWN_8804(0));
-
- tu_cs_emit_regs(cs,
- A6XX_SP_TP_UNKNOWN_B304(0));
-
- tu_cs_emit_regs(cs,
- A6XX_GRAS_UNKNOWN_80A4(0));
} else {
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x1);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
- /* blit scissor may have been changed by CmdClearAttachments */
- tu6_emit_blit_scissor(cmd, cs, false);
+ tu6_emit_blit_scissor(cmd, cs, true);
for (uint32_t a = 0; a < pass->attachment_count; ++a) {
if (pass->attachments[a].gmem_offset >= 0)
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
+ tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
+ A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
- tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
tu_cs_emit_regs(cs,
A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
.height = tiling->tile0.extent.height),
- A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
- .bo_offset = 32 * cmd->vsc_data_pitch));
+ A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
+ .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
tu_cs_emit_regs(cs,
A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
tu_cs_emit(cs, tiling->pipe_config[i]);
tu_cs_emit_regs(cs,
- A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
- A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
- A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
+ A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
+ A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
+ A6XX_VSC_PRIM_STRM_ARRAY_PITCH(cmd->vsc_prim_strm.size));
tu_cs_emit_regs(cs,
- A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
- A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
- A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
+ A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
+ A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
+ A6XX_VSC_DRAW_STRM_ARRAY_PITCH(cmd->vsc_draw_strm.size));
}
static void
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
- tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
+ tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
- tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
+ tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
}
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
* if (b0 set)..
*/
- /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
+ /* b0 will be set if VSC_DRAW_STRM or VSC_PRIM_STRM overflow: */
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
tu6_emit_blit_scissor(cmd, cs, true);
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
- tu_load_gmem_attachment(cmd, cs, i);
+ tu_load_gmem_attachment(cmd, cs, i, false);
tu6_emit_blit_scissor(cmd, cs, false);
tu6_emit_lrz_flush(cmd, cs);
- tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS, true);
tu_cs_sanity_check(cs);
}
tiling->render_area = *render_area;
tiling->force_sysmem = false;
- tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
+ tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass);
tu_tiling_config_update_pipe_layout(tiling, dev);
tu_tiling_config_update_pipes(tiling, dev);
}
goto fail_scratch_bo;
/* TODO: resize on overflow */
- cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
- cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
- cmd_buffer->vsc_data = device->vsc_data;
- cmd_buffer->vsc_data2 = device->vsc_data2;
+ cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
+ cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
+ cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
+ cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
return VK_SUCCESS;
list_del(&cmd_buffer->pool_link);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
tu_cs_finish(&cmd_buffer->cs);
tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
tu_cs_reset(&cmd_buffer->sub_cs);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
}
return VK_SUCCESS;
}
+/* Sets vertex buffers to HW binding points. We emit VBs in SDS (so that bin
+ * rendering can skip over unused state), so we need to collect all the
+ * bindings together into a single state emit at draw time.
+ */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
uint32_t firstBinding,
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
- cmd->state.vb.buffers[firstBinding + i] =
- tu_buffer_from_handle(pBuffers[i]);
+ struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
+
+ cmd->state.vb.buffers[firstBinding + i] = buf;
cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
+
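+ /* track the buffer BO at bind time; the VB state itself is now emitted in
+ * a draw state group at draw time
+ */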
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
- /* VB states depend on VkPipelineVertexInputStateCreateInfo */
cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
descriptors_state->sets[idx] = set;
descriptors_state->valid |= (1u << idx);
+ /* Note: the actual input attachment indices come from the shader
+ * itself, so we can't generate the patched versions of these until
+ * draw time when both the pipeline and descriptors are bound and
+ * we're inside the render pass.
+ */
+ unsigned dst_idx = layout->set[idx].input_attachment_start;
+ memcpy(&descriptors_state->input_attachments[dst_idx * A6XX_TEX_CONST_DWORDS],
+ set->dynamic_descriptors,
+ set->layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
+
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
- unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
+ /* Dynamic buffers come after input attachments in the descriptor set
+ * itself, but due to how the Vulkan descriptor set binding works, we
+ * have to put input attachments and dynamic buffers in separate
+ * buffers in the descriptor_state and then combine them at draw
+ * time. Binding a descriptor set only invalidates the descriptor
+ * sets after it, but if we try to tightly pack the descriptors after
+ * the input attachments then we could corrupt dynamic buffers in the
+ * descriptor set before it, or we'd have to move all the dynamic
+ * buffers over. We just put them into separate buffers to make
+ * binding as well as the later patching of input attachments easy.
+ */
+ unsigned src_idx = j + set->layout->input_attachment_count;
+ unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
assert(dyn_idx < dynamicOffsetCount);
- descriptors_state->dynamic_buffers[idx] =
- set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
+ uint32_t *dst =
+ &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
+ uint32_t *src =
+ &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
+ uint32_t offset = pDynamicOffsets[dyn_idx];
+
+ /* Patch the storage/uniform descriptors right away. */
+ if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
+ /* Note: we can assume here that the addition won't roll over and
+ * change the SIZE field.
+ */
+ uint64_t va = src[0] | ((uint64_t)src[1] << 32);
+ va += offset;
+ dst[0] = va;
+ dst[1] = va >> 32;
+ } else {
+ memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
+ /* Note: A6XX_IBO_5_DEPTH is always 0 */
+ uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
+ va += offset;
+ dst[4] = va;
+ dst[5] = va >> 32;
+ }
}
}
- cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
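+ /* compute and graphics descriptor state use separate dirty bits, so
+ * binding one does not force re-emitting the other
+ */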
+ if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
+ cmd_buffer->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+ else
+ cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
}
void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
}
if (cmd_buffer->use_vsc_data) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
+void
+tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+
+ tu6_emit_sample_locations(&cmd->draw_cs, pSampleLocationsInfo);
+}
+
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
- tu_cs_emit_call(&cmd->cs, &secondary->cs);
+ tu_cs_add_entries(&cmd->cs, &secondary->cs);
}
}
cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
tu_bo_list_add(&cmd->bo_list, iview->image->bo,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+
+ /* Flag input attachment descriptors for re-emission if necessary */
+ cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}
void
tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
if (subpass->resolve_attachments) {
+ tu6_emit_blit_scissor(cmd, cs, true);
+
for (unsigned i = 0; i < subpass->color_count; i++) {
uint32_t a = subpass->resolve_attachments[i].attachment;
if (a == VK_ATTACHMENT_UNUSED)
continue;
tu_store_gmem_attachment(cmd, cs, a,
- subpass->color_attachments[i].attachment);
+ subpass->color_attachments[i].attachment);
if (pass->attachments[a].gmem_offset < 0)
continue;
* if it is, should be doing a GMEM->GMEM resolve instead of GMEM->MEM->GMEM..
*/
tu_finishme("missing GMEM->GMEM resolve path\n");
- tu_emit_load_gmem_attachment(cmd, cs, a);
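+ /* reload the resolve destination into GMEM; the extra argument presumably
+ * forces the load even when the attachment's load-op would skip it
+ */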
+ tu_load_gmem_attachment(cmd, cs, a, true);
}
}
tu6_emit_mrt(cmd, cmd->state.subpass, cs);
tu6_emit_msaa(cs, cmd->state.subpass->samples);
tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
+
+ /* Flag input attachment descriptors for re-emission if necessary */
+ cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
}
void
#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+#define ENABLE_NON_GMEM (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_SYSMEM)
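+/* for state that has a separate GMEM-only variant (e.g. descriptor sets with
+ * input attachments patched to read from GMEM)
+ */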
enum tu_draw_state_group_id
{
TU_DRAW_STATE_PROGRAM,
TU_DRAW_STATE_PROGRAM_BINNING,
+ TU_DRAW_STATE_VB,
TU_DRAW_STATE_VI,
TU_DRAW_STATE_VI_BINNING,
TU_DRAW_STATE_VP,
TU_DRAW_STATE_DS,
TU_DRAW_STATE_BLEND,
TU_DRAW_STATE_VS_CONST,
+ TU_DRAW_STATE_GS_CONST,
TU_DRAW_STATE_FS_CONST,
- TU_DRAW_STATE_VS_TEX,
- TU_DRAW_STATE_FS_TEX_SYSMEM,
- TU_DRAW_STATE_FS_TEX_GMEM,
- TU_DRAW_STATE_FS_IBO,
+ TU_DRAW_STATE_DESC_SETS,
+ TU_DRAW_STATE_DESC_SETS_GMEM,
+ TU_DRAW_STATE_DESC_SETS_LOAD,
TU_DRAW_STATE_VS_PARAMS,
TU_DRAW_STATE_COUNT,
struct tu_cs_entry ib;
};
-const static void *
-sampler_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map, unsigned i,
- unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- if (layout->immutable_samplers_offset) {
- const uint32_t *immutable_samplers =
- tu_immutable_samplers(set->layout, layout);
-
- return &immutable_samplers[array_index * A6XX_TEX_SAMP_DWORDS];
- }
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- return &set->mapped_ptr[layout->offset / 4];
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- return &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
- array_index * (A6XX_TEX_CONST_DWORDS + A6XX_TEX_SAMP_DWORDS)];
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-}
-
-static void
-write_tex_const(struct tu_cmd_buffer *cmd,
- uint32_t *dst,
- struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index, bool is_sysmem)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- array_index * A6XX_TEX_CONST_DWORDS],
- A6XX_TEX_CONST_DWORDS * 4);
- break;
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- array_index *
- (A6XX_TEX_CONST_DWORDS +
- A6XX_TEX_SAMP_DWORDS)],
- A6XX_TEX_CONST_DWORDS * 4);
- break;
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-
- if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
- uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
- array_index].attachment;
- const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
-
- assert(att->gmem_offset >= 0);
-
- dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
- dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
- dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
- dst[2] |=
- A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
- dst[3] = 0;
- dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
- dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
- for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
- dst[i] = 0;
-
- if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
- tu_finishme("patch input attachment pitch for secondary cmd buffer");
- }
-}
-
-static void
-write_image_ibo(struct tu_cmd_buffer *cmd,
- uint32_t *dst,
- struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
-
- memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
- (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
- A6XX_TEX_CONST_DWORDS * 4);
-}
-
-static uint64_t
-buffer_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map,
- unsigned i, unsigned array_index)
-{
- assert(descriptors_state->valid & (1 << map->set[i]));
-
- struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
- assert(map->binding[i] < set->layout->binding_count);
-
- const struct tu_descriptor_set_binding_layout *layout =
- &set->layout->binding[map->binding[i]];
-
- switch (layout->type) {
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
- array_index];
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
- set->mapped_ptr[layout->offset / 4 + array_index * 2];
- default:
- unreachable("unimplemented descriptor type");
- break;
- }
-}
-
static inline uint32_t
tu6_stage2opcode(gl_shader_stage type)
{
switch (type) {
case MESA_SHADER_VERTEX:
return SB6_VS_SHADER;
+ case MESA_SHADER_GEOMETRY:
+ return SB6_GS_SHADER;
case MESA_SHADER_FRAGMENT:
return SB6_FS_SHADER;
case MESA_SHADER_COMPUTE:
tu_cs_emit(cs, push_constants[i + offset * 4]);
}
- for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
- if (state->range[i].start < state->range[i].end) {
- uint32_t size = state->range[i].end - state->range[i].start;
- uint32_t offset = state->range[i].start;
-
- /* and even if the start of the const buffer is before
- * first_immediate, the end may not be:
- */
- size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
-
- if (size == 0)
- continue;
-
- /* things should be aligned to vec4: */
- debug_assert((state->range[i].offset % 16) == 0);
- debug_assert((size % 16) == 0);
- debug_assert((offset % 16) == 0);
-
- /* Look through the UBO map to find our UBO index, and get the VA for
- * that UBO.
- */
- uint64_t va = 0;
- uint32_t ubo_idx = i - 1;
- uint32_t ubo_map_base = 0;
- for (int j = 0; j < link->ubo_map.num; j++) {
- if (ubo_idx >= ubo_map_base &&
- ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
- va = buffer_ptr(descriptors_state, &link->ubo_map, j,
- ubo_idx - ubo_map_base);
- break;
- }
- ubo_map_base += link->ubo_map.array_size[j];
- }
- assert(va);
-
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
- tu_cs_emit_qw(cs, va + offset);
- }
- }
-}
+ for (uint32_t i = 0; i < state->num_enabled; i++) {
+ uint32_t size = state->range[i].end - state->range[i].start;
+ uint32_t offset = state->range[i].start;
-static void
-tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type)
-{
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
-
- uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
- uint32_t anum = align(num, 2);
+ /* and even if the start of the const buffer is before
+ * first_immediate, the end may not be:
+ */
+ size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
- if (!num)
- return;
+ if (size == 0)
+ continue;
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
- tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
- tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
-
- unsigned emitted = 0;
- for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
- for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
- tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
- emitted++;
- }
- }
+ /* things should be aligned to vec4: */
+ debug_assert((state->range[i].offset % 16) == 0);
+ debug_assert((size % 16) == 0);
+ debug_assert((offset % 16) == 0);
- for (; emitted < anum; emitted++) {
- tu_cs_emit(cs, 0xffffffff);
- tu_cs_emit(cs, 0xffffffff);
+ /* Dig out the descriptor from the descriptor state and read the VA from
+ * it.
+ */
+ assert(state->range[i].bindless);
+ uint32_t *base = state->range[i].bindless_base == MAX_SETS ?
+ descriptors_state->dynamic_descriptors :
+ descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
+ unsigned block = state->range[i].block;
+ /* If the block in the shader here is in the dynamic descriptor set, it
+ * is an index into the dynamic descriptor set which is combined from
+ * dynamic descriptors and input attachments on-the-fly, and we don't
+ * have access to it here. Instead we work backwards to get the index
+ * into dynamic_descriptors.
+ */
+ if (state->range[i].bindless_base == MAX_SETS)
+ block -= pipeline->layout->input_attachment_count;
+ uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
+ uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
+ assert(va);
+
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit_qw(cs, va + offset);
}
}
tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
- tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
return VK_SUCCESS;
}
-static VkResult
-tu6_emit_textures(struct tu_cmd_buffer *cmd,
- const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type,
- struct tu_cs_entry *entry,
- bool is_sysmem)
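+/* Build the VFD_FETCH (vertex buffer) state into a sub-stream, referenced
+ * from the TU_DRAW_STATE_VB draw state group at draw time.
+ */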
+static struct tu_cs_entry
+tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline)
{
- struct tu_cs *draw_state = &cmd->sub_cs;
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
- VkResult result;
-
- if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
- *entry = (struct tu_cs_entry) {};
- return VK_SUCCESS;
- }
-
- /* allocate and fill texture state */
- struct ts_cs_memory tex_const;
- result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
- A6XX_TEX_CONST_DWORDS, &tex_const);
- if (result != VK_SUCCESS)
- return result;
-
- int tex_index = 0;
- for (unsigned i = 0; i < link->texture_map.num; i++) {
- for (int j = 0; j < link->texture_map.array_size[i]; j++) {
- write_tex_const(cmd,
- &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
- descriptors_state, &link->texture_map, i, j,
- is_sysmem);
- }
- }
-
- /* allocate and fill sampler state */
- struct ts_cs_memory tex_samp = { 0 };
- if (link->sampler_map.num_desc) {
- result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
- A6XX_TEX_SAMP_DWORDS, &tex_samp);
- if (result != VK_SUCCESS)
- return result;
-
- int sampler_index = 0;
- for (unsigned i = 0; i < link->sampler_map.num; i++) {
- for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
- const uint32_t *sampler = sampler_ptr(descriptors_state,
- &link->sampler_map,
- i, j);
- memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
- sampler, A6XX_TEX_SAMP_DWORDS * 4);
- }
- }
- }
-
- unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
- enum a6xx_state_block sb;
-
- switch (type) {
- case MESA_SHADER_VERTEX:
- sb = SB6_VS_TEX;
- tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
- break;
- case MESA_SHADER_FRAGMENT:
- sb = SB6_FS_TEX;
- tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
- break;
- case MESA_SHADER_COMPUTE:
- sb = SB6_CS_TEX;
- tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
- tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
- tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
- break;
- default:
- unreachable("bad state block");
- }
-
struct tu_cs cs;
- result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
- if (result != VK_SUCCESS)
- return result;
-
- if (link->sampler_map.num_desc) {
- /* output sampler state: */
- tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
- }
+ tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
- /* emit texture state: */
- tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
- tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+ for (uint32_t i = 0; i < pipeline->vi.count; i++) {
+ const uint32_t binding = pipeline->vi.bindings[i];
+ const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
+ const VkDeviceSize offset = buf->bo_offset +
+ cmd->state.vb.offsets[binding];
+ const VkDeviceSize size =
+ offset < buf->size ? buf->size - offset : 0;
- tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
- tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+ tu_cs_emit_regs(&cs,
+ A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
+ A6XX_VFD_FETCH_SIZE(i, size));
- tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
- tu_cs_emit(&cs, link->texture_map.num_desc);
+ }
- *entry = tu_cs_end_sub_stream(draw_state, &cs);
- return VK_SUCCESS;
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
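+/* Emit the bindless descriptor set base pointers for a bind point. When
+ * "gmem" is set, input attachment descriptors are patched to read from GMEM.
+ */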
static VkResult
-tu6_emit_ibo(struct tu_cmd_buffer *cmd,
- const struct tu_pipeline *pipeline,
- struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type,
- struct tu_cs_entry *entry)
+tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ VkPipelineBindPoint bind_point,
+ struct tu_cs_entry *entry,
+ bool gmem)
{
struct tu_cs *draw_state = &cmd->sub_cs;
- const struct tu_program_descriptor_linkage *link =
- &pipeline->program.link[type];
+ struct tu_pipeline_layout *layout = pipeline->layout;
+ struct tu_descriptor_state *descriptors_state =
+ tu_get_descriptors_state(cmd, bind_point);
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ const uint32_t *input_attachment_idx =
+ pipeline->program.input_attachment_idx;
+ uint32_t num_dynamic_descs = layout->dynamic_offset_count +
+ layout->input_attachment_count;
+ struct ts_cs_memory dynamic_desc_set;
VkResult result;
- unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
-
- if (num_desc == 0) {
- *entry = (struct tu_cs_entry) {};
- return VK_SUCCESS;
- }
-
- struct ts_cs_memory ibo_const;
- result = tu_cs_alloc(draw_state, num_desc,
- A6XX_TEX_CONST_DWORDS, &ibo_const);
- if (result != VK_SUCCESS)
- return result;
+ if (num_dynamic_descs > 0) {
+ /* allocate and fill out dynamic descriptor set */
+ result = tu_cs_alloc(draw_state, num_dynamic_descs,
+ A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
+ if (result != VK_SUCCESS)
+ return result;
- int ssbo_index = 0;
- for (unsigned i = 0; i < link->ssbo_map.num; i++) {
- for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
- uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
-
- uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
- /* We don't expose robustBufferAccess, so leave the size unlimited. */
- uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
-
- dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
- dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
- A6XX_IBO_1_HEIGHT(sz >> 15);
- dst[2] = A6XX_IBO_2_UNK4 |
- A6XX_IBO_2_UNK31 |
- A6XX_IBO_2_TYPE(A6XX_TEX_1D);
- dst[3] = 0;
- dst[4] = va;
- dst[5] = va >> 32;
- for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
- dst[i] = 0;
-
- ssbo_index++;
+ memcpy(dynamic_desc_set.map, descriptors_state->input_attachments,
+ layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
+
+ if (gmem) {
+ /* Patch input attachments to refer to GMEM instead */
+ for (unsigned i = 0; i < layout->input_attachment_count; i++) {
+ uint32_t *dst =
+ &dynamic_desc_set.map[A6XX_TEX_CONST_DWORDS * i];
+
+ /* The compiler has already laid out input_attachment_idx in the
+ * final order of input attachments, so there's no need to go
+ * through the pipeline layout finding input attachments.
+ */
+ unsigned attachment_idx = input_attachment_idx[i];
+
+ /* It's possible for the pipeline layout to include an input
+ * attachment which doesn't actually exist for the current
+ * subpass. Of course, this is only valid so long as the pipeline
+ * doesn't try to actually load that attachment. Just skip
+ * patching in that scenario to avoid out-of-bounds accesses.
+ */
+ if (attachment_idx >= cmd->state.subpass->input_count)
+ continue;
+
+ uint32_t a = cmd->state.subpass->input_attachments[attachment_idx].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ assert(att->gmem_offset >= 0);
+
+ dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+ dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+ dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
+ dst[2] |=
+ A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
+ dst[3] = 0;
+ dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
+ dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+ for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+ dst[i] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
+ }
}
- }
- for (unsigned i = 0; i < link->image_map.num; i++) {
- for (int j = 0; j < link->image_map.array_size[i]; j++) {
- uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+ memcpy(dynamic_desc_set.map + layout->input_attachment_count * A6XX_TEX_CONST_DWORDS,
+ descriptors_state->dynamic_descriptors,
+ layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
+ }
- write_image_ibo(cmd, dst,
- descriptors_state, &link->image_map, i, j);
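+ /* graphics and compute each have their own bindless base registers; the
+ * HLSQ_UPDATE_CNTL values below are opaque magic numbers
+ */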
+ uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg;
+ uint32_t hlsq_update_value;
+ switch (bind_point) {
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
+ hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
+ hlsq_update_value = 0x7c000;
+ break;
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
+ hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
+ hlsq_update_value = 0x3e00;
+ break;
+ default:
+ unreachable("bad bind point");
+ }
- ssbo_index++;
- }
+ /* Be careful here to *not* refer to the pipeline, so that if only the
+ * pipeline changes we don't have to emit this again (except if there are
+ * dynamic descriptors in the pipeline layout). This means always emitting
+ * all the valid descriptors, which means that we always have to put the
+ * dynamic descriptor set in the driver-only slot at the end.
+ */
+ uint32_t num_user_sets = util_last_bit(descriptors_state->valid);
+ uint32_t num_sets = num_user_sets;
+ if (num_dynamic_descs > 0) {
+ num_user_sets = MAX_SETS;
+ num_sets = num_user_sets + 1;
}
- assert(ssbo_index == num_desc);
+ unsigned regs[2] = { sp_bindless_base_reg, hlsq_bindless_base_reg };
struct tu_cs cs;
- result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
+ result = tu_cs_begin_sub_stream(draw_state, ARRAY_SIZE(regs) * (1 + num_sets * 2) + 2, &cs);
if (result != VK_SUCCESS)
return result;
- uint32_t opcode, ibo_addr_reg;
- enum a6xx_state_block sb;
- enum a6xx_state_type st;
+ if (num_sets > 0) {
+ for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
+ tu_cs_emit_pkt4(&cs, regs[i], num_sets * 2);
+ for (unsigned j = 0; j < num_user_sets; j++) {
+ if (descriptors_state->valid & (1 << j)) {
+ /* magic | 3 copied from the blob */
+ tu_cs_emit_qw(&cs, descriptors_state->sets[j]->va | 3);
+ } else {
+ tu_cs_emit_qw(&cs, 0 | 3);
+ }
+ }
+ if (num_dynamic_descs > 0) {
+ tu_cs_emit_qw(&cs, dynamic_desc_set.iova | 3);
+ }
+ }
- switch (type) {
- case MESA_SHADER_FRAGMENT:
- opcode = CP_LOAD_STATE6;
- st = ST6_SHADER;
- sb = SB6_IBO;
- ibo_addr_reg = REG_A6XX_SP_IBO_LO;
- break;
- case MESA_SHADER_COMPUTE:
- opcode = CP_LOAD_STATE6_FRAG;
- st = ST6_IBO;
- sb = SB6_CS_SHADER;
- ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
- break;
- default:
- unreachable("unsupported stage for ibos");
+ tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(hlsq_update_value));
}
- /* emit texture state: */
- tu_cs_emit_pkt7(&cs, opcode, 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(st) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
- tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
- tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
- tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
*entry = tu_cs_end_sub_stream(draw_state, &cs);
return VK_SUCCESS;
}
tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
if (cmd->state.streamout_reset & (1 << i)) {
- offset *= tf->stride[i];
-
tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
cmd->state.streamout_reset &= ~(1 << i);
} else {
tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
}
- if (cmd->state.dirty &
- (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
- for (uint32_t i = 0; i < pipeline->vi.count; i++) {
- const uint32_t binding = pipeline->vi.bindings[i];
- const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
- const VkDeviceSize offset = buf->bo_offset +
- cmd->state.vb.offsets[binding];
- const VkDeviceSize size =
- offset < buf->size ? buf->size - offset : 0;
-
- tu_cs_emit_regs(cs,
- A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
- A6XX_VFD_FETCH_SIZE(i, size));
- }
- }
-
if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.enable_mask = ENABLE_ALL,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
};
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_GS_CONST,
+ .enable_mask = ENABLE_ALL,
+ .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY)
+ };
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_CONST,
};
}
- if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
- tu6_emit_streamout(cmd, cs);
-
if (cmd->state.dirty &
- (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
- struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
-
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_VERTEX, &vs_tex, false);
- if (result != VK_SUCCESS)
- return result;
-
- /* TODO: we could emit just one texture descriptor draw state when there
- * are no input attachments, which is the most common case. We could
- * also split out the sampler state, which doesn't change even for input
- * attachments.
- */
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_tex_sysmem, true);
- if (result != VK_SUCCESS)
- return result;
+ (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_VB,
+ .enable_mask = ENABLE_ALL,
+ .ib = tu6_emit_vertex_buffers(cmd, pipeline)
+ };
+ }
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_tex_gmem, false);
- if (result != VK_SUCCESS)
- return result;
+ if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
+ tu6_emit_streamout(cmd, cs);
- result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
- MESA_SHADER_FRAGMENT, &fs_ibo);
+ /* If there are any dynamic descriptors, then we may need to re-emit
+ * them after every pipeline change in case the number of input attachments
+ * changes. We also always need to re-emit after a pipeline change if there
+ * are any input attachments, because the input attachment index comes from
+ * the pipeline. Finally, it can also happen that the subpass changes
+ * without the pipeline changing, in which case the GMEM descriptors need
+ * to be patched differently.
+ *
+ * TODO: We could probably be clever and avoid re-emitting state on
+ * pipeline changes if the number of input attachments is always 0. We
+ * could also only re-emit dynamic state.
+ */
+ if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS ||
+ ((pipeline->layout->dynamic_offset_count +
+ pipeline->layout->input_attachment_count > 0) &&
+ cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) ||
+ (pipeline->layout->input_attachment_count > 0 &&
+ cmd->state.dirty & TU_CMD_DIRTY_INPUT_ATTACHMENTS)) {
+ struct tu_cs_entry desc_sets, desc_sets_gmem;
+ bool need_gmem_desc_set = pipeline->layout->input_attachment_count > 0;
+
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ &desc_sets, false);
if (result != VK_SUCCESS)
return result;
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_VS_TEX,
- .enable_mask = ENABLE_ALL,
- .ib = vs_tex,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_TEX_GMEM,
- .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
- .ib = fs_tex_gmem,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
- .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
- .ib = fs_tex_sysmem,
- };
- draw_state_groups[draw_state_group_count++] =
- (struct tu_draw_state_group) {
- .id = TU_DRAW_STATE_FS_IBO,
- .enable_mask = ENABLE_DRAW,
- .ib = fs_ibo,
+ .id = TU_DRAW_STATE_DESC_SETS,
+ .enable_mask = need_gmem_desc_set ? ENABLE_NON_GMEM : ENABLE_ALL,
+ .ib = desc_sets,
};
+
+ if (need_gmem_desc_set) {
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ &desc_sets_gmem, true);
+ if (result != VK_SUCCESS)
+ return result;
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_DESC_SETS_GMEM,
+ .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
+ .ib = desc_sets_gmem,
+ };
+ }
+
+ /* We need to reload the descriptors every time the descriptor sets
+ * change. However, the commands we send only depend on the pipeline
+ * because the whole point is to cache descriptors which are used by the
+ * pipeline. There's a problem here, in that the firmware has an
+ * "optimization" which skips executing groups that are set to the same
+ * value as the last draw. This means that if the descriptor sets change
+ * but not the pipeline, we'd try to re-execute the same buffer which
+ * the firmware would ignore and we wouldn't pre-load the new
+ * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
+ * the descriptor sets change, which we emulate here by copying the
+ * pre-prepared buffer.
+ */
+ const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
+ if (load_entry->size > 0) {
+ struct tu_cs load_cs;
+ result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
+ if (result != VK_SUCCESS)
+ return result;
+ tu_cs_emit_array(&load_cs,
+ (uint32_t *)((char *)load_entry->bo->map + load_entry->offset),
+ load_entry->size / 4);
+ struct tu_cs_entry load_copy = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_DESC_SETS_LOAD,
+ /* The blob seems to not enable this for binning, even when
+ * resources would actually be used in the binning shader.
+ * Presumably the overhead of prefetching the resources isn't
+ * worth it.
+ */
+ .enable_mask = ENABLE_DRAW,
+ .ib = load_copy,
+ };
+ }
}
struct tu_cs_entry vs_params;
tu_cs_sanity_check(cs);
/* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
- for (uint32_t i = 0; i < MAX_VBS; i++) {
- const struct tu_buffer *buf = cmd->state.vb.buffers[i];
- if (buf)
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
- }
- }
if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
unsigned i;
for_each_bit(i, descriptors_state->valid) {
struct tu_descriptor_set *set = descriptors_state->sets[i];
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
- if (set->descriptors[j]) {
- tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+ if (set->buffers[j]) {
+ tu_bo_list_add(&cmd->bo_list, set->buffers[j],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+ }
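+ /* the descriptor set contents are read directly by the HW via the bindless
+ * bases, so the pool BO must be resident for the submit
+ */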
+ if (set->size > 0) {
+ tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
}
}
if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
}
}
+ /* There are too many graphics dirty bits to list here, so just list the
+ * bits to preserve instead. The only things not emitted here are
+ * compute-related state.
+ */
+ cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+
/* Fragment shader state overwrites compute shader state, so flag the
* compute pipeline for re-emit.
*/
- cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
+ cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
return VK_SUCCESS;
}
tu_emit_compute_driver_params(cs, pipeline, info);
- result = tu6_emit_textures(cmd, pipeline, descriptors_state,
- MESA_SHADER_COMPUTE, &ib, false);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
- if (ib.size)
- tu_cs_emit_ib(cs, &ib);
-
- result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
- if (ib.size)
- tu_cs_emit_ib(cs, &ib);
+ if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) {
+ result = tu6_emit_descriptor_sets(cmd, pipeline,
+ VK_PIPELINE_BIND_POINT_COMPUTE, &ib,
+ false);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
- /* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
+ /* track BOs */
unsigned i;
for_each_bit(i, descriptors_state->valid) {
struct tu_descriptor_set *set = descriptors_state->sets[i];
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
- if (set->descriptors[j]) {
- tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+ if (set->buffers[j]) {
+ tu_bo_list_add(&cmd->bo_list, set->buffers[j],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
+ }
+
+ if (set->size > 0) {
+ tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
}
}
+ if (ib.size)
+ tu_cs_emit_ib(cs, &ib);
+
+ if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)
+ tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
+
+ cmd->state.dirty &=
+ ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+
/* Compute shader state overwrites fragment shader state, so we flag the
* graphics pipeline for re-emit.
*/
- cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
+ cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));