return VK_SUCCESS;
}
+/* Sets vertex buffers to HW binding points.  We emit VBs through
+ * SET_DRAW_STATE (SDS) so that bin rendering can skip over unused state;
+ * this means we need to collect all the bindings together into a single
+ * state emit at draw time.
+ */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   assert(firstBinding + bindingCount <= MAX_VBS);

for (uint32_t i = 0; i < bindingCount; i++) {
- cmd->state.vb.buffers[firstBinding + i] =
- tu_buffer_from_handle(pBuffers[i]);
+ struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
+
+ cmd->state.vb.buffers[firstBinding + i] = buf;
cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
+
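+      /* Track the buffer's BO in the submit list at bind time, rather than
+       * walking all MAX_VBS bindings again at draw time.
+       */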
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
- /* VB states depend on VkPipelineVertexInputStateCreateInfo */
cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
enum tu_draw_state_group_id
{
TU_DRAW_STATE_PROGRAM,
TU_DRAW_STATE_PROGRAM_BINNING,
+ TU_DRAW_STATE_VB,
TU_DRAW_STATE_VI,
TU_DRAW_STATE_VI_BINNING,
TU_DRAW_STATE_VP,
return VK_SUCCESS;
}
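+/* Builds the VFD_FETCH base/size state for each binding used by the
+ * pipeline into a sub-stream, returning an IB entry for the VB draw
+ * state group.
+ */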
+static struct tu_cs_entry
+tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline)
+{
+ struct tu_cs cs;
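+   /* Worst case is 4 dwords per vertex buffer: a pkt4 header plus the
+    * 64-bit VFD_FETCH_BASE address and the VFD_FETCH_SIZE value.
+    */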
+ tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
+
+ for (uint32_t i = 0; i < pipeline->vi.count; i++) {
+ const uint32_t binding = pipeline->vi.bindings[i];
+ const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
+ const VkDeviceSize offset = buf->bo_offset +
+ cmd->state.vb.offsets[binding];
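+      /* Clamp so that an offset past the end of the buffer yields a
+       * zero-sized fetch instead of an underflowed size.
+       */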
+ const VkDeviceSize size =
+ offset < buf->size ? buf->size - offset : 0;
+
+ tu_cs_emit_regs(&cs,
+ A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
+ A6XX_VFD_FETCH_SIZE(i, size));
+   }
+
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
+
static VkResult
tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
}
- if (cmd->state.dirty &
- (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
- for (uint32_t i = 0; i < pipeline->vi.count; i++) {
- const uint32_t binding = pipeline->vi.bindings[i];
- const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
- const VkDeviceSize offset = buf->bo_offset +
- cmd->state.vb.offsets[binding];
- const VkDeviceSize size =
- offset < buf->size ? buf->size - offset : 0;
-
- tu_cs_emit_regs(cs,
- A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
- A6XX_VFD_FETCH_SIZE(i, size));
- }
- }
-
if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
};
}
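+   /* The VB state depends on both the pipeline (which bindings its vertex
+    * input state uses) and the currently bound buffers, so re-emit it when
+    * either one is dirtied.
+    */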
+ if (cmd->state.dirty &
+ (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
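+      /* Vertex fetch runs during binning as well as during the render pass,
+       * so this state is enabled for all passes.
+       */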
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_VB,
+ .enable_mask = ENABLE_ALL,
+ .ib = tu6_emit_vertex_buffers(cmd, pipeline)
+ };
+ }
+
if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
tu6_emit_streamout(cmd, cs);
tu_cs_sanity_check(cs);
/* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
- for (uint32_t i = 0; i < MAX_VBS; i++) {
- const struct tu_buffer *buf = cmd->state.vb.buffers[i];
- if (buf)
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
- }
- }
if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
unsigned i;
for_each_bit(i, descriptors_state->valid) {