return VK_SUCCESS;
}
-static VkResult
-tu_tiling_config_update_gmem_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t gmem_size = dev->physical_device->gmem_size;
- uint32_t offset = 0;
-
- for (uint32_t i = 0; i < tiling->buffer_count; i++) {
- /* 16KB-aligned */
- offset = align(offset, 0x4000);
-
- tiling->gmem_offsets[i] = offset;
- offset += tiling->tile0.extent.width * tiling->tile0.extent.height *
- tiling->buffer_cpp[i];
- }
-
- return offset <= gmem_size ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-}
-
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
+ const struct tu_device *dev,
+ uint32_t pixels)
{
const uint32_t tile_align_w = dev->physical_device->tile_align_w;
const uint32_t tile_align_h = dev->physical_device->tile_align_h;
}
/* do not exceed gmem size */
- while (tu_tiling_config_update_gmem_layout(tiling, dev) != VK_SUCCESS) {
+ while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
tiling->tile_count.width++;
tiling->tile0.extent.width =
struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
const uint32_t a = subpass->depth_stencil_attachment.attachment;
if (a == VK_ATTACHMENT_UNUSED) {
tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)));
tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size));
tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
- tu_cs_emit(cs, tiling->gmem_offsets[a]);
+ tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
unsigned char mrt_comp[MAX_RTS] = { 0 };
unsigned srgb_cntl = 0;
tu_cs_emit(cs, A6XX_RB_MRT_PITCH(tu_image_stride(iview->image, iview->base_mip)));
tu_cs_emit(cs, A6XX_RB_MRT_ARRAY_PITCH(iview->image->layout.layer_size));
tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
- tu_cs_emit(
- cs, tiling->gmem_offsets[a]); /* RB_MRT[i].BASE_GMEM */
+ tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_MRT_REG(i), 1);
tu_cs_emit(cs, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format->rb) |
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
const struct tu_render_pass_attachment *attachment =
&cmd->state.pass->attachments[a];
- if (!attachment->needs_gmem)
+ if (attachment->gmem_offset < 0)
return;
const uint32_t x1 = tiling->render_area.offset.x;
need_load = true;
if (need_load) {
- tu6_emit_blit_info(cmd, cs, iview, tiling->gmem_offsets[a], false);
+ tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
tu6_emit_blit(cmd, cs);
}
}
uint32_t a,
const VkRenderPassBeginInfo *info)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
const struct tu_framebuffer *fb = cmd->state.framebuffer;
const struct tu_image_view *iview = fb->attachments[a].attachment;
const struct tu_render_pass_attachment *attachment =
unsigned clear_mask = 0;
/* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
- if (!attachment->needs_gmem)
+ if (attachment->gmem_offset < 0)
return;
if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(clear_mask));
tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
- tu_cs_emit(cs, tiling->gmem_offsets[a]);
+ tu_cs_emit(cs, attachment->gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_88D0, 1);
tu_cs_emit(cs, 0);
tu6_emit_blit_info(cmd, cs,
cmd->state.framebuffer->attachments[a].attachment,
- cmd->state.tiling_config.gmem_offsets[gmem_a], true);
+ cmd->state.pass->attachments[gmem_a].gmem_offset, true);
tu6_emit_blit(cmd, cs);
}
tu6_emit_blit_scissor(cmd, cs, true);
for (uint32_t a = 0; a < pass->attachment_count; ++a) {
- if (pass->attachments[a].needs_gmem)
+ if (pass->attachments[a].gmem_offset >= 0)
tu6_emit_store_attachment(cmd, cs, a, a);
}
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
- tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A009, 0x00000001);
+ tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
- tu_cs_emit(cs, 0x00000013);
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+ CP_WAIT_REG_MEM_0_POLL_MEMORY);
tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, seqno);
- tu_cs_emit(cs, 0xffffffff);
- tu_cs_emit(cs, 0x00000010);
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
- tu_cs_emit_pkt7(cs, CP_UNK_A6XX_14, 4);
- tu_cs_emit(cs, 0x00000000);
+ tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
+ tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, seqno);
+ tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
static void
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
*/
tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
- CP_REG_TO_MEM_0_CNT(1 - 1));
+ CP_REG_TO_MEM_0_CNT(0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
const VkRenderPassBeginInfo *info)
{
const uint32_t tile_load_space =
- 6 + (23+19) * cmd->state.pass->attachment_count +
+ 8 + (23+19) * cmd->state.pass->attachment_count +
21 + (13 * cmd->state.subpass->color_count + 8) + 11;
struct tu_cs sub_cs;
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
tu6_emit_clear_attachment(cmd, &sub_cs, i, info);
+ /* invalidate because reading input attachments will cache GMEM and
+ * the cache isn't updated when GMEM is written
+ * TODO: is there a no-cache bit for textures?
+ */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, &sub_cs, CACHE_INVALIDATE, false);
+
tu6_emit_zs(cmd, cmd->state.subpass, &sub_cs);
tu6_emit_mrt(cmd, cmd->state.subpass, &sub_cs);
tu6_emit_msaa(cmd, cmd->state.subpass, &sub_cs);
const VkRect2D *render_area)
{
const struct tu_device *dev = cmd->device;
- const struct tu_render_pass *pass = cmd->state.pass;
struct tu_tiling_config *tiling = &cmd->state.tiling_config;
tiling->render_area = *render_area;
- for (uint32_t a = 0; a < pass->attachment_count; a++) {
- if (pass->attachments[a].needs_gmem)
- tiling->buffer_cpp[a] = pass->attachments[a].cpp;
- else
- tiling->buffer_cpp[a] = 0;
- }
- tiling->buffer_count = pass->attachment_count;
- tu_tiling_config_update_tile_layout(tiling, dev);
+ tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
tu_tiling_config_update_pipe_layout(tiling, dev);
tu_tiling_config_update_pipes(tiling, dev);
}
default:
break;
}
+ } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
+ assert(pBeginInfo->pInheritanceInfo);
+ cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+ cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
}
cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
uint32_t commandBufferCount,
const VkCommandBuffer *pCmdBuffers)
{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ VkResult result;
+
+ assert(commandBufferCount > 0);
+
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
+
+ result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
+ result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+ }
+ cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
}
VkResult
}
void
-tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo *pRenderPassBeginInfo,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
+tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
pSubpassBeginInfo->contents);
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
const struct tu_render_pass *pass = cmd->state.pass;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
struct tu_cs *cs = &cmd->draw_cs;
VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
}
}
+ /* invalidate because reading input attachments will cache GMEM and
+ * the cache isn't updated when GMEM is written
+ * TODO: is there a no-cache bit for textures?
+ */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
/* emit mrt/zs/msaa state for the subpass that is starting */
tu6_emit_zs(cmd, cmd->state.subpass, cs);
tu6_emit_mrt(cmd, cmd->state.subpass, cs);
uint32_t a = subpass->resolve_attachments[i].attachment;
const struct tu_image_view *iview =
cmd->state.framebuffer->attachments[a].attachment;
- if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].needs_gmem) {
+ if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
- tu6_emit_blit_info(cmd, cs, iview, tiling->gmem_offsets[a], false);
+ tu6_emit_blit_info(cmd, cs, iview, pass->attachments[a].gmem_offset, false);
tu6_emit_blit(cmd, cs);
}
}
}
void
-tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
- const VkSubpassEndInfoKHR *pSubpassEndInfo)
+tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
uint64_t count_buffer_offset;
};
+#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+
enum tu_draw_state_group_id
{
TU_DRAW_STATE_PROGRAM,
TU_DRAW_STATE_VS_TEX,
TU_DRAW_STATE_FS_TEX,
TU_DRAW_STATE_FS_IBO,
+ TU_DRAW_STATE_VS_PARAMS,
TU_DRAW_STATE_COUNT,
};
struct tu_cs_entry ib;
};
-static struct tu_sampler*
+const static struct tu_sampler*
sampler_ptr(struct tu_descriptor_state *descriptors_state,
const struct tu_descriptor_map *map, unsigned i,
unsigned array_index)
const struct tu_descriptor_set_binding_layout *layout =
&set->layout->binding[map->binding[i]];
+ if (layout->immutable_samplers_offset) {
+ const struct tu_sampler *immutable_samplers =
+ tu_immutable_samplers(set->layout, layout);
+
+ return &immutable_samplers[array_index];
+ }
+
switch (layout->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
array_index].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ assert(att->gmem_offset >= 0);
- assert(cmd->state.pass->attachments[a].needs_gmem);
dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
dst[2] |=
A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
- A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * tiling->buffer_cpp[a]);
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
dst[3] = 0;
- dst[4] = 0x100000 + tiling->gmem_offsets[a];
+ dst[4] = 0x100000 + att->gmem_offset;
dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
dst[i] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
}
}
return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
}
+/* Emit the vertex-shader "driver params" const upload for a draw.
+ *
+ * Builds a small sub-stream (CP_LOAD_STATE6_GEOM, one vec4 of constants at
+ * const_state->offsets.driver_param) and returns it in *entry.  Currently
+ * only the base-instance slot is populated from draw->first_instance; the
+ * remaining slots are written as zero (see the in-body TODO).
+ *
+ * Returns VK_SUCCESS with an empty *entry when the linked VS does not
+ * actually consume driver params (offset beyond constlen), otherwise the
+ * result of allocating the sub-stream.
+ */
+static VkResult
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+ const struct tu_draw_info *draw,
+ struct tu_cs_entry *entry)
+{
+ /* TODO: fill out more than just base instance */
+ const struct tu_program_descriptor_linkage *link =
+ &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+ const struct ir3_const_state *const_state = &link->const_state;
+ struct tu_cs cs;
+
+ /* driver params were not allocated space in the const file - nothing to
+ * upload, hand back an empty IB entry */
+ if (const_state->offsets.driver_param >= link->constlen) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ /* 8 dwords: 3 for the CP_LOAD_STATE6 header/addr + 4 payload + pkt7 */
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+
+ /* the payload below hard-codes first_instance into dword 2 of the vec4 */
+ STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
+
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, draw->first_instance);
+ tu_cs_emit(&cs, 0);
+
+ *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return VK_SUCCESS;
+}
+
static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
int sampler_index = 0;
for (unsigned i = 0; i < link->sampler_map.num; i++) {
for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state,
- &link->sampler_map, i, j);
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ &link->sampler_map,
+ i, j);
memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
sampler->state, sizeof(sampler->state));
*needs_border |= sampler->needs_border;
for (unsigned i = 0; i < vs_sampler->num; i++) {
for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state, vs_sampler, i, j);
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ vs_sampler, i, j);
memcpy(ptr.map, &border_color[sampler->border], 128);
ptr.map += 128 / 4;
}
for (unsigned i = 0; i < fs_sampler->num; i++) {
for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state, fs_sampler, i, j);
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ fs_sampler, i, j);
memcpy(ptr.map, &border_color[sampler->border], 128);
ptr.map += 128 / 4;
}
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_PROGRAM,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = pipeline->program.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_PROGRAM_BINNING,
- .enable_mask = 0x1,
+ .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
.ib = pipeline->program.binning_state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VI,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = pipeline->vi.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VI_BINNING,
- .enable_mask = 0x1,
+ .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
.ib = pipeline->vi.binning_state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VP,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->vp.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_RAST,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->rast.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_DS,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->ds.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_BLEND,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->blend.state_ib,
};
}
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VS_CONST,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_CONST,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
};
}
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VS_TEX,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = vs_tex,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_TEX,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = fs_tex,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_IBO,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = fs_ibo,
};
}
}
+ struct tu_cs_entry vs_params;
+ result = tu6_emit_vs_params(cmd, draw, &vs_params);
+ if (result != VK_SUCCESS)
+ return result;
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_VS_PARAMS,
+ .enable_mask = ENABLE_ALL,
+ .ib = vs_params,
+ };
+
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
for (uint32_t i = 0; i < draw_state_group_count; i++) {
const struct tu_draw_state_group *group = &draw_state_groups[i];
-
+ debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
uint32_t cp_set_draw_state =
CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
- CP_SET_DRAW_STATE__0_ENABLE_MASK(group->enable_mask) |
+ group->enable_mask |
CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
uint64_t iova;
if (group->ib.size) {
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
const struct ir3_const_state *const_state = &link->const_state;
- uint32_t offset_dwords = const_state->offsets.driver_param;
+ uint32_t offset = const_state->offsets.driver_param;
- if (link->constlen <= offset_dwords)
+ if (link->constlen <= offset)
return;
if (!info->indirect) {
- uint32_t driver_params[] = {
- info->blocks[0],
- info->blocks[1],
- info->blocks[2],
- pipeline->compute.local_size[0],
- pipeline->compute.local_size[1],
- pipeline->compute.local_size[2],
+ uint32_t driver_params[IR3_DP_CS_COUNT] = {
+ [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
+ [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
+ [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
+ [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
+ [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
+ [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
};
- uint32_t num_consts = MIN2(const_state->num_driver_params,
- link->constlen - offset_dwords);
- uint32_t align_size = align(num_consts, 4);
+ uint32_t num_consts = MIN2(const_state->num_driver_params,
+ (link->constlen - offset) * 4);
/* push constants */
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + align_size);
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset_dwords / 4) |
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(align_size / 4));
+ CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
tu_cs_emit(cs, 0);
tu_cs_emit(cs, 0);
uint32_t i;
for (i = 0; i < num_consts; i++)
tu_cs_emit(cs, driver_params[i]);
- for (; i < align_size; i++)
- tu_cs_emit(cs, 0);
} else {
tu_finishme("Indirect driver params");
}
}
void
-tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
- const VkSubpassEndInfoKHR *pSubpassEndInfo)
+tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
tu_CmdEndRenderPass(commandBuffer);
}