return VK_SUCCESS;
}
-static VkResult
-tu_tiling_config_update_gmem_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
-{
- const uint32_t gmem_size = dev->physical_device->gmem_size;
- uint32_t offset = 0;
-
- for (uint32_t i = 0; i < tiling->buffer_count; i++) {
- /* 16KB-aligned */
- offset = align(offset, 0x4000);
-
- tiling->gmem_offsets[i] = offset;
- offset += tiling->tile0.extent.width * tiling->tile0.extent.height *
- tiling->buffer_cpp[i];
- }
-
- return offset <= gmem_size ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-}
-
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
- const struct tu_device *dev)
+ const struct tu_device *dev,
+ uint32_t pixels)
{
const uint32_t tile_align_w = dev->physical_device->tile_align_w;
const uint32_t tile_align_h = dev->physical_device->tile_align_h;
}
/* do not exceed gmem size */
- while (tu_tiling_config_update_gmem_layout(tiling, dev) != VK_SUCCESS) {
+ while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
tiling->tile_count.width++;
tiling->tile0.extent.width =
struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
const uint32_t a = subpass->depth_stencil_attachment.attachment;
if (a == VK_ATTACHMENT_UNUSED) {
tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)));
tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size));
tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
- tu_cs_emit(cs, tiling->gmem_offsets[a]);
+ tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
struct tu_cs *cs)
{
const struct tu_framebuffer *fb = cmd->state.framebuffer;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
unsigned char mrt_comp[MAX_RTS] = { 0 };
unsigned srgb_cntl = 0;
tu_cs_emit(cs, A6XX_RB_MRT_PITCH(tu_image_stride(iview->image, iview->base_mip)));
tu_cs_emit(cs, A6XX_RB_MRT_ARRAY_PITCH(iview->image->layout.layer_size));
tu_cs_emit_qw(cs, tu_image_base(iview->image, iview->base_mip, iview->base_layer));
- tu_cs_emit(
- cs, tiling->gmem_offsets[a]); /* RB_MRT[i].BASE_GMEM */
+ tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_MRT_REG(i), 1);
tu_cs_emit(cs, A6XX_SP_FS_MRT_REG_COLOR_FORMAT(format->rb) |
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
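+ /* bit 25, formerly UNK25, is WAIT_FOR_ME: the CP waits for the ME to
+ * catch up before evaluating the register test */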
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
const struct tu_render_pass_attachment *attachment =
&cmd->state.pass->attachments[a];
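+ /* attachments with no GMEM allocation have a negative gmem_offset */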
- if (!attachment->needs_gmem)
+ if (attachment->gmem_offset < 0)
return;
const uint32_t x1 = tiling->render_area.offset.x;
need_load = true;
if (need_load) {
- tu6_emit_blit_info(cmd, cs, iview, tiling->gmem_offsets[a], false);
+ tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
tu6_emit_blit(cmd, cs);
}
}
uint32_t a,
const VkRenderPassBeginInfo *info)
{
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
const struct tu_framebuffer *fb = cmd->state.framebuffer;
const struct tu_image_view *iview = fb->attachments[a].attachment;
const struct tu_render_pass_attachment *attachment =
unsigned clear_mask = 0;
/* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
- if (!attachment->needs_gmem)
+ if (attachment->gmem_offset < 0)
return;
if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(clear_mask));
tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
- tu_cs_emit(cs, tiling->gmem_offsets[a]);
+ tu_cs_emit(cs, attachment->gmem_offset);
tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_88D0, 1);
tu_cs_emit(cs, 0);
tu6_emit_blit_info(cmd, cs,
cmd->state.framebuffer->attachments[a].attachment,
- cmd->state.tiling_config.gmem_offsets[gmem_a], true);
+ cmd->state.pass->attachments[gmem_a].gmem_offset, true);
tu6_emit_blit(cmd, cs);
}
tu6_emit_blit_scissor(cmd, cs, true);
for (uint32_t a = 0; a < pass->attachment_count; ++a) {
- if (pass->attachments[a].needs_gmem)
+ if (pass->attachments[a].gmem_offset >= 0)
tu6_emit_store_attachment(cmd, cs, a, a);
}
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
- tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A009, 0x00000001);
+ tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_AND_INV_EVENT, true);
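+ /* poll the scratch BO until the CACHE_FLUSH_AND_INV_EVENT timestamp
+ * above has been written (value == seqno, full mask, retry every 16
+ * cycles) */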
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
- tu_cs_emit(cs, 0x00000013);
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+ CP_WAIT_REG_MEM_0_POLL_MEMORY);
tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, seqno);
- tu_cs_emit(cs, 0xffffffff);
- tu_cs_emit(cs, 0x00000010);
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
- tu_cs_emit_pkt7(cs, CP_UNK_A6XX_14, 4);
- tu_cs_emit(cs, 0x00000000);
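+ /* stall the CP until *scratch_bo >= seqno (the CACHE_FLUSH_TS write
+ * above) */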
+ tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
+ tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, seqno);
+ tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
}
static void
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
*/
tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
- CP_REG_TO_MEM_0_CNT(1 - 1));
+ CP_REG_TO_MEM_0_CNT(0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + VSC_OVERFLOW);
tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
- A6XX_CP_REG_TEST_0_UNK25);
+ A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, 0x10000000);
}
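+ /* LRZ is not used yet; write 0 to keep it disabled */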
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_CNTL, 1);
- tu_cs_emit(cs, A6XX_GRAS_LRZ_CNTL_ENABLE | A6XX_GRAS_LRZ_CNTL_UNK3);
+ tu_cs_emit(cs, 0);
tu6_emit_lrz_flush(cmd, cs);
const VkRenderPassBeginInfo *info)
{
const uint32_t tile_load_space =
- 6 + (23+19) * cmd->state.pass->attachment_count +
+ 8 + (23+19) * cmd->state.pass->attachment_count +
21 + (13 * cmd->state.subpass->color_count + 8) + 11;
struct tu_cs sub_cs;
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->tile_cs,
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
tile_load_space, &sub_cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
tu6_emit_clear_attachment(cmd, &sub_cs, i, info);
+ /* invalidate because reading input attachments will cache GMEM and
+ * the cache isn't updated when GMEM is written
+ * TODO: is there a no-cache bit for textures?
+ */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, &sub_cs, CACHE_INVALIDATE, false);
+
tu6_emit_zs(cmd, cmd->state.subpass, &sub_cs);
tu6_emit_mrt(cmd, cmd->state.subpass, &sub_cs);
tu6_emit_msaa(cmd, cmd->state.subpass, &sub_cs);
- cmd->state.tile_load_ib = tu_cs_end_sub_stream(&cmd->tile_cs, &sub_cs);
+ cmd->state.tile_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
struct tu_cs sub_cs;
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->tile_cs,
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
tile_store_space, &sub_cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
/* emit to tile-store sub_cs */
tu6_emit_tile_store(cmd, &sub_cs);
- cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->tile_cs, &sub_cs);
+ cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
}
static void
const VkRect2D *render_area)
{
const struct tu_device *dev = cmd->device;
- const struct tu_render_pass *pass = cmd->state.pass;
struct tu_tiling_config *tiling = &cmd->state.tiling_config;
tiling->render_area = *render_area;
- for (uint32_t a = 0; a < pass->attachment_count; a++) {
- if (pass->attachments[a].needs_gmem)
- tiling->buffer_cpp[a] = pass->attachments[a].cpp;
- else
- tiling->buffer_cpp[a] = 0;
- }
- tiling->buffer_count = pass->attachment_count;
- tu_tiling_config_update_tile_layout(tiling, dev);
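+ /* per-attachment GMEM offsets are now computed at render pass creation;
+ * gmem_pixels is the largest tile size (in pixels) for which every
+ * attachment still fits in GMEM */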
+ tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
tu_tiling_config_update_pipe_layout(tiling, dev);
tu_tiling_config_update_pipes(tiling, dev);
}
tu_bo_list_init(&cmd_buffer->bo_list);
tu_cs_init(&cmd_buffer->cs, TU_CS_MODE_GROW, 4096);
tu_cs_init(&cmd_buffer->draw_cs, TU_CS_MODE_GROW, 4096);
- tu_cs_init(&cmd_buffer->draw_state, TU_CS_MODE_SUB_STREAM, 2048);
- tu_cs_init(&cmd_buffer->tile_cs, TU_CS_MODE_SUB_STREAM, 1024);
+ tu_cs_init(&cmd_buffer->sub_cs, TU_CS_MODE_SUB_STREAM, 2048);
*pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_cs);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_state);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->tile_cs);
+ tu_cs_finish(cmd_buffer->device, &cmd_buffer->sub_cs);
tu_bo_list_destroy(&cmd_buffer->bo_list);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
tu_bo_list_reset(&cmd_buffer->bo_list);
tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);
tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_cs);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_state);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->tile_cs);
+ tu_cs_reset(cmd_buffer->device, &cmd_buffer->sub_cs);
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].dirty = 0;
default:
break;
}
+ } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
+ assert(pBeginInfo->pInheritanceInfo);
+ cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+ cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
}
cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
- for (uint32_t i = 0; i < cmd_buffer->draw_state.bo_count; i++) {
- tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_state.bos[i],
- MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
- }
-
- for (uint32_t i = 0; i < cmd_buffer->tile_cs.bo_count; i++) {
- tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->tile_cs.bos[i],
+ for (uint32_t i = 0; i < cmd_buffer->sub_cs.bo_count; i++) {
+ tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->sub_cs.bos[i],
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
}
uint32_t commandBufferCount,
const VkCommandBuffer *pCmdBuffers)
{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ VkResult result;
+
+ assert(commandBufferCount > 0);
+
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
+
+ result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
+ result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+ }
+ cmd->state.dirty = ~0u; /* TODO: only mark the state that actually changed as dirty */
}
VkResult
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
- VkResult result;
cmd->state.pass = pass;
cmd->state.subpass = pass->subpasses;
}
void
-tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo *pRenderPassBeginInfo,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
+tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
pSubpassBeginInfo->contents);
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
const struct tu_render_pass *pass = cmd->state.pass;
- const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
struct tu_cs *cs = &cmd->draw_cs;
VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
}
}
+ /* invalidate because reading input attachments will cache GMEM and
+ * the cache isn't updated when GMEM is written
+ * TODO: is there a no-cache bit for textures?
+ */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
/* emit mrt/zs/msaa state for the subpass that is starting */
tu6_emit_zs(cmd, cmd->state.subpass, cs);
tu6_emit_mrt(cmd, cmd->state.subpass, cs);
uint32_t a = subpass->resolve_attachments[i].attachment;
const struct tu_image_view *iview =
cmd->state.framebuffer->attachments[a].attachment;
- if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].needs_gmem) {
+ if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
- tu6_emit_blit_info(cmd, cs, iview, tiling->gmem_offsets[a], false);
+ tu6_emit_blit_info(cmd, cs, iview, pass->attachments[a].gmem_offset, false);
tu6_emit_blit(cmd, cs);
}
}
}
void
-tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
- const VkSubpassEndInfoKHR *pSubpassEndInfo)
+tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
uint64_t count_buffer_offset;
};
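+/* shorthands for CP_SET_DRAW_STATE enable bits: ENABLE_ALL runs a state
+ * group in the binning, GMEM and sysmem (bypass) passes, ENABLE_DRAW
+ * skips it during binning */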
+#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+
enum tu_draw_state_group_id
{
TU_DRAW_STATE_PROGRAM,
TU_DRAW_STATE_VS_TEX,
TU_DRAW_STATE_FS_TEX,
TU_DRAW_STATE_FS_IBO,
+ TU_DRAW_STATE_VS_PARAMS,
TU_DRAW_STATE_COUNT,
};
struct tu_cs_entry ib;
};
-static struct tu_sampler*
+static const struct tu_sampler*
sampler_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map, unsigned i)
+ const struct tu_descriptor_map *map, unsigned i,
+ unsigned array_index)
{
assert(descriptors_state->valid & (1 << map->set[i]));
const struct tu_descriptor_set_binding_layout *layout =
&set->layout->binding[map->binding[i]];
+ if (layout->immutable_samplers_offset) {
+ const struct tu_sampler *immutable_samplers =
+ tu_immutable_samplers(set->layout, layout);
+
+ return &immutable_samplers[array_index];
+ }
+
switch (layout->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS];
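+ /* combined image+sampler descriptors store the sampler words right
+ * after the texture constants, so each array element strides by
+ * A6XX_TEX_CONST_DWORDS plus the sampler size */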
+ return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
+ array_index *
+ (A6XX_TEX_CONST_DWORDS +
+ sizeof(struct tu_sampler) / 4)];
default:
unreachable("unimplemented descriptor type");
break;
}
}
-static uint32_t*
-texture_ptr(struct tu_descriptor_state *descriptors_state,
- const struct tu_descriptor_map *map, unsigned i)
+static void
+write_tex_const(struct tu_cmd_buffer *cmd,
+ uint32_t *dst,
+ struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
{
assert(descriptors_state->valid & (1 << map->set[i]));
switch (layout->type) {
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- return &set->mapped_ptr[layout->offset / 4];
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- return &set->mapped_ptr[layout->offset / 4];
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index * A6XX_TEX_CONST_DWORDS],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index *
+ (A6XX_TEX_CONST_DWORDS +
+ sizeof(struct tu_sampler) / 4)],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
default:
unreachable("unimplemented descriptor type");
break;
}
+
+ if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
+ array_index].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ assert(att->gmem_offset >= 0);
+
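+ /* rewrite the descriptor so sampling reads the attachment's contents
+ * out of GMEM: force TILE6_2 tiling, a single-layer 2D view with the
+ * per-tile pitch, and a base address inside GMEM (GMEM is mapped at
+ * 0x100000 in the GPU address space) */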
+ dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+ dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+ dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
+ dst[2] |=
+ A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
+ dst[3] = 0;
+ dst[4] = 0x100000 + att->gmem_offset;
+ dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+ for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+ dst[i] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
+ }
}
static uint64_t
buffer_ptr(struct tu_descriptor_state *descriptors_state,
const struct tu_descriptor_map *map,
- unsigned i)
+ unsigned i, unsigned array_index)
{
assert(descriptors_state->valid & (1 << map->set[i]));
switch (layout->type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset];
+ return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
+ array_index];
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- return (uint64_t) set->mapped_ptr[layout->offset / 4 + 1] << 32 |
- set->mapped_ptr[layout->offset / 4];
+ return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
+ set->mapped_ptr[layout->offset / 4 + array_index * 2];
default:
unreachable("unimplemented descriptor type");
break;
continue;
}
- uint64_t va = buffer_ptr(descriptors_state, &link->ubo_map, i - 1);
+ /* Look through the UBO map to find our UBO index, and get the VA for
+ * that UBO.
+ */
+ uint64_t va = 0;
+ uint32_t ubo_idx = i - 1;
+ uint32_t ubo_map_base = 0;
+ for (int j = 0; j < link->ubo_map.num; j++) {
+ if (ubo_idx >= ubo_map_base &&
+ ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
+ va = buffer_ptr(descriptors_state, &link->ubo_map, j,
+ ubo_idx - ubo_map_base);
+ break;
+ }
+ ubo_map_base += link->ubo_map.array_size[j];
+ }
+ assert(va);
tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
- uint32_t num = MIN2(link->ubo_map.num, link->const_state.num_ubos);
+ uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
uint32_t anum = align(num, 2);
- uint32_t i;
if (!num)
return;
tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
- for (i = 0; i < num; i++)
- tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i));
+ unsigned emitted = 0;
+ for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
+ for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
+ tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
+ emitted++;
+ }
+ }
- for (; i < anum; i++) {
+ for (; emitted < anum; emitted++) {
tu_cs_emit(cs, 0xffffffff);
tu_cs_emit(cs, 0xffffffff);
}
gl_shader_stage type)
{
struct tu_cs cs;
- tu_cs_begin_sub_stream(cmd->device, &cmd->draw_state, 512, &cs); /* TODO: maximum size? */
+ tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
- return tu_cs_end_sub_stream(&cmd->draw_state, &cs);
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
+
+static VkResult
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+ const struct tu_draw_info *draw,
+ struct tu_cs_entry *entry)
+{
+ /* TODO: fill out more than just base instance */
+ const struct tu_program_descriptor_linkage *link =
+ &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+ const struct ir3_const_state *const_state = &link->const_state;
+ struct tu_cs cs;
+
+ if (const_state->offsets.driver_param >= link->constlen) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+
+ STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
+
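+ /* a single vec4 of driver params; dword 2 is IR3_DP_INSTID_BASE
+ * (guaranteed by the assert above), the rest stay zero for now */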
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, draw->first_instance);
+ tu_cs_emit(&cs, 0);
+
+ *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return VK_SUCCESS;
}
static VkResult
tu6_emit_textures(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
gl_shader_stage type,
struct tu_cs_entry *entry,
bool *needs_border)
{
struct tu_device *device = cmd->device;
- struct tu_cs *draw_state = &cmd->draw_state;
- struct tu_descriptor_state *descriptors_state =
- &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
+ struct tu_cs *draw_state = &cmd->sub_cs;
const struct tu_program_descriptor_linkage *link =
- &cmd->state.pipeline->program.link[type];
+ &pipeline->program.link[type];
VkResult result;
- if (link->texture_map.num == 0 && link->sampler_map.num == 0) {
+ if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
*entry = (struct tu_cs_entry) {};
return VK_SUCCESS;
}
/* allocate and fill texture state */
struct ts_cs_memory tex_const;
- result = tu_cs_alloc(device, draw_state, link->texture_map.num, A6XX_TEX_CONST_DWORDS, &tex_const);
+ result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
+ A6XX_TEX_CONST_DWORDS, &tex_const);
if (result != VK_SUCCESS)
return result;
+ int tex_index = 0;
for (unsigned i = 0; i < link->texture_map.num; i++) {
- memcpy(&tex_const.map[A6XX_TEX_CONST_DWORDS*i],
- texture_ptr(descriptors_state, &link->texture_map, i),
- A6XX_TEX_CONST_DWORDS*4);
+ for (int j = 0; j < link->texture_map.array_size[i]; j++) {
+ write_tex_const(cmd,
+ &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
+ descriptors_state, &link->texture_map, i, j);
+ }
}
/* allocate and fill sampler state */
- struct ts_cs_memory tex_samp;
- result = tu_cs_alloc(device, draw_state, link->sampler_map.num, A6XX_TEX_SAMP_DWORDS, &tex_samp);
- if (result != VK_SUCCESS)
- return result;
+ struct ts_cs_memory tex_samp = { 0 };
+ if (link->sampler_map.num_desc) {
+ result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
+ A6XX_TEX_SAMP_DWORDS, &tex_samp);
+ if (result != VK_SUCCESS)
+ return result;
- for (unsigned i = 0; i < link->sampler_map.num; i++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state, &link->sampler_map, i);
- memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS*i], sampler->state, sizeof(sampler->state));
- *needs_border |= sampler->needs_border;
+ int sampler_index = 0;
+ for (unsigned i = 0; i < link->sampler_map.num; i++) {
+ for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ &link->sampler_map,
+ i, j);
+ memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
+ sampler->state, sizeof(sampler->state));
+ *needs_border |= sampler->needs_border;
+ }
+ }
}
unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
if (result != VK_SUCCESS)
return result;
- /* output sampler state: */
- tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
- tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
- CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num));
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+ if (link->sampler_map.num_desc) {
+ /* output sampler state: */
+ tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
- tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
- tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+ tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+ }
/* emit texture state: */
tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
- CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num));
+ CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
- tu_cs_emit(&cs, link->texture_map.num);
+ tu_cs_emit(&cs, link->texture_map.num_desc);
*entry = tu_cs_end_sub_stream(draw_state, &cs);
return VK_SUCCESS;
}
-static struct tu_cs_entry
-tu6_emit_ibo(struct tu_device *device, struct tu_cs *draw_state,
+static VkResult
+tu6_emit_ibo(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
- gl_shader_stage type)
+ gl_shader_stage type,
+ struct tu_cs_entry *entry)
{
+ struct tu_device *device = cmd->device;
+ struct tu_cs *draw_state = &cmd->sub_cs;
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
+ VkResult result;
- uint32_t size = link->image_mapping.num_ibo * A6XX_TEX_CONST_DWORDS;
- if (!size)
- return (struct tu_cs_entry) {};
+ if (link->image_mapping.num_ibo == 0) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
- struct tu_cs cs;
- tu_cs_begin_sub_stream(device, draw_state, size, &cs);
+ struct ts_cs_memory ibo_const;
+ result = tu_cs_alloc(device, draw_state, link->image_mapping.num_ibo,
+ A6XX_TEX_CONST_DWORDS, &ibo_const);
+ if (result != VK_SUCCESS)
+ return result;
for (unsigned i = 0; i < link->image_mapping.num_ibo; i++) {
unsigned idx = link->image_mapping.ibo_to_image[i];
+ uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * i];
if (idx & IBO_SSBO) {
idx &= ~IBO_SSBO;
- uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, idx);
+ uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, idx,
+ 0 /* XXX */);
/* We don't expose robustBufferAccess, so leave the size unlimited. */
uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
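+ /* describe the SSBO as a 1D 32-bit UINT "texture" so untyped loads
+ * and stores work; the element count is split between the 15-bit
+ * WIDTH field and HEIGHT */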
- tu_cs_emit(&cs, A6XX_IBO_0_FMT(TFMT6_32_UINT));
- tu_cs_emit(&cs,
- A6XX_IBO_1_WIDTH(sz & MASK(15)) |
- A6XX_IBO_1_HEIGHT(sz >> 15));
- tu_cs_emit(&cs,
- A6XX_IBO_2_UNK4 |
- A6XX_IBO_2_UNK31 |
- A6XX_IBO_2_TYPE(A6XX_TEX_1D));
- tu_cs_emit(&cs, 0);
- tu_cs_emit_qw(&cs, va);
+ dst[0] = A6XX_IBO_0_FMT(TFMT6_32_UINT);
+ dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
+ A6XX_IBO_1_HEIGHT(sz >> 15);
+ dst[2] = A6XX_IBO_2_UNK4 |
+ A6XX_IBO_2_UNK31 |
+ A6XX_IBO_2_TYPE(A6XX_TEX_1D);
+ dst[3] = 0;
+ dst[4] = va;
+ dst[5] = va >> 32;
for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
- tu_cs_emit(&cs, 0);
+ dst[i] = 0;
} else {
tu_finishme("Emit images");
}
}
- struct tu_cs_entry entry = tu_cs_end_sub_stream(draw_state, &cs);
+ struct tu_cs cs;
+ result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
+ if (result != VK_SUCCESS)
+ return result;
- uint64_t ibo_addr = entry.bo->iova + entry.offset;
+ uint32_t opcode, ibo_addr_reg;
+ enum a6xx_state_block sb;
+ enum a6xx_state_type st;
- tu_cs_begin_sub_stream(device, draw_state, 64, &cs);
+ switch (type) {
+ case MESA_SHADER_FRAGMENT:
+ opcode = CP_LOAD_STATE6;
+ st = ST6_SHADER;
+ sb = SB6_IBO;
+ ibo_addr_reg = REG_A6XX_SP_IBO_LO;
+ break;
+ case MESA_SHADER_COMPUTE:
+ opcode = CP_LOAD_STATE6_FRAG;
+ st = ST6_IBO;
+ sb = SB6_CS_SHADER;
+ ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
+ break;
+ default:
+ unreachable("unsupported stage for ibos");
+ }
/* emit texture state: */
- tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6, 3);
+ tu_cs_emit_pkt7(&cs, opcode, 3);
tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
- CP_LOAD_STATE6_0_STATE_TYPE(type == MESA_SHADER_COMPUTE ?
- ST6_IBO : ST6_SHADER) |
+ CP_LOAD_STATE6_0_STATE_TYPE(st) |
CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
- CP_LOAD_STATE6_0_STATE_BLOCK(type == MESA_SHADER_COMPUTE ?
- SB6_CS_SHADER : SB6_IBO) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
CP_LOAD_STATE6_0_NUM_UNIT(link->image_mapping.num_ibo));
- tu_cs_emit_qw(&cs, ibo_addr); /* SRC_ADDR_LO/HI */
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
- tu_cs_emit_pkt4(&cs,
- type == MESA_SHADER_COMPUTE ?
- REG_A6XX_SP_IBO_LO : REG_A6XX_SP_CS_IBO_LO, 2);
- tu_cs_emit_qw(&cs, ibo_addr); /* SRC_ADDR_LO/HI */
+ tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
- return tu_cs_end_sub_stream(draw_state, &cs);
+ *entry = tu_cs_end_sub_stream(draw_state, &cs);
+ return VK_SUCCESS;
}
struct PACKED bcolor_entry {
&pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
struct ts_cs_memory ptr;
- VkResult result = tu_cs_alloc(cmd->device, &cmd->draw_state,
- vs_sampler->num + fs_sampler->num, 128 / 4,
+ VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
+ vs_sampler->num_desc + fs_sampler->num_desc,
+ 128 / 4,
&ptr);
if (result != VK_SUCCESS)
return result;
for (unsigned i = 0; i < vs_sampler->num; i++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state, vs_sampler, i);
- memcpy(ptr.map, &border_color[sampler->border], 128);
- ptr.map += 128 / 4;
+ for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ vs_sampler, i, j);
+ memcpy(ptr.map, &border_color[sampler->border], 128);
+ ptr.map += 128 / 4;
+ }
}
for (unsigned i = 0; i < fs_sampler->num; i++) {
- struct tu_sampler *sampler = sampler_ptr(descriptors_state, fs_sampler, i);
- memcpy(ptr.map, &border_color[sampler->border], 128);
- ptr.map += 128 / 4;
+ for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ fs_sampler, i, j);
+ memcpy(ptr.map, &border_color[sampler->border], 128);
+ ptr.map += 128 / 4;
+ }
}
tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_PROGRAM,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = pipeline->program.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_PROGRAM_BINNING,
- .enable_mask = 0x1,
+ .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
.ib = pipeline->program.binning_state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VI,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = pipeline->vi.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VI_BINNING,
- .enable_mask = 0x1,
+ .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
.ib = pipeline->vi.binning_state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VP,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->vp.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_RAST,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->rast.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_DS,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->ds.state_ib,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_BLEND,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = pipeline->blend.state_ib,
};
}
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VS_CONST,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_CONST,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
};
}
if (cmd->state.dirty &
(TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
bool needs_border = false;
- struct tu_cs_entry vs_tex, fs_tex;
+ struct tu_cs_entry vs_tex, fs_tex, fs_ibo;
- result = tu6_emit_textures(cmd, MESA_SHADER_VERTEX, &vs_tex, &needs_border);
+ result = tu6_emit_textures(cmd, pipeline, descriptors_state,
+ MESA_SHADER_VERTEX, &vs_tex, &needs_border);
if (result != VK_SUCCESS)
return result;
- result = tu6_emit_textures(cmd, MESA_SHADER_FRAGMENT, &fs_tex, &needs_border);
+ result = tu6_emit_textures(cmd, pipeline, descriptors_state,
+ MESA_SHADER_FRAGMENT, &fs_tex, &needs_border);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
+ MESA_SHADER_FRAGMENT, &fs_ibo);
if (result != VK_SUCCESS)
return result;
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_VS_TEX,
- .enable_mask = 0x7,
+ .enable_mask = ENABLE_ALL,
.ib = vs_tex,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_TEX,
- .enable_mask = 0x6,
+ .enable_mask = ENABLE_DRAW,
.ib = fs_tex,
};
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
.id = TU_DRAW_STATE_FS_IBO,
- .enable_mask = 0x6,
- .ib = tu6_emit_ibo(cmd->device, &cmd->draw_state, pipeline,
- descriptors_state, MESA_SHADER_FRAGMENT)
+ .enable_mask = ENABLE_DRAW,
+ .ib = fs_ibo,
};
if (needs_border) {
}
}
+ struct tu_cs_entry vs_params;
+ result = tu6_emit_vs_params(cmd, draw, &vs_params);
+ if (result != VK_SUCCESS)
+ return result;
+
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_VS_PARAMS,
+ .enable_mask = ENABLE_ALL,
+ .ib = vs_params,
+ };
+
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
for (uint32_t i = 0; i < draw_state_group_count; i++) {
const struct tu_draw_state_group *group = &draw_state_groups[i];
-
+ debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
uint32_t cp_set_draw_state =
CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
- CP_SET_DRAW_STATE__0_ENABLE_MASK(group->enable_mask) |
+ group->enable_mask |
CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
uint64_t iova;
if (group->ib.size) {
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
const struct ir3_const_state *const_state = &link->const_state;
- uint32_t offset_dwords = const_state->offsets.driver_param;
+ uint32_t offset = const_state->offsets.driver_param;
- if (link->constlen <= offset_dwords)
+ if (link->constlen <= offset)
return;
if (!info->indirect) {
- uint32_t driver_params[] = {
- info->blocks[0],
- info->blocks[1],
- info->blocks[2],
- pipeline->compute.local_size[0],
- pipeline->compute.local_size[1],
- pipeline->compute.local_size[2],
+ uint32_t driver_params[IR3_DP_CS_COUNT] = {
+ [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
+ [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
+ [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
+ [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
+ [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
+ [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
};
- uint32_t num_consts = MIN2(const_state->num_driver_params,
- link->constlen - offset_dwords);
- uint32_t align_size = align(num_consts, 4);
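+ /* constlen and the driver_param offset are counted in vec4s, so the
+ * remaining space in dwords is (constlen - offset) * 4 */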
+ uint32_t num_consts = MIN2(const_state->num_driver_params,
+ (link->constlen - offset) * 4);
/* push constants */
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + align_size);
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset_dwords / 4) |
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(align_size / 4));
+ CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
tu_cs_emit(cs, 0);
tu_cs_emit(cs, 0);
uint32_t i;
for (i = 0; i < num_consts; i++)
tu_cs_emit(cs, driver_params[i]);
- for (; i < align_size; i++)
- tu_cs_emit(cs, 0);
} else {
tu_finishme("Indirect driver params");
}
tu_emit_compute_driver_params(cs, pipeline, info);
bool needs_border;
- result = tu6_emit_textures(cmd, MESA_SHADER_COMPUTE, &ib, &needs_border);
+ result = tu6_emit_textures(cmd, pipeline, descriptors_state,
+ MESA_SHADER_COMPUTE, &ib, &needs_border);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
tu_cs_emit_ib(cs, &ib);
if (needs_border)
- tu6_emit_border_color(cmd, cs);
+ tu_finishme("compute border color");
+
+ result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
- ib = tu6_emit_ibo(cmd->device, &cmd->draw_state, pipeline,
- descriptors_state, MESA_SHADER_COMPUTE);
if (ib.size)
tu_cs_emit_ib(cs, &ib);
}
void
-tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
- const VkSubpassEndInfoKHR *pSubpassEndInfo)
+tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
tu_CmdEndRenderPass(commandBuffer);
}