+ list_inithead(&pool->cmd_buffers);
+ list_inithead(&pool->free_cmd_buffers);
+
+ pool->queue_family_index = pCreateInfo->queueFamilyIndex;
+
+ *pCmdPool = tu_cmd_pool_to_handle(pool);
+
+ return VK_SUCCESS;
+}
+
+void
+tu_DestroyCommandPool(VkDevice _device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks *pAllocator)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+
+ if (!pool)
+ return;
+
+ list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
+ &pool->free_cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ vk_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult
+tu_ResetCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags)
+{
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+ VkResult result;
+
+ list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
+ pool_link)
+ {
+ result = tu_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+tu_TrimCommandPool(VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlags flags)
+{
+ TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
+
+ if (!pool)
+ return;
+
+ list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
+ &pool->free_cmd_buffers, pool_link)
+ {
+ tu_cmd_buffer_destroy(cmd_buffer);
+ }
+}
+
+void
+tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBegin,
+ VkSubpassContents contents)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
+ TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
+
+ cmd->state.pass = pass;
+ cmd->state.subpass = pass->subpasses;
+ cmd->state.framebuffer = fb;
+
+ tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
+ tu_cmd_prepare_sysmem_clear_ib(cmd, pRenderPassBegin);
+ tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
+ tu_cmd_prepare_tile_store_ib(cmd);
+
+ VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
+ tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
+ tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
+ tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
+
+ /* note: use_hw_binning only checks tiling config */
+ if (use_hw_binning(cmd))
+ cmd->use_vsc_data = true;
+
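+ /* add every attachment's backing BO to the submit list so the kernel
+ * keeps it resident while this command buffer executes
+ */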
+ for (uint32_t i = 0; i < fb->attachment_count; ++i) {
+ const struct tu_image_view *iview = fb->attachments[i].attachment;
+ tu_bo_list_add(&cmd->bo_list, iview->image->bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+ }
+}
+
+void
+tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
+{
+ tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
+ pSubpassBeginInfo->contents);
+}
+
+void
+tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ const struct tu_render_pass *pass = cmd->state.pass;
+ struct tu_cs *cs = &cmd->draw_cs;
+
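+ /* "subpass" is the subpass being ended here; after the post-increment,
+ * cmd->state.subpass points at the subpass being started
+ */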
+ const struct tu_subpass *subpass = cmd->state.subpass++;
+ /* TODO:
+ * if msaa samples change between subpasses,
+ * attachment store is broken for some attachments
+ */
+ if (subpass->resolve_attachments) {
+ tu6_emit_blit_scissor(cmd, cs, true);
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ uint32_t a = subpass->resolve_attachments[i].attachment;
+ if (a != VK_ATTACHMENT_UNUSED) {
+ tu6_emit_resolve(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
+ }
+ }
+ }
+
+ VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ return;
+ }
+
+ /* invalidate because reading input attachments will cache GMEM and
+ * the cache isn't updated when GMEM is written
+ * TODO: is there a no-cache bit for textures?
+ */
+ if (cmd->state.subpass->input_count)
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+
+ /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
+ tu6_emit_zs(cmd, cmd->state.subpass, cs);
+ tu6_emit_mrt(cmd, cmd->state.subpass, cs);
+ tu6_emit_msaa(cmd, cmd->state.subpass, cs);
+ tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
+
+ /* Emit flushes so that input attachments will read the correct value. This
+ * is for sysmem only, although it shouldn't do much harm on gmem.
+ */
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+
+ /* TODO:
+ * since we don't know how to do GMEM->GMEM resolve,
+ * resolve attachments are resolved to memory then loaded to GMEM again if needed
+ */
+ if (subpass->resolve_attachments) {
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ uint32_t a = subpass->resolve_attachments[i].attachment;
+ if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
+ tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
+ tu6_emit_predicated_blit(cmd, cs, a, a, false);
+ }
+ }
+ }
+}
+
+void
+tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo)
+{
+ tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
+}
+
+struct tu_draw_info
+{
+ /**
+ * Number of vertices.
+ */
+ uint32_t count;
+
+ /**
+ * Index of the first vertex.
+ */
+ int32_t vertex_offset;
+
+ /**
+ * First instance id.
+ */
+ uint32_t first_instance;
+
+ /**
+ * Number of instances.
+ */
+ uint32_t instance_count;
+
+ /**
+ * First index (indexed draws only).
+ */
+ uint32_t first_index;
+
+ /**
+ * Whether it's an indexed draw.
+ */
+ bool indexed;
+
+ /**
+ * Indirect draw parameters resource.
+ */
+ struct tu_buffer *indirect;
+ uint64_t indirect_offset;
+ uint32_t stride;
+
+ /**
+ * Draw count parameters resource.
+ */
+ struct tu_buffer *count_buffer;
+ uint64_t count_buffer_offset;
+};
+
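+/* masks selecting which of the per-mode command streams (binning pass,
+ * GMEM rendering, sysmem rendering) a CP_SET_DRAW_STATE group is
+ * executed in
+ */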
+#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
+
+enum tu_draw_state_group_id
+{
+ TU_DRAW_STATE_PROGRAM,
+ TU_DRAW_STATE_PROGRAM_BINNING,
+ TU_DRAW_STATE_VI,
+ TU_DRAW_STATE_VI_BINNING,
+ TU_DRAW_STATE_VP,
+ TU_DRAW_STATE_RAST,
+ TU_DRAW_STATE_DS,
+ TU_DRAW_STATE_BLEND,
+ TU_DRAW_STATE_VS_CONST,
+ TU_DRAW_STATE_FS_CONST,
+ TU_DRAW_STATE_VS_TEX,
+ TU_DRAW_STATE_FS_TEX_SYSMEM,
+ TU_DRAW_STATE_FS_TEX_GMEM,
+ TU_DRAW_STATE_FS_IBO,
+ TU_DRAW_STATE_VS_PARAMS,
+
+ TU_DRAW_STATE_COUNT,
+};
+
+struct tu_draw_state_group
+{
+ enum tu_draw_state_group_id id;
+ uint32_t enable_mask;
+ struct tu_cs_entry ib;
+};
+
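+/* look up the tu_sampler for entry i/array_index of a sampler map,
+ * preferring the immutable samplers baked into the set layout when
+ * present
+ */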
+static const struct tu_sampler *
+sampler_ptr(struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map, unsigned i,
+ unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ if (layout->immutable_samplers_offset) {
+ const struct tu_sampler *immutable_samplers =
+ tu_immutable_samplers(set->layout, layout);
+
+ return &immutable_samplers[array_index];
+ }
+
+ switch (layout->type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4];
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return (struct tu_sampler*) &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
+ array_index *
+ (A6XX_TEX_CONST_DWORDS +
+ sizeof(struct tu_sampler) / 4)];
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+}
+
+static void
+write_tex_const(struct tu_cmd_buffer *cmd,
+ uint32_t *dst,
+ struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index, bool is_sysmem)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ switch (layout->type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index * A6XX_TEX_CONST_DWORDS],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index *
+ (A6XX_TEX_CONST_DWORDS +
+ sizeof(struct tu_sampler) / 4)],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+
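+ /* for GMEM rendering, input attachments must be read from GMEM rather
+ * than from the image BO: patch the descriptor into a tiled 2D view of
+ * the attachment's GMEM region (0x100000 is presumably the GMEM base in
+ * the GPU address space)
+ */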
+ if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
+ array_index].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ assert(att->gmem_offset >= 0);
+
+ dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+ dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+ dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
+ dst[2] |=
+ A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
+ dst[3] = 0;
+ dst[4] = 0x100000 + att->gmem_offset;
+ dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+ for (unsigned j = 6; j < A6XX_TEX_CONST_DWORDS; j++)
+ dst[j] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
+ }
+}
+
+static void
+write_image_ibo(struct tu_cmd_buffer *cmd,
+ uint32_t *dst,
+ struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
+
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
+ A6XX_TEX_CONST_DWORDS * 4);
+}
+
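+/* return the 64-bit GPU address of a buffer descriptor: dynamic buffers
+ * live in the per-bind-point dynamic_buffers state, the rest are packed
+ * as two dwords in the descriptor set itself
+ */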
+static uint64_t
+buffer_ptr(struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ switch (layout->type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
+ array_index];
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
+ set->mapped_ptr[layout->offset / 4 + array_index * 2];
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+}
+
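+/* geometry-pipe stages load state through CP_LOAD_STATE6_GEOM,
+ * fragment/compute stages through CP_LOAD_STATE6_FRAG
+ */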
+static inline uint32_t
+tu6_stage2opcode(gl_shader_stage type)
+{
+ switch (type) {
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ return CP_LOAD_STATE6_GEOM;
+ case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_COMPUTE:
+ case MESA_SHADER_KERNEL:
+ return CP_LOAD_STATE6_FRAG;
+ default:
+ unreachable("bad shader type");
+ }
+}
+
+static inline enum a6xx_state_block
+tu6_stage2shadersb(gl_shader_stage type)
+{
+ switch (type) {
+ case MESA_SHADER_VERTEX:
+ return SB6_VS_SHADER;
+ case MESA_SHADER_FRAGMENT:
+ return SB6_FS_SHADER;
+ case MESA_SHADER_COMPUTE:
+ case MESA_SHADER_KERNEL:
+ return SB6_CS_SHADER;
+ default:
+ unreachable("bad shader type");
+ return ~0;
+ }
+}
+
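+/* upload the const ranges ir3's UBO analysis decided to push: range 0 is
+ * the push constant block and is emitted inline, the remaining ranges are
+ * UBOs loaded indirectly from their GPU address
+ */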
+static void
+tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ uint32_t *push_constants)
+{
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ const struct ir3_ubo_analysis_state *state = &link->ubo_state;
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
+ if (state->range[i].start < state->range[i].end) {
+ uint32_t size = state->range[i].end - state->range[i].start;
+ uint32_t offset = state->range[i].start;
+
+ /* even if the start of the const buffer range is within the shader's
+ * constlen, the end may not be; clamp the size so we never upload
+ * past constlen:
+ */
+ size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
+
+ if (size == 0)
+ continue;
+
+ /* things should be aligned to vec4: */
+ debug_assert((state->range[i].offset % 16) == 0);
+ debug_assert((size % 16) == 0);
+ debug_assert((offset % 16) == 0);
+
+ if (i == 0) {
+ /* push constants */
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, 0);
+ for (unsigned j = 0; j < size / 4; j++)
+ tu_cs_emit(cs, push_constants[j + offset / 4]);
+ continue;
+ }
+
+ /* Look through the UBO map to find our UBO index, and get the VA for
+ * that UBO.
+ */
+ uint64_t va = 0;
+ uint32_t ubo_idx = i - 1;
+ uint32_t ubo_map_base = 0;
+ for (int j = 0; j < link->ubo_map.num; j++) {
+ if (ubo_idx >= ubo_map_base &&
+ ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
+ va = buffer_ptr(descriptors_state, &link->ubo_map, j,
+ ubo_idx - ubo_map_base);
+ break;
+ }
+ ubo_map_base += link->ubo_map.array_size[j];
+ }
+ assert(va);
+
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit_qw(cs, va + offset);
+ }
+ }
+}
+
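+/* emit the table of UBO base addresses at const_state.offsets.ubo,
+ * padded to an even number of entries with ~0
+ */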
+static void
+tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type)
+{
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+
+ uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
+ uint32_t anum = align(num, 2);
+
+ if (!num)
+ return;
+
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
+ tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+ tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+
+ unsigned emitted = 0;
+ for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
+ for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
+ tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
+ emitted++;
+ }
+ }
+
+ for (; emitted < anum; emitted++) {
+ tu_cs_emit(cs, 0xffffffff);
+ tu_cs_emit(cs, 0xffffffff);
+ }
+}
+
+static struct tu_cs_entry
+tu6_emit_consts(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type)
+{
+ struct tu_cs cs;
+ tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
+
+ tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
+ tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
+
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
+
+static VkResult
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+ const struct tu_draw_info *draw,
+ struct tu_cs_entry *entry)
+{
+ /* TODO: fill out more than just base instance */
+ const struct tu_program_descriptor_linkage *link =
+ &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+ const struct ir3_const_state *const_state = &link->const_state;
+ struct tu_cs cs;
+
+ if (const_state->offsets.driver_param >= link->constlen) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+
+ STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
+
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, draw->first_instance);
+ tu_cs_emit(&cs, 0);
+
+ *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return VK_SUCCESS;
+}
+
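+/* build the texture and sampler descriptor tables for a stage in the
+ * sub_cs sub-stream, then emit the CP_LOAD_STATE6 packets and SP_*_TEX_*
+ * pointer registers that bind them
+ */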
+static VkResult
+tu6_emit_textures(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ struct tu_cs_entry *entry,
+ bool *needs_border,
+ bool is_sysmem)
+{
+ struct tu_device *device = cmd->device;
+ struct tu_cs *draw_state = &cmd->sub_cs;
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ VkResult result;
+
+ if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ /* allocate and fill texture state */
+ struct ts_cs_memory tex_const;
+ result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
+ A6XX_TEX_CONST_DWORDS, &tex_const);
+ if (result != VK_SUCCESS)
+ return result;
+
+ int tex_index = 0;
+ for (unsigned i = 0; i < link->texture_map.num; i++) {
+ for (int j = 0; j < link->texture_map.array_size[i]; j++) {
+ write_tex_const(cmd,
+ &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
+ descriptors_state, &link->texture_map, i, j,
+ is_sysmem);
+ }
+ }
+
+ /* allocate and fill sampler state */
+ struct ts_cs_memory tex_samp = { 0 };
+ if (link->sampler_map.num_desc) {
+ result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
+ A6XX_TEX_SAMP_DWORDS, &tex_samp);
+ if (result != VK_SUCCESS)
+ return result;
+
+ int sampler_index = 0;
+ for (unsigned i = 0; i < link->sampler_map.num; i++) {
+ for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ &link->sampler_map,
+ i, j);
+ memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
+ sampler->state, sizeof(sampler->state));
+ *needs_border |= sampler->needs_border;
+ }
+ }
+ }
+
+ unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
+ enum a6xx_state_block sb;
+
+ switch (type) {
+ case MESA_SHADER_VERTEX:
+ sb = SB6_VS_TEX;
+ tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ sb = SB6_FS_TEX;
+ tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
+ break;
+ case MESA_SHADER_COMPUTE:
+ sb = SB6_CS_TEX;
+ tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
+ break;
+ default:
+ unreachable("bad state block");
+ }
+
+ struct tu_cs cs;
+ result = tu_cs_begin_sub_stream(device, draw_state, 16, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (link->sampler_map.num_desc) {
+ /* output sampler state: */
+ tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+
+ tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+ }
+
+ /* emit texture state: */
+ tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
+ tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+
+ tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
+ tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+
+ tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
+ tu_cs_emit(&cs, link->texture_map.num_desc);
+
+ *entry = tu_cs_end_sub_stream(draw_state, &cs);
+ return VK_SUCCESS;
+}
+
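+/* build the IBO descriptor table for SSBOs and storage images: SSBOs are
+ * described as untyped 32-bit "1D" buffers, while storage images copy
+ * what appears to be the second of the two descriptors stored per image
+ * by the descriptor-set code
+ */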
+static VkResult
+tu6_emit_ibo(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ struct tu_cs_entry *entry)
+{
+ struct tu_device *device = cmd->device;
+ struct tu_cs *draw_state = &cmd->sub_cs;
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ VkResult result;
+
+ unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
+
+ if (num_desc == 0) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ struct ts_cs_memory ibo_const;
+ result = tu_cs_alloc(device, draw_state, num_desc,
+ A6XX_TEX_CONST_DWORDS, &ibo_const);
+ if (result != VK_SUCCESS)
+ return result;
+
+ int ssbo_index = 0;
+ for (unsigned i = 0; i < link->ssbo_map.num; i++) {
+ for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
+ uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+
+ uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
+ /* We don't expose robustBufferAccess, so leave the size unlimited. */
+ uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
+
+ dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
+ dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
+ A6XX_IBO_1_HEIGHT(sz >> 15);
+ dst[2] = A6XX_IBO_2_UNK4 |
+ A6XX_IBO_2_UNK31 |
+ A6XX_IBO_2_TYPE(A6XX_TEX_1D);
+ dst[3] = 0;
+ dst[4] = va;
+ dst[5] = va >> 32;
+ for (int k = 6; k < A6XX_TEX_CONST_DWORDS; k++)
+ dst[k] = 0;
+
+ ssbo_index++;
+ }
+ }
+
+ for (unsigned i = 0; i < link->image_map.num; i++) {
+ for (int j = 0; j < link->image_map.array_size[i]; j++) {
+ uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+
+ write_image_ibo(cmd, dst,
+ descriptors_state, &link->image_map, i, j);
+
+ ssbo_index++;
+ }
+ }
+
+ assert(ssbo_index == num_desc);
+
+ struct tu_cs cs;
+ result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ uint32_t opcode, ibo_addr_reg;
+ enum a6xx_state_block sb;
+ enum a6xx_state_type st;
+
+ switch (type) {
+ case MESA_SHADER_FRAGMENT:
+ opcode = CP_LOAD_STATE6;
+ st = ST6_SHADER;
+ sb = SB6_IBO;
+ ibo_addr_reg = REG_A6XX_SP_IBO_LO;
+ break;
+ case MESA_SHADER_COMPUTE:
+ opcode = CP_LOAD_STATE6_FRAG;
+ st = ST6_IBO;
+ sb = SB6_CS_SHADER;
+ ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
+ break;
+ default:
+ unreachable("unsupported stage for ibos");
+ }
+
+ /* emit ibo state: */
+ tu_cs_emit_pkt7(&cs, opcode, 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(st) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
+
+ tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
+
+ *entry = tu_cs_end_sub_stream(draw_state, &cs);
+ return VK_SUCCESS;
+}
+
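+/* hardware border color table: one 128-byte entry per VkBorderColor, with
+ * the border value pre-packed for each format class a sampler might read
+ * it as; note the integer variants store integer 1, not 1.0f, in the
+ * fp32/fp16 slots
+ */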
+struct PACKED bcolor_entry {
+ uint32_t fp32[4];
+ uint16_t ui16[4];
+ int16_t si16[4];
+ uint16_t fp16[4];
+ uint16_t rgb565;
+ uint16_t rgb5a1;
+ uint16_t rgba4;
+ uint8_t __pad0[2];
+ uint8_t ui8[4];
+ int8_t si8[4];
+ uint32_t rgb10a2;
+ uint32_t z24; /* also s8? */
+ uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
+ uint8_t __pad1[56];
+} border_color[] = {
+ [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
+ [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
+ .fp32[3] = 0x3f800000,
+ .ui16[3] = 0xffff,
+ .si16[3] = 0x7fff,
+ .fp16[3] = 0x3c00,
+ .rgb5a1 = 0x8000,
+ .rgba4 = 0xf000,
+ .ui8[3] = 0xff,
+ .si8[3] = 0x7f,
+ .rgb10a2 = 0xc0000000,
+ .srgb[3] = 0x3c00,
+ },
+ [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
+ .fp32[3] = 1,
+ .fp16[3] = 1,
+ },
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 0x3f800000,
+ .ui16[0 ... 3] = 0xffff,
+ .si16[0 ... 3] = 0x7fff,
+ .fp16[0 ... 3] = 0x3c00,
+ .rgb565 = 0xffff,
+ .rgb5a1 = 0xffff,
+ .rgba4 = 0xffff,
+ .ui8[0 ... 3] = 0xff,
+ .si8[0 ... 3] = 0x7f,
+ .rgb10a2 = 0xffffffff,
+ .z24 = 0xffffff,
+ .srgb[0 ... 3] = 0x3c00,
+ },
+ [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 1,
+ .fp16[0 ... 3] = 1,
+ },
+};
+
+static VkResult
+tu6_emit_border_color(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs)
+{
+ STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
+
+ const struct tu_pipeline *pipeline = cmd->state.pipeline;
+ struct tu_descriptor_state *descriptors_state =
+ &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
+ const struct tu_descriptor_map *vs_sampler =
+ &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
+ const struct tu_descriptor_map *fs_sampler =
+ &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
+ struct ts_cs_memory ptr;
+
+ VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
+ vs_sampler->num_desc + fs_sampler->num_desc,
+ 128 / 4,
+ &ptr);
+ if (result != VK_SUCCESS)
+ return result;