+/* Copy one A6XX_TEX_CONST_DWORDS-sized texture descriptor for element
+ * `array_index` of texture-map entry `i` out of its descriptor set into
+ * `dst`.  Input-attachment descriptors are then patched in place so the
+ * shader reads the attachment's per-tile GMEM copy instead of system
+ * memory.
+ */
+write_tex_const(struct tu_cmd_buffer *cmd,
+ uint32_t *dst,
+ struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ switch (layout->type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ /* one texture descriptor per array element */
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index * A6XX_TEX_CONST_DWORDS],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ /* combined image/samplers store sampler state after each texture
+ * descriptor, so the per-element stride is larger
+ */
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ array_index *
+ (A6XX_TEX_CONST_DWORDS +
+ sizeof(struct tu_sampler) / 4)],
+ A6XX_TEX_CONST_DWORDS * 4);
+ break;
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+
+ if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+ const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+ uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
+ array_index].attachment;
+ const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
+
+ /* attachment must have GMEM storage assigned */
+ assert(att->gmem_offset >= 0);
+
+ /* rewrite tile mode, type, pitch and base address so the descriptor
+ * targets the attachment's GMEM copy
+ */
+ dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+ dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+ dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
+ dst[2] |=
+ A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+ A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
+ dst[3] = 0;
+ /* NOTE(review): 0x100000 is presumably the GMEM aperture base in the
+ * GPU address space -- confirm against the a6xx memory map
+ */
+ dst[4] = 0x100000 + att->gmem_offset;
+ dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+ /* NOTE(review): this inner `i` shadows the parameter `i` */
+ for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+ dst[i] = 0;
+
+ if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ tu_finishme("patch input attachment pitch for secondary cmd buffer");
+ }
+}
+
+/* Copy the IBO (storage-image) descriptor for element `array_index` of
+ * image-map entry `i` out of its descriptor set into `dst`.
+ */
+static void
+write_image_ibo(struct tu_cmd_buffer *cmd,
+ uint32_t *dst,
+ struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
+
+ /* Each storage-image element appears to occupy two descriptors in the
+ * set (a texture descriptor plus an IBO descriptor); the `* 2 + 1`
+ * selects the second one -- confirm against the descriptor-set packing
+ * code.
+ */
+ memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
+ (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
+ A6XX_TEX_CONST_DWORDS * 4);
+}
+
+/* Return the 64-bit GPU address of the buffer bound at element
+ * `array_index` of map entry `i`.  Dynamic uniform/storage buffers come
+ * from the pre-resolved dynamic_buffers array; plain ones are read
+ * straight out of the descriptor set (address stored as lo/hi dwords).
+ */
+static uint64_t
+buffer_ptr(struct tu_descriptor_state *descriptors_state,
+ const struct tu_descriptor_map *map,
+ unsigned i, unsigned array_index)
+{
+ assert(descriptors_state->valid & (1 << map->set[i]));
+
+ struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
+ assert(map->binding[i] < set->layout->binding_count);
+
+ const struct tu_descriptor_set_binding_layout *layout =
+ &set->layout->binding[map->binding[i]];
+
+ switch (layout->type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ /* dynamic buffers were resolved at bind time */
+ return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
+ array_index];
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
+ /* address is stored as two consecutive dwords, lo first */
+ const uint32_t *desc = &set->mapped_ptr[layout->offset / 4 + array_index * 2];
+ return ((uint64_t) desc[1] << 32) | desc[0];
+ }
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+}
+
+/* Map a shader stage to the CP_LOAD_STATE6 opcode variant that loads
+ * state for it: fragment/compute use the FRAG variant, the geometry
+ * pipeline stages use the GEOM variant.
+ */
+static inline uint32_t
+tu6_stage2opcode(gl_shader_stage type)
+{
+ switch (type) {
+ case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_COMPUTE:
+ case MESA_SHADER_KERNEL:
+ return CP_LOAD_STATE6_FRAG;
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ return CP_LOAD_STATE6_GEOM;
+ default:
+ unreachable("bad shader type");
+ }
+}
+
+/* Map a shader stage to its shader state block.  Only VS/FS/CS are
+ * supported here.
+ */
+static inline enum a6xx_state_block
+tu6_stage2shadersb(gl_shader_stage type)
+{
+ if (type == MESA_SHADER_VERTEX)
+ return SB6_VS_SHADER;
+ if (type == MESA_SHADER_FRAGMENT)
+ return SB6_FS_SHADER;
+ if (type == MESA_SHADER_COMPUTE || type == MESA_SHADER_KERNEL)
+ return SB6_CS_SHADER;
+ unreachable("bad shader type");
+ return ~0;
+}
+
+/* Emit the constant ranges ir3's UBO analysis decided to lower to shader
+ * constants for this stage.  Range 0 is the push-constant block, copied
+ * inline from `push_constants`; every later range is a UBO slice that
+ * the CP loads indirectly from the UBO's GPU address.
+ *
+ * Fix vs. previous version: the inline push-constant copy loop reused
+ * `i` as its index, shadowing the outer range index `i` (-Wshadow); it
+ * is renamed to `dw`.  No emitted packet changes.
+ */
+static void
+tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ uint32_t *push_constants)
+{
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ const struct ir3_ubo_analysis_state *state = &link->ubo_state;
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
+ if (state->range[i].start < state->range[i].end) {
+ uint32_t size = state->range[i].end - state->range[i].start;
+ uint32_t offset = state->range[i].start;
+
+ /* and even if the start of the const buffer is before
+ * first_immediate, the end may not be:
+ */
+ size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
+
+ if (size == 0)
+ continue;
+
+ /* things should be aligned to vec4: */
+ debug_assert((state->range[i].offset % 16) == 0);
+ debug_assert((size % 16) == 0);
+ debug_assert((offset % 16) == 0);
+
+ if (i == 0) {
+ /* push constants: written inline (SS6_DIRECT) */
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit(cs, 0);
+ tu_cs_emit(cs, 0);
+ for (unsigned dw = 0; dw < size / 4; dw++)
+ tu_cs_emit(cs, push_constants[dw + offset / 4]);
+ continue;
+ }
+
+ /* Look through the UBO map to find our UBO index, and get the VA for
+ * that UBO.
+ */
+ uint64_t va = 0;
+ uint32_t ubo_idx = i - 1;
+ uint32_t ubo_map_base = 0;
+ for (int j = 0; j < link->ubo_map.num; j++) {
+ if (ubo_idx >= ubo_map_base &&
+ ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
+ va = buffer_ptr(descriptors_state, &link->ubo_map, j,
+ ubo_idx - ubo_map_base);
+ break;
+ }
+ ubo_map_base += link->ubo_map.array_size[j];
+ }
+ assert(va);
+
+ /* UBO range: the CP fetches it from the buffer (SS6_INDIRECT) */
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+ tu_cs_emit_qw(cs, va + offset);
+ }
+ }
+}
+
+/* Emit the table of UBO base addresses that the shader indexes at
+ * runtime, written inline into the stage's const space at
+ * const_state.offsets.ubo.
+ */
+static void
+tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type)
+{
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+
+ uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
+ /* NUM_UNIT counts vec4s and each 64-bit pointer is half a vec4, so the
+ * emitted pointer count is padded up to an even number
+ */
+ uint32_t anum = align(num, 2);
+
+ if (!num)
+ return;
+
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+ CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
+ tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+ tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+
+ /* one 64-bit GPU address per UBO, in map order */
+ unsigned emitted = 0;
+ for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
+ for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
+ tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
+ emitted++;
+ }
+ }
+
+ /* pad the packet body out to the aligned pointer count */
+ for (; emitted < anum; emitted++) {
+ tu_cs_emit(cs, 0xffffffff);
+ tu_cs_emit(cs, 0xffffffff);
+ }
+}
+
+/* Build a sub-stream containing the lowered user constants and the UBO
+ * address table for one stage, and return an entry referencing it.
+ *
+ * Fix vs. previous version: the result of tu_cs_begin_sub_stream() was
+ * ignored, so on allocation failure we would emit into an invalid
+ * sub-stream.  Now an empty entry is returned instead, matching the
+ * empty-entry convention used by tu6_emit_vs_params().
+ * TODO: propagate the VkResult to the caller instead of dropping it.
+ */
+static struct tu_cs_entry
+tu6_emit_consts(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type)
+{
+ struct tu_cs cs;
+
+ /* TODO: maximum size? */
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs);
+ if (result != VK_SUCCESS)
+ return (struct tu_cs_entry) {};
+
+ tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
+ tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
+
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
+
+/* Emit the vertex-stage "driver params" const vec4 for a draw into a
+ * sub-stream.  Currently only the base-instance slot is filled in; the
+ * other three dwords are zero.  On success *entry references the stream
+ * (or is empty if the shader doesn't use driver params).
+ */
+static VkResult
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+ const struct tu_draw_info *draw,
+ struct tu_cs_entry *entry)
+{
+ /* TODO: fill out more than just base instance */
+ const struct tu_program_descriptor_linkage *link =
+ &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+ const struct ir3_const_state *const_state = &link->const_state;
+ struct tu_cs cs;
+
+ /* driver params fall past the shader's const space; nothing to emit */
+ if (const_state->offsets.driver_param >= link->constlen) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* one vec4 of constants, written inline */
+ tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(1));
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+
+ /* the hard-coded dword layout below depends on this */
+ STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
+
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, 0);
+ tu_cs_emit(&cs, draw->first_instance); /* dword 2 == IR3_DP_INSTID_BASE */
+ tu_cs_emit(&cs, 0);
+
+ *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+ return VK_SUCCESS;
+}
+
+/* Allocate and fill the texture-descriptor and sampler tables for one
+ * stage, then build a sub-stream that loads them and points the stage's
+ * TEX_CONST/TEX_SAMP/TEX_COUNT registers at them.  *needs_border is
+ * or'd with true if any sampler requires a border color upload.
+ */
+static VkResult
+tu6_emit_textures(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ struct tu_cs_entry *entry,
+ bool *needs_border)
+{
+ struct tu_device *device = cmd->device;
+ struct tu_cs *draw_state = &cmd->sub_cs;
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ VkResult result;
+
+ /* this stage samples nothing; leave an empty entry */
+ if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ /* allocate and fill texture state */
+ struct ts_cs_memory tex_const;
+ result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
+ A6XX_TEX_CONST_DWORDS, &tex_const);
+ if (result != VK_SUCCESS)
+ return result;
+
+ int tex_index = 0;
+ for (unsigned i = 0; i < link->texture_map.num; i++) {
+ for (int j = 0; j < link->texture_map.array_size[i]; j++) {
+ write_tex_const(cmd,
+ &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
+ descriptors_state, &link->texture_map, i, j);
+ }
+ }
+
+ /* allocate and fill sampler state */
+ struct ts_cs_memory tex_samp = { 0 };
+ if (link->sampler_map.num_desc) {
+ result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
+ A6XX_TEX_SAMP_DWORDS, &tex_samp);
+ if (result != VK_SUCCESS)
+ return result;
+
+ int sampler_index = 0;
+ for (unsigned i = 0; i < link->sampler_map.num; i++) {
+ for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ &link->sampler_map,
+ i, j);
+ memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
+ sampler->state, sizeof(sampler->state));
+ *needs_border |= sampler->needs_border;
+ }
+ }
+ }
+
+ /* pick the per-stage state block and register offsets */
+ unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
+ enum a6xx_state_block sb;
+
+ switch (type) {
+ case MESA_SHADER_VERTEX:
+ sb = SB6_VS_TEX;
+ tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ sb = SB6_FS_TEX;
+ tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
+ break;
+ case MESA_SHADER_COMPUTE:
+ sb = SB6_CS_TEX;
+ tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
+ tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
+ tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
+ break;
+ default:
+ unreachable("bad state block");
+ }
+
+ struct tu_cs cs;
+ result = tu_cs_begin_sub_stream(device, draw_state, 16, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (link->sampler_map.num_desc) {
+ /* output sampler state: */
+ tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+
+ /* also point the stage's TEX_SAMP base registers at the table */
+ tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
+ tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
+ }
+
+ /* emit texture state: */
+ tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
+ tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+
+ /* and the TEX_CONST base registers */
+ tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
+ tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
+
+ tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
+ tu_cs_emit(&cs, link->texture_map.num_desc);
+
+ *entry = tu_cs_end_sub_stream(draw_state, &cs);
+ return VK_SUCCESS;
+}
+
+/* Build the IBO descriptor table for a stage -- SSBO descriptors first,
+ * then storage-image descriptors -- and emit the packets that load it
+ * and point the stage's IBO base registers at it.
+ */
+static VkResult
+tu6_emit_ibo(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline,
+ struct tu_descriptor_state *descriptors_state,
+ gl_shader_stage type,
+ struct tu_cs_entry *entry)
+{
+ struct tu_device *device = cmd->device;
+ struct tu_cs *draw_state = &cmd->sub_cs;
+ const struct tu_program_descriptor_linkage *link =
+ &pipeline->program.link[type];
+ VkResult result;
+
+ unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
+
+ if (num_desc == 0) {
+ *entry = (struct tu_cs_entry) {};
+ return VK_SUCCESS;
+ }
+
+ struct ts_cs_memory ibo_const;
+ result = tu_cs_alloc(device, draw_state, num_desc,
+ A6XX_TEX_CONST_DWORDS, &ibo_const);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* describe each SSBO as a large 1D buffer of 32-bit uints; the element
+ * count is split across the WIDTH (low 15 bits) and HEIGHT fields
+ */
+ int ssbo_index = 0;
+ for (unsigned i = 0; i < link->ssbo_map.num; i++) {
+ for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
+ uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+
+ uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
+ /* We don't expose robustBufferAccess, so leave the size unlimited. */
+ uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
+
+ dst[0] = A6XX_IBO_0_FMT(TFMT6_32_UINT);
+ dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
+ A6XX_IBO_1_HEIGHT(sz >> 15);
+ dst[2] = A6XX_IBO_2_UNK4 |
+ A6XX_IBO_2_UNK31 |
+ A6XX_IBO_2_TYPE(A6XX_TEX_1D);
+ dst[3] = 0;
+ dst[4] = va;
+ dst[5] = va >> 32;
+ /* NOTE(review): this inner `i` shadows the outer loop index `i` */
+ for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+ dst[i] = 0;
+
+ ssbo_index++;
+ }
+ }
+
+ /* storage images follow the SSBOs in the same table */
+ for (unsigned i = 0; i < link->image_map.num; i++) {
+ for (int j = 0; j < link->image_map.array_size[i]; j++) {
+ uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+
+ write_image_ibo(cmd, dst,
+ descriptors_state, &link->image_map, i, j);
+
+ ssbo_index++;
+ }
+ }
+
+ assert(ssbo_index == num_desc);
+
+ struct tu_cs cs;
+ result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* the CP_LOAD_STATE6 encoding for IBOs differs per stage */
+ uint32_t opcode, ibo_addr_reg;
+ enum a6xx_state_block sb;
+ enum a6xx_state_type st;
+
+ switch (type) {
+ case MESA_SHADER_FRAGMENT:
+ opcode = CP_LOAD_STATE6;
+ st = ST6_SHADER;
+ sb = SB6_IBO;
+ ibo_addr_reg = REG_A6XX_SP_IBO_LO;
+ break;
+ case MESA_SHADER_COMPUTE:
+ opcode = CP_LOAD_STATE6_FRAG;
+ st = ST6_IBO;
+ sb = SB6_CS_SHADER;
+ ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
+ break;
+ default:
+ unreachable("unsupported stage for ibos");
+ }
+
+ /* emit texture state: */
+ tu_cs_emit_pkt7(&cs, opcode, 3);
+ tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(st) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
+
+ /* also point the stage's IBO base registers at the table */
+ tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
+ tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
+
+ *entry = tu_cs_end_sub_stream(draw_state, &cs);
+ return VK_SUCCESS;
+}
+
+/* Border-color palette indexed by VkBorderColor, in the 128-byte layout
+ * the a6xx sampler hardware consumes (size checked by the STATIC_ASSERT
+ * in tu6_emit_border_color): each entry stores the border value
+ * pre-converted into every format class the hardware may sample.
+ *
+ * NOTE(review): this table looks like it could be `static const`;
+ * confirm no other file references it before changing its linkage.
+ */
+struct PACKED bcolor_entry {
+ uint32_t fp32[4];
+ uint16_t ui16[4];
+ int16_t si16[4];
+ uint16_t fp16[4];
+ uint16_t rgb565;
+ uint16_t rgb5a1;
+ uint16_t rgba4;
+ uint8_t __pad0[2];
+ uint8_t ui8[4];
+ int8_t si8[4];
+ uint32_t rgb10a2;
+ uint32_t z24; /* also s8? */
+ uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
+ uint8_t __pad1[56];
+} border_color[] = {
+ /* transparent black is all-zero in every encoding */
+ [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
+ [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
+ /* rgb = 0, alpha = 1.0 (or max for the normalized encodings) */
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
+ .fp32[3] = 0x3f800000,
+ .ui16[3] = 0xffff,
+ .si16[3] = 0x7fff,
+ .fp16[3] = 0x3c00,
+ .rgb5a1 = 0x8000,
+ .rgba4 = 0xf000,
+ .ui8[3] = 0xff,
+ .si8[3] = 0x7f,
+ .rgb10a2 = 0xc0000000,
+ .srgb[3] = 0x3c00,
+ },
+ /* integer 1 stored in the fp32/fp16 slots -- presumably the hardware
+ * reads these slots raw for integer formats; confirm against the
+ * freedreno a6xx behavior
+ */
+ [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
+ .fp32[3] = 1,
+ .fp16[3] = 1,
+ },
+ /* all channels = 1.0 / max */
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 0x3f800000,
+ .ui16[0 ... 3] = 0xffff,
+ .si16[0 ... 3] = 0x7fff,
+ .fp16[0 ... 3] = 0x3c00,
+ .rgb565 = 0xffff,
+ .rgb5a1 = 0xffff,
+ .rgba4 = 0xffff,
+ .ui8[0 ... 3] = 0xff,
+ .si8[0 ... 3] = 0x7f,
+ .rgb10a2 = 0xffffffff,
+ .z24 = 0xffffff,
+ .srgb[0 ... 3] = 0x3c00,
+ },
+ [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 1,
+ .fp16[0 ... 3] = 1,
+ },
+};
+
+/* Upload one 128-byte border-color entry per graphics sampler (VS
+ * samplers first, then FS -- presumably matching the hardware's sampler
+ * index order; verify) and point SP_TP_BORDER_COLOR_BASE_ADDR at the
+ * table.
+ */
+static VkResult
+tu6_emit_border_color(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs)
+{
+ STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
+
+ const struct tu_pipeline *pipeline = cmd->state.pipeline;
+ struct tu_descriptor_state *descriptors_state =
+ &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
+ const struct tu_descriptor_map *vs_sampler =
+ &pipeline->program.link[MESA_SHADER_VERTEX].sampler_map;
+ const struct tu_descriptor_map *fs_sampler =
+ &pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
+ struct ts_cs_memory ptr;
+
+ /* one 128-byte (32-dword) entry per sampler */
+ VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
+ vs_sampler->num_desc + fs_sampler->num_desc,
+ 128 / 4,
+ &ptr);
+ if (result != VK_SUCCESS)
+ return result;
+
+ for (unsigned i = 0; i < vs_sampler->num; i++) {
+ for (unsigned j = 0; j < vs_sampler->array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ vs_sampler, i, j);
+ memcpy(ptr.map, &border_color[sampler->border], 128);
+ ptr.map += 128 / 4;
+ }
+ }
+
+ for (unsigned i = 0; i < fs_sampler->num; i++) {
+ for (unsigned j = 0; j < fs_sampler->array_size[i]; j++) {
+ const struct tu_sampler *sampler = sampler_ptr(descriptors_state,
+ fs_sampler, i, j);
+ memcpy(ptr.map, &border_color[sampler->border], 128);
+ ptr.map += 128 / 4;
+ }
+ }
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
+ tu_cs_emit_qw(cs, ptr.iova);
+ return VK_SUCCESS;
+}
+
+static VkResult