#include "tu_cs.h"
+/* Emit IB that preloads the descriptors that the shader uses */
+
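+/* CP_LOAD_STATE6_GEOM and CP_LOAD_STATE6_FRAG appear to feed two separate
+ * state processors: the geometry stages (VS/HS/DS/GS) use the _GEOM variant
+ * while FS and CS use _FRAG.
+ */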
+static inline uint32_t
+tu6_vkstage2opcode(VkShaderStageFlags stage)
+{
+ switch (stage) {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ return CP_LOAD_STATE6_GEOM;
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ case VK_SHADER_STAGE_COMPUTE_BIT:
+ return CP_LOAD_STATE6_FRAG;
+ default:
+ unreachable("bad shader type");
+ }
+}
+
+static enum a6xx_state_block
+tu6_tex_stage2sb(VkShaderStageFlags stage)
+{
+ switch (stage) {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ return SB6_VS_TEX;
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ return SB6_HS_TEX;
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ return SB6_DS_TEX;
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ return SB6_GS_TEX;
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ return SB6_FS_TEX;
+ case VK_SHADER_STAGE_COMPUTE_BIT:
+ return SB6_CS_TEX;
+ default:
+ unreachable("bad shader stage");
+ }
+}
+
+static enum a6xx_state_block
+tu6_ubo_stage2sb(VkShaderStageFlags stage)
+{
+ switch (stage) {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ return SB6_VS_SHADER;
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ return SB6_HS_SHADER;
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ return SB6_DS_SHADER;
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ return SB6_GS_SHADER;
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ return SB6_FS_SHADER;
+ case VK_SHADER_STAGE_COMPUTE_BIT:
+ return SB6_CS_SHADER;
+ default:
+ unreachable("bad shader stage");
+ }
+}
+
+static void
+emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
+ enum a6xx_state_block sb, unsigned base, unsigned offset,
+ unsigned count)
+{
+ /* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
+ * clear if emitting more packets will even help anything. Presumably the
+ * descriptor cache is relatively small, and these packets stop doing
+ * anything when there are too many descriptors.
+ */
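+ /* With STATE_SRC set to SS6_BINDLESS, the qword below is repurposed: the
+ * low bits hold the dword offset within the descriptor set, and base << 28
+ * selects which bindless base register that offset is relative to.
+ */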
+ tu_cs_emit_pkt7(cs, opcode, 3);
+ tu_cs_emit(cs,
+ CP_LOAD_STATE6_0_STATE_TYPE(st) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+ CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
+ tu_cs_emit_qw(cs, offset | (base << 28));
+}
+
+static unsigned
+tu6_load_state_size(struct tu_pipeline_layout *layout, bool compute)
+{
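+ /* Each packet emitted by emit_load_state() is 4 dwords: a pkt7 header
+ * plus 3 payload dwords.
+ */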
+ const unsigned load_state_size = 4;
+ unsigned size = 0;
+ for (unsigned i = 0; i < layout->num_sets; i++) {
+ struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
+ for (unsigned j = 0; j < set_layout->binding_count; j++) {
+ struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
+ unsigned count = 0;
+ /* Note: some users (amber, for example) pass in VK_SHADER_STAGE_ALL,
+ * which includes a bunch of extra bits, so filter these out by using
+ * VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
+ */
+ VkShaderStageFlags stages = compute ?
+ binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
+ binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
+ unsigned stage_count = util_bitcount(stages);
+ switch (binding->type) {
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ /* IBO-backed resources only need one packet for all graphics stages */
+ if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
+ count += 1;
+ if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
+ count += 1;
+ break;
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ /* Textures and UBOs need a packet for each stage */
+ count = stage_count;
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ /* Because of how we pack combined images and samplers, we
+ * currently can't use one packet for the whole array.
+ */
+ count = stage_count * binding->array_size * 2;
+ break;
+ default:
+ unreachable("bad descriptor type");
+ }
+ size += count * load_state_size;
+ }
+ }
+ return size;
+}
+
+static void
+tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
+{
+ unsigned size = tu6_load_state_size(pipeline->layout, compute);
+ if (size == 0)
+ return;
+
+ struct tu_cs cs;
+ tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);
+
+ struct tu_pipeline_layout *layout = pipeline->layout;
+ for (unsigned i = 0; i < layout->num_sets; i++) {
+ struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
+ for (unsigned j = 0; j < set_layout->binding_count; j++) {
+ struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
+ unsigned base = i;
+ unsigned offset = binding->offset / 4;
+ /* Note: some users (amber, for example) pass in VK_SHADER_STAGE_ALL,
+ * which includes a bunch of extra bits, so filter these out by using
+ * VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
+ */
+ VkShaderStageFlags stages = compute ?
+ binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
+ binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
+ unsigned count = binding->array_size;
+ if (count == 0 || stages == 0)
+ continue;
+ switch (binding->type) {
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ base = MAX_SETS;
+ offset = (layout->input_attachment_count +
+ layout->set[i].dynamic_offset_start +
+ binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
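+ /* Dynamic descriptors live in a reserved set at index MAX_SETS, laid out
+ * as the input attachments followed by the dynamic UBO/SSBO descriptors,
+ * hence the extra offset terms.
+ */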
+ /* fallthrough */
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ /* IBO-backed resources only need one packet for all graphics stages */
+ if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
+ emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
+ base, offset, count);
+ }
+ if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
+ emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
+ base, offset, count);
+ }
+ break;
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ base = MAX_SETS;
+ offset = (layout->set[i].input_attachment_start +
+ binding->input_attachment_offset) * A6XX_TEX_CONST_DWORDS;
+ /* fallthrough */
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
+ unsigned stage_log2;
+ for_each_bit(stage_log2, stages) {
+ VkShaderStageFlags stage = 1 << stage_log2;
+ emit_load_state(&cs, tu6_vkstage2opcode(stage),
+ binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
+ ST6_SHADER : ST6_CONSTANTS,
+ tu6_tex_stage2sb(stage), base, offset, count);
+ }
+ break;
+ }
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ base = MAX_SETS;
+ offset = (layout->input_attachment_count +
+ layout->set[i].dynamic_offset_start +
+ binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
+ /* fallthrough */
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
+ unsigned stage_log2;
+ for_each_bit(stage_log2, stages) {
+ VkShaderStageFlags stage = 1 << stage_log2;
+ emit_load_state(&cs, tu6_vkstage2opcode(stage), ST6_UBO,
+ tu6_ubo_stage2sb(stage), base, offset, count);
+ }
+ break;
+ }
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
+ unsigned stage_log2;
+ for_each_bit(stage_log2, stages) {
+ VkShaderStageFlags stage = 1 << stage_log2;
+ /* TODO: We could emit less CP_LOAD_STATE6 if we used
+ * struct-of-arrays instead of array-of-structs.
+ */
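+ /* Descriptors are packed as [texture, sampler] pairs, so the texture
+ * const occupies an even A6XX_TEX_CONST_DWORDS slot and its sampler the
+ * following odd slot.
+ */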
+ for (unsigned k = 0; k < count; k++) {
+ unsigned tex_offset = offset + 2 * k * A6XX_TEX_CONST_DWORDS;
+ unsigned sam_offset = offset + (2 * k + 1) * A6XX_TEX_CONST_DWORDS;
+ emit_load_state(&cs, tu6_vkstage2opcode(stage),
+ ST6_CONSTANTS, tu6_tex_stage2sb(stage),
+ base, tex_offset, 1);
+ emit_load_state(&cs, tu6_vkstage2opcode(stage),
+ ST6_SHADER, tu6_tex_stage2sb(stage),
+ base, sam_offset, 1);
+ }
+ }
+ break;
+ }
+ default:
+ unreachable("bad descriptor type");
+ }
+ }
+ }
+
+ pipeline->load_state.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);
+}
+
struct tu_pipeline_builder
{
struct tu_device *device;
bool rasterizer_discard;
/* these states are affected by rasterizer_discard */
VkSampleCountFlagBits samples;
- bool use_depth_stencil_attachment;
bool use_color_attachments;
uint32_t color_attachment_count;
VkFormat color_attachment_formats[MAX_RTS];
+ VkFormat depth_attachment_format;
};
static enum tu_dynamic_state_bits
return TU_DYNAMIC_STENCIL_WRITE_MASK;
case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
return TU_DYNAMIC_STENCIL_REFERENCE;
+ case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
+ return TU_DYNAMIC_SAMPLE_LOCATIONS;
default:
unreachable("invalid dynamic state");
return 0;
}
}
-static const VkVertexInputAttributeDescription *
-tu_find_vertex_input_attribute(
- const VkPipelineVertexInputStateCreateInfo *vi_info, uint32_t slot)
-{
- assert(slot >= VERT_ATTRIB_GENERIC0);
- slot -= VERT_ATTRIB_GENERIC0;
- for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
- if (vi_info->pVertexAttributeDescriptions[i].location == slot)
- return &vi_info->pVertexAttributeDescriptions[i];
- }
- return NULL;
-}
-
-static const VkVertexInputBindingDescription *
-tu_find_vertex_input_binding(
- const VkPipelineVertexInputStateCreateInfo *vi_info,
- const VkVertexInputAttributeDescription *vi_attr)
-{
- assert(vi_attr);
- for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
- if (vi_info->pVertexBindingDescriptions[i].binding == vi_attr->binding)
- return &vi_info->pVertexBindingDescriptions[i];
- }
- return NULL;
-}
-
static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
}
}
-static unsigned
-tu_shader_nibo(const struct tu_shader *shader)
-{
- /* Don't use ir3_shader_nibo(), because that would include declared but
- * unused storage images and SSBOs.
- */
- return shader->ssbo_map.num_desc + shader->image_map.num_desc;
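+/* The ENABLED and BINDLESS_* bits sit at the same positions in the
+ * SP_xS_CONFIG registers of every stage, so we can get away with using the
+ * VS bitfield definitions for all of them.
+ */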
+static uint32_t
+emit_xs_config(const struct ir3_shader_variant *sh)
+{
+ if (sh->instrlen) {
+ return A6XX_SP_VS_CONFIG_ENABLED |
+ COND(sh->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
+ COND(sh->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
+ COND(sh->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO) |
+ COND(sh->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO);
+ } else {
+ return 0;
+ }
}
static void
if (vs->need_fine_derivatives)
sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_DIFF_FINE;
- uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(shader->texture_map.num_desc) |
- A6XX_SP_VS_CONFIG_NSAMP(shader->sampler_map.num_desc);
- if (vs->instrlen)
- sp_vs_config |= A6XX_SP_VS_CONFIG_ENABLED;
-
tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CTRL_REG0, 1);
tu_cs_emit(cs, sp_vs_ctrl);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CONFIG, 2);
- tu_cs_emit(cs, sp_vs_config);
+ tu_cs_emit(cs, emit_xs_config(vs));
tu_cs_emit(cs, vs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_VS_CNTL, 1);
tu6_emit_hs_config(struct tu_cs *cs, struct tu_shader *shader,
const struct ir3_shader_variant *hs)
{
- uint32_t sp_hs_config = 0;
- if (hs->instrlen)
- sp_hs_config |= A6XX_SP_HS_CONFIG_ENABLED;
-
tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
tu_cs_emit(cs, 0);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CONFIG, 2);
- tu_cs_emit(cs, sp_hs_config);
+ tu_cs_emit(cs, emit_xs_config(hs));
tu_cs_emit(cs, hs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_HS_CNTL, 1);
tu6_emit_ds_config(struct tu_cs *cs, struct tu_shader *shader,
const struct ir3_shader_variant *ds)
{
- uint32_t sp_ds_config = 0;
- if (ds->instrlen)
- sp_ds_config |= A6XX_SP_DS_CONFIG_ENABLED;
-
tu_cs_emit_pkt4(cs, REG_A6XX_SP_DS_CONFIG, 2);
- tu_cs_emit(cs, sp_ds_config);
+ tu_cs_emit(cs, emit_xs_config(ds));
tu_cs_emit(cs, ds->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_DS_CNTL, 1);
const struct ir3_shader_variant *gs)
{
bool has_gs = gs->type != MESA_SHADER_NONE;
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
tu_cs_emit(cs, 0);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CONFIG, 2);
- tu_cs_emit(cs, COND(has_gs,
- A6XX_SP_GS_CONFIG_ENABLED |
- A6XX_SP_GS_CONFIG_NIBO(ir3_shader_nibo(gs)) |
- A6XX_SP_GS_CONFIG_NTEX(gs->num_samp) |
- A6XX_SP_GS_CONFIG_NSAMP(gs->num_samp)));
+ tu_cs_emit(cs, emit_xs_config(gs));
tu_cs_emit(cs, gs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_GS_CNTL, 1);
if (fs->need_fine_derivatives)
sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_DIFF_FINE;
- uint32_t sp_fs_config = 0;
- unsigned shader_nibo = 0;
- if (shader) {
- shader_nibo = tu_shader_nibo(shader);
- sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(shader->texture_map.num_desc) |
- A6XX_SP_FS_CONFIG_NSAMP(shader->sampler_map.num_desc) |
- A6XX_SP_FS_CONFIG_NIBO(shader_nibo);
- }
-
- if (fs->instrlen)
- sp_fs_config |= A6XX_SP_FS_CONFIG_ENABLED;
-
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CTRL_REG0, 1);
tu_cs_emit(cs, sp_fs_ctrl);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CONFIG, 2);
- tu_cs_emit(cs, sp_fs_config);
+ tu_cs_emit(cs, emit_xs_config(fs));
tu_cs_emit(cs, fs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL, 1);
tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) |
A6XX_HLSQ_FS_CNTL_ENABLED);
-
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_IBO_COUNT, 1);
- tu_cs_emit(cs, shader_nibo);
}
static void
A6XX_HLSQ_CS_CNTL_ENABLED);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CONFIG, 2);
- tu_cs_emit(cs, A6XX_SP_CS_CONFIG_ENABLED |
- A6XX_SP_CS_CONFIG_NIBO(tu_shader_nibo(shader)) |
- A6XX_SP_CS_CONFIG_NTEX(shader->texture_map.num_desc) |
- A6XX_SP_CS_CONFIG_NSAMP(shader->sampler_map.num_desc));
+ tu_cs_emit(cs, emit_xs_config(v));
tu_cs_emit(cs, v->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CTRL_REG0, 1);
A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */
-
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_IBO_COUNT, 1);
- tu_cs_emit(cs, tu_shader_nibo(shader));
}
static void
tu6_emit_vs_system_values(struct tu_cs *cs,
const struct ir3_shader_variant *vs,
- const struct ir3_shader_variant *gs)
+ const struct ir3_shader_variant *gs,
+ bool primid_passthru)
{
const uint32_t vertexid_regid =
ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
0xfc00); /* VFD_CONTROL_5 */
- tu_cs_emit(cs, 0x00000000); /* VFD_CONTROL_6 */
+ tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU)); /* VFD_CONTROL_6 */
}
/* Add any missing varyings needed for stream-out. Otherwise varyings not
unsigned k = out->register_index;
unsigned idx;
+ /* Skip it if there's an unused reg in the middle of the outputs. */
+ if (v->outputs[k].regid == INVALID_REG)
+ continue;
+
tf->ncomp[out->output_buffer] += out->num_components;
/* linkage map sorted by order frag shader wants things, so
int size = DIV_ROUND_UP(num_loc, 4);
size = (MIN2(size + base, consumer->constlen) - base) * 4;
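+ /* It seems possible for the consumer's constlen to leave no room for the
+ * patch locations at all, in which case there is nothing to load.
+ */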
+ if (size <= 0)
+ return;
tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, SB6_GS_SHADER, 0, size,
patch_locs);
bool has_gs = gs->type != MESA_SHADER_NONE;
const struct ir3_shader_variant *last_shader = has_gs ? gs : vs;
struct ir3_shader_linkage linkage = { 0 };
- ir3_link_shaders(&linkage, last_shader, fs);
+ ir3_link_shaders(&linkage, last_shader, fs, true);
if (last_shader->shader->stream_output.num_outputs)
tu6_link_streamout(&linkage, last_shader);
- BITSET_DECLARE(vpc_var_enables, 128) = { 0 };
- for (uint32_t i = 0; i < linkage.cnt; i++) {
- const uint32_t comp_count = util_last_bit(linkage.var[i].compmask);
- for (uint32_t j = 0; j < comp_count; j++)
- BITSET_SET(vpc_var_enables, linkage.var[i].loc + j);
- }
+ /* We do this after linking shaders in order to know whether PrimID
+ * passthrough needs to be enabled.
+ */
+ bool primid_passthru = linkage.primid_loc != 0xff;
+ tu6_emit_vs_system_values(cs, vs, gs, primid_passthru);
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
- tu_cs_emit(cs, ~vpc_var_enables[0]);
- tu_cs_emit(cs, ~vpc_var_enables[1]);
- tu_cs_emit(cs, ~vpc_var_enables[2]);
- tu_cs_emit(cs, ~vpc_var_enables[3]);
+ tu_cs_emit(cs, ~linkage.varmask[0]);
+ tu_cs_emit(cs, ~linkage.varmask[1]);
+ tu_cs_emit(cs, ~linkage.varmask[2]);
+ tu_cs_emit(cs, ~linkage.varmask[3]);
/* a6xx finds position/pointsize at the end */
const uint32_t position_regid =
tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vpc_dst_count);
tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);
+ tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMID_CNTL, 1);
+ tu_cs_emit(cs, COND(primid_passthru, A6XX_PC_PRIMID_CNTL_PRIMID_PASSTHRU));
+
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
(fs->total_in > 0 ? A6XX_VPC_CNTL_0_VARYING : 0) |
- 0xff00ff00);
+ A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |
+ A6XX_VPC_CNTL_0_UNKLOC(0xff));
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
tu_cs_emit(cs, 0);
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
tu_cs_emit(cs, vs->shader->output_size);
}
A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
}
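+ /* With bindless resources, the texture/sampler IDs presumably don't fit
+ * in the regular SP_FS_PREFETCH_CMD fields, so the full bindless IDs are
+ * supplied via a second set of per-prefetch registers.
+ */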
+ if (fs->num_sampler_prefetch > 0) {
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);
+ for (int i = 0; i < fs->num_sampler_prefetch; i++) {
+ const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
+ tu_cs_emit(cs,
+ A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |
+ A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));
+ }
+ }
+
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
tu_cs_emit(cs, 0x7);
tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
tu6_emit_gs_config(cs, builder->shaders[MESA_SHADER_GEOMETRY], gs);
tu6_emit_fs_config(cs, builder->shaders[MESA_SHADER_FRAGMENT], fs);
- tu6_emit_vs_system_values(cs, vs, gs);
tu6_emit_vpc(cs, vs, gs, fs, binning_pass, tf);
tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
tu6_emit_fs_inputs(cs, fs);
static void
tu6_emit_vertex_input(struct tu_cs *cs,
const struct ir3_shader_variant *vs,
- const VkPipelineVertexInputStateCreateInfo *vi_info,
+ const VkPipelineVertexInputStateCreateInfo *info,
uint8_t bindings[MAX_VERTEX_ATTRIBS],
- uint16_t strides[MAX_VERTEX_ATTRIBS],
- uint16_t offsets[MAX_VERTEX_ATTRIBS],
uint32_t *count)
{
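+ /* a6xx splits vertex fetch into two tables: one VFD_FETCH entry per
+ * vertex buffer binding and one VFD_DECODE entry per attribute, with each
+ * decode referring back to its fetch entry by index.
+ */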
+ uint32_t vfd_fetch_idx = 0;
uint32_t vfd_decode_idx = 0;
+ uint32_t binding_instanced = 0; /* bitmask of instanced bindings */
- for (uint32_t i = 0; i < vs->inputs_count; i++) {
- if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
- continue;
+ for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
+ const VkVertexInputBindingDescription *binding =
+ &info->pVertexBindingDescriptions[i];
- const VkVertexInputAttributeDescription *vi_attr =
- tu_find_vertex_input_attribute(vi_info, vs->inputs[i].slot);
- const VkVertexInputBindingDescription *vi_binding =
- tu_find_vertex_input_binding(vi_info, vi_attr);
- assert(vi_attr && vi_binding);
+ tu_cs_emit_regs(cs,
+ A6XX_VFD_FETCH_STRIDE(vfd_fetch_idx, binding->stride));
- const struct tu_native_format format = tu6_format_vtx(vi_attr->format);
+ if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
+ binding_instanced |= 1 << binding->binding;
- uint32_t vfd_decode = A6XX_VFD_DECODE_INSTR_IDX(vfd_decode_idx) |
- A6XX_VFD_DECODE_INSTR_FORMAT(format.fmt) |
- A6XX_VFD_DECODE_INSTR_SWAP(format.swap) |
- A6XX_VFD_DECODE_INSTR_UNK30;
- if (vi_binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
- vfd_decode |= A6XX_VFD_DECODE_INSTR_INSTANCED;
- if (!vk_format_is_int(vi_attr->format))
- vfd_decode |= A6XX_VFD_DECODE_INSTR_FLOAT;
+ bindings[vfd_fetch_idx] = binding->binding;
+ vfd_fetch_idx++;
+ }
- const uint32_t vfd_decode_step_rate = 1;
+ /* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */
- const uint32_t vfd_dest_cntl =
- A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
- A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid);
+ for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
+ const VkVertexInputAttributeDescription *attr =
+ &info->pVertexAttributeDescriptions[i];
+ uint32_t binding_idx, input_idx;
- tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DECODE(vfd_decode_idx), 2);
- tu_cs_emit(cs, vfd_decode);
- tu_cs_emit(cs, vfd_decode_step_rate);
+ for (binding_idx = 0; binding_idx < vfd_fetch_idx; binding_idx++) {
+ if (bindings[binding_idx] == attr->binding)
+ break;
+ }
+ assert(binding_idx < vfd_fetch_idx);
- tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DEST_CNTL(vfd_decode_idx), 1);
- tu_cs_emit(cs, vfd_dest_cntl);
+ for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
+ if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
+ break;
+ }
- bindings[vfd_decode_idx] = vi_binding->binding;
- strides[vfd_decode_idx] = vi_binding->stride;
- offsets[vfd_decode_idx] = vi_attr->offset;
+ /* attribute not used, skip it */
+ if (input_idx == vs->inputs_count)
+ continue;
+
+ const struct tu_native_format format = tu6_format_vtx(attr->format);
+ tu_cs_emit_regs(cs,
+ A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
+ .idx = binding_idx,
+ .offset = attr->offset,
+ .instanced = binding_instanced & (1 << attr->binding),
+ .format = format.fmt,
+ .swap = format.swap,
+ .unk30 = 1,
+ ._float = !vk_format_is_int(attr->format)),
+ A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, 1));
+
+ tu_cs_emit_regs(cs,
+ A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
+ .writemask = vs->inputs[input_idx].compmask,
+ .regid = vs->inputs[input_idx].regid));
vfd_decode_idx++;
- assert(vfd_decode_idx <= MAX_VERTEX_ATTRIBS);
}
- tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
- tu_cs_emit(
- cs, A6XX_VFD_CONTROL_0_VTXCNT(vfd_decode_idx) | (vfd_decode_idx << 8));
+ tu_cs_emit_regs(cs,
+ A6XX_VFD_CONTROL_0(
+ .fetch_cnt = vfd_fetch_idx,
+ .decode_cnt = vfd_decode_idx));
- *count = vfd_decode_idx;
+ *count = vfd_fetch_idx;
}
static uint32_t
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max.y - 1));
}
+void
+tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)
+{
+ if (!samp_loc) {
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);
+ tu_cs_emit(cs, 0);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);
+ tu_cs_emit(cs, 0);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);
+ tu_cs_emit(cs, 0);
+ return;
+ }
+
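+ /* We only support the minimum-required 1x1 sample location grid, with
+ * one location per sample.
+ */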
+ assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);
+ assert(samp_loc->sampleLocationGridSize.width == 1);
+ assert(samp_loc->sampleLocationGridSize.height == 1);
+
+ uint32_t sample_config =
+ A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;
+ uint32_t sample_locations = 0;
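+ /* Each sample's X/Y pair is packed into one byte of the register, hence
+ * the i*8 shift.
+ */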
+ for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {
+ sample_locations |=
+ (A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |
+ A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;
+ }
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);
+ tu_cs_emit(cs, sample_config);
+ tu_cs_emit(cs, sample_locations);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);
+ tu_cs_emit(cs, sample_config);
+ tu_cs_emit(cs, sample_locations);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);
+ tu_cs_emit(cs, sample_config);
+ tu_cs_emit(cs, sample_locations);
+}
+
static void
tu6_emit_gras_unknowns(struct tu_cs *cs)
{
static VkResult
tu_pipeline_create(struct tu_device *dev,
+ struct tu_pipeline_layout *layout,
+ bool compute,
const VkAllocationCallbacks *pAllocator,
struct tu_pipeline **out_pipeline)
{
tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);
- /* reserve the space now such that tu_cs_begin_sub_stream never fails */
- VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048);
+ /* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note
+ * that LOAD_STATE can potentially take up a large amount of space so we
+ * calculate its size explicitly.
+ */
+ unsigned load_state_size = tu6_load_state_size(layout, compute);
+ VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048 + load_state_size);
if (result != VK_SUCCESS) {
vk_free2(&dev->alloc, pAllocator, pipeline);
return result;
for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
stage > MESA_SHADER_NONE; stage--) {
const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
- if (!stage_info)
+ if (!stage_info && stage != MESA_SHADER_FRAGMENT)
continue;
struct tu_shader *shader =
link->ubo_state = v->shader->ubo_state;
link->const_state = v->shader->const_state;
link->constlen = v->constlen;
- link->texture_map = shader->texture_map;
- link->sampler_map = shader->sampler_map;
- link->ubo_map = shader->ubo_map;
- link->ssbo_map = shader->ssbo_map;
- link->image_map = shader->image_map;
+ link->push_consts = shader->push_consts;
}
static void
builder->shaders[i],
&builder->shaders[i]->variants[0]);
}
+
+ if (builder->shaders[MESA_SHADER_FRAGMENT]) {
+ memcpy(pipeline->program.input_attachment_idx,
+ builder->shaders[MESA_SHADER_FRAGMENT]->attachment_idx,
+ sizeof(pipeline->program.input_attachment_idx));
+ }
}
static void
struct tu_cs vi_cs;
tu_cs_begin_sub_stream(&pipeline->cs,
- MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+ MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
- pipeline->vi.bindings, pipeline->vi.strides,
- pipeline->vi.offsets, &pipeline->vi.count);
+ pipeline->vi.bindings, &pipeline->vi.count);
pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
if (vs->has_binning_pass) {
tu_cs_begin_sub_stream(&pipeline->cs,
- MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+ MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
tu6_emit_vertex_input(
&vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
- pipeline->vi.binning_strides, pipeline->vi.binning_offsets,
&pipeline->vi.binning_count);
pipeline->vi.binning_state_ib =
tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
* render pass the pipeline is created against does not use a
* depth/stencil attachment.
*
- * We disable both depth and stenil tests in those cases.
+ * Disable both depth and stencil tests if there is no ds attachment.
+ * Disable the depth test if the ds attachment is S8_UINT, since S8_UINT
+ * defines only the separate stencil attachment.
*/
static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
const VkPipelineDepthStencilStateCreateInfo *ds_info =
- builder->use_depth_stencil_attachment
+ builder->depth_attachment_format != VK_FORMAT_UNDEFINED
? builder->create_info->pDepthStencilState
: &dummy_ds_info;
+ const VkPipelineDepthStencilStateCreateInfo *ds_info_depth =
+ builder->depth_attachment_format != VK_FORMAT_S8_UINT
+ ? ds_info : &dummy_ds_info;
struct tu_cs ds_cs;
tu_cs_begin_sub_stream(&pipeline->cs, 12, &ds_cs);
/* move to hw ctx init? */
tu6_emit_alpha_control_disable(&ds_cs);
- tu6_emit_depth_control(&ds_cs, ds_info, builder->create_info->pRasterizationState);
+ tu6_emit_depth_control(&ds_cs, ds_info_depth,
+ builder->create_info->pRasterizationState);
tu6_emit_stencil_control(&ds_cs, ds_info);
if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
: &dummy_blend_info;
struct tu_cs blend_cs;
- tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 9, &blend_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 18, &blend_cs);
uint32_t blend_enable_mask;
tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_BLEND_CONSTANTS))
tu6_emit_blend_constants(&blend_cs, blend_info->blendConstants);
+ if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_SAMPLE_LOCATIONS)) {
+ const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
+ vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
+ const VkSampleLocationsInfoEXT *samp_loc = NULL;
+
+ if (sample_locations && sample_locations->sampleLocationsEnable)
+ samp_loc = &sample_locations->sampleLocationsInfo;
+
+ tu6_emit_sample_locations(&blend_cs, samp_loc);
+ }
+
tu6_emit_blend_control(&blend_cs, blend_enable_mask, msaa_info);
pipeline->blend.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &blend_cs);
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
struct tu_pipeline **pipeline)
{
- VkResult result = tu_pipeline_create(builder->device, builder->alloc,
- pipeline);
+ VkResult result = tu_pipeline_create(builder->device, builder->layout,
+ false, builder->alloc, pipeline);
if (result != VK_SUCCESS)
return result;
+ (*pipeline)->layout = builder->layout;
+
/* compile and upload shaders */
result = tu_pipeline_builder_compile_shaders(builder);
if (result == VK_SUCCESS)
tu_pipeline_builder_parse_rasterization(builder, *pipeline);
tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
+ tu6_emit_load_state(*pipeline, false);
/* we should have reserved enough space upfront such that the CS never
* grows
const struct tu_subpass *subpass =
&pass->subpasses[create_info->subpass];
- builder->use_depth_stencil_attachment =
- subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED;
+ const uint32_t a = subpass->depth_stencil_attachment.attachment;
+ builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
+ pass->attachments[a].format : VK_FORMAT_UNDEFINED;
assert(subpass->color_count == 0 ||
!create_info->pColorBlendState ||
*pPipeline = VK_NULL_HANDLE;
- result = tu_pipeline_create(dev, pAllocator, &pipeline);
+ result = tu_pipeline_create(dev, layout, true, pAllocator, &pipeline);
if (result != VK_SUCCESS)
return result;
tu6_emit_compute_program(&prog_cs, shader, &pipeline->program.binary_bo);
pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
+ tu6_emit_load_state(pipeline, true);
+
*pPipeline = tu_pipeline_to_handle(pipeline);
return VK_SUCCESS;