{
struct tu_device *device;
struct tu_pipeline_cache *cache;
+ struct tu_pipeline_layout *layout;
const VkAllocationCallbacks *alloc;
const VkGraphicsPipelineCreateInfo *create_info;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
return DI_PT_TRILIST;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
- return DI_PT_TRILIST;
+ return DI_PT_TRISTRIP;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
return DI_PT_TRIFAN;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
}
}
+/* Number of IBO slots (SSBOs followed by storage images) this shader stage
+ * actually uses; programmed into SP_*_CONFIG_NIBO and SP_*_IBO_COUNT.
+ */
+static unsigned
+tu_shader_nibo(const struct tu_shader *shader)
+{
+ /* Don't use ir3_shader_nibo(), because that would include declared but
+ * unused storage images and SSBOs.
+ */
+ return shader->ssbo_map.num_desc + shader->image_map.num_desc;
+}
+
static void
-tu6_emit_vs_config(struct tu_cs *cs, const struct ir3_shader_variant *vs)
+tu6_emit_vs_config(struct tu_cs *cs, struct tu_shader *shader,
+ const struct ir3_shader_variant *vs)
{
uint32_t sp_vs_ctrl =
A6XX_SP_VS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vs->info.max_reg + 1) |
A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(vs->branchstack);
- if (vs->num_samp)
+ if (vs->need_pixlod)
sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_PIXLODENABLE;
+ if (vs->need_fine_derivatives)
+ sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_DIFF_FINE;
- uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(vs->num_samp) |
- A6XX_SP_VS_CONFIG_NSAMP(vs->num_samp);
+ uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(shader->texture_map.num_desc) |
+ A6XX_SP_VS_CONFIG_NSAMP(shader->sampler_map.num_desc);
if (vs->instrlen)
sp_vs_config |= A6XX_SP_VS_CONFIG_ENABLED;
tu_cs_emit(cs, vs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_VS_CNTL, 1);
- tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(vs->constlen, 4)) | 0x100);
+ tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(vs->constlen, 4)) |
+ A6XX_HLSQ_VS_CNTL_ENABLED);
}
static void
-tu6_emit_hs_config(struct tu_cs *cs, const struct ir3_shader_variant *hs)
+tu6_emit_hs_config(struct tu_cs *cs, struct tu_shader *shader,
+ const struct ir3_shader_variant *hs)
{
uint32_t sp_hs_config = 0;
if (hs->instrlen)
}
static void
-tu6_emit_ds_config(struct tu_cs *cs, const struct ir3_shader_variant *ds)
+tu6_emit_ds_config(struct tu_cs *cs, struct tu_shader *shader,
+ const struct ir3_shader_variant *ds)
{
uint32_t sp_ds_config = 0;
if (ds->instrlen)
}
static void
-tu6_emit_gs_config(struct tu_cs *cs, const struct ir3_shader_variant *gs)
+tu6_emit_gs_config(struct tu_cs *cs, struct tu_shader *shader,
+ const struct ir3_shader_variant *gs)
{
uint32_t sp_gs_config = 0;
if (gs->instrlen)
}
static void
-tu6_emit_fs_config(struct tu_cs *cs, const struct ir3_shader_variant *fs)
+tu6_emit_fs_config(struct tu_cs *cs, struct tu_shader *shader,
+ const struct ir3_shader_variant *fs)
{
uint32_t sp_fs_ctrl =
A6XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) | 0x1000000 |
A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fs->info.max_reg + 1) |
A6XX_SP_FS_CTRL_REG0_MERGEDREGS |
A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(fs->branchstack);
- if (fs->total_in > 0 || fs->frag_coord)
+ if (fs->total_in > 0)
sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_VARYING;
- if (fs->num_samp > 0)
+ if (fs->need_pixlod)
sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_PIXLODENABLE;
+ if (fs->need_fine_derivatives)
+ sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_DIFF_FINE;
+
+ uint32_t sp_fs_config = 0;
+ unsigned shader_nibo = 0;
+ if (shader) {
+ shader_nibo = tu_shader_nibo(shader);
+ sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(shader->texture_map.num_desc) |
+ A6XX_SP_FS_CONFIG_NSAMP(shader->sampler_map.num_desc) |
+ A6XX_SP_FS_CONFIG_NIBO(shader_nibo);
+ }
- uint32_t sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(fs->num_samp) |
- A6XX_SP_FS_CONFIG_NSAMP(fs->num_samp);
if (fs->instrlen)
sp_fs_config |= A6XX_SP_FS_CONFIG_ENABLED;
- tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A99E, 1);
- tu_cs_emit(cs, 0x7fc0);
-
tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A9A8, 1);
tu_cs_emit(cs, 0);
tu_cs_emit(cs, fs->instrlen);
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL, 1);
- tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) | 0x100);
+ tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) |
+ A6XX_HLSQ_FS_CNTL_ENABLED);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_IBO_COUNT, 1);
+ tu_cs_emit(cs, shader_nibo);
+}
+
+/* Emit the static compute-shader configuration: HLSQ/SP config registers,
+ * descriptor counts, and the shader registers that receive the compute
+ * system values (workgroup ID, local invocation ID).
+ */
+static void
+tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
+ const struct ir3_shader_variant *v)
+{
+ tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
+ tu_cs_emit(cs, 0xff);
+
+ unsigned constlen = align(v->constlen, 4);
+ tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL, 1);
+ tu_cs_emit(cs, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
+ A6XX_HLSQ_CS_CNTL_ENABLED);
+
+ /* Texture/sampler/IBO counts come from the descriptor maps rather than
+ * the ir3 variant, so only descriptors the shader actually uses count.
+ */
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CONFIG, 2);
+ tu_cs_emit(cs, A6XX_SP_CS_CONFIG_ENABLED |
+ A6XX_SP_CS_CONFIG_NIBO(tu_shader_nibo(shader)) |
+ A6XX_SP_CS_CONFIG_NTEX(shader->texture_map.num_desc) |
+ A6XX_SP_CS_CONFIG_NSAMP(shader->sampler_map.num_desc));
+ tu_cs_emit(cs, v->instrlen);
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_CTRL_REG0, 1);
+ tu_cs_emit(cs, A6XX_SP_CS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
+ A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(v->info.max_reg + 1) |
+ A6XX_SP_CS_CTRL_REG0_MERGEDREGS |
+ A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
+ COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE) |
+ COND(v->need_fine_derivatives, A6XX_SP_CS_CTRL_REG0_DIFF_FINE));
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
+ tu_cs_emit(cs, 0x41);
+
+ uint32_t local_invocation_id =
+ ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
+ uint32_t work_group_id =
+ ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);
+
+ /* regid(63, 0) marks a register field as unused/invalid. */
+ tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
+ tu_cs_emit(cs,
+ A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
+ A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
+ A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
+ A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
+ tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_IBO_COUNT, 1);
+ tu_cs_emit(cs, tu_shader_nibo(shader));
}
static void
const struct ir3_shader_variant *vs)
{
const uint32_t vertexid_regid =
- ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
+ ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
const uint32_t instanceid_regid =
ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
ir3_find_output_regid(vs, VARYING_SLOT_POS);
const uint32_t pointsize_regid =
ir3_find_output_regid(vs, VARYING_SLOT_PSIZ);
- uint32_t pointsize_loc = 0xff;
- if (position_regid != regid(63, 0))
+ uint32_t pointsize_loc = 0xff, position_loc = 0xff;
+ if (position_regid != regid(63, 0)) {
+ position_loc = linkage.max_loc;
ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
+ }
if (pointsize_regid != regid(63, 0)) {
pointsize_loc = linkage.max_loc;
ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
0xff00ff00);
tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
- tu_cs_emit(cs, A6XX_VPC_PACK_NUMNONPOSVAR(fs->total_in) |
+ tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));
}
static void
-tu6_emit_fs_system_values(struct tu_cs *cs,
- const struct ir3_shader_variant *fs)
-{
- const uint32_t frontfacing_regid =
- ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
- const uint32_t sampleid_regid =
- ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
- const uint32_t samplemaskin_regid =
- ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
- const uint32_t fragcoord_xy_regid =
- ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
- const uint32_t fragcoord_zw_regid = (fragcoord_xy_regid != regid(63, 0))
- ? (fragcoord_xy_regid + 2)
- : fragcoord_xy_regid;
- const uint32_t varyingcoord_regid =
- ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PIXEL);
+tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
+{
+ uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
+ uint32_t ij_pix_regid, ij_samp_regid, ij_cent_regid, ij_size_regid;
+ uint32_t smask_in_regid;
+
+ bool sample_shading = fs->per_samp; /* TODO | key->sample_shading; */
+ bool enable_varyings = fs->total_in > 0;
+
+ samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
+ smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
+ face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
+ coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
+ zwcoord_regid = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
+ ij_pix_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
+ ij_samp_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
+ ij_cent_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
+ ij_size_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);
+
+ if (fs->num_sampler_prefetch > 0) {
+ assert(VALIDREG(ij_pix_regid));
+ /* also, it seems like ij_pix is *required* to be r0.x */
+ assert(ij_pix_regid == regid(0, 0));
+ }
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
+ tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
+ A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
+ 0x7000); // XXX);
+ for (int i = 0; i < fs->num_sampler_prefetch; i++) {
+ const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
+ tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
+ A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
+ A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
+ A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
+ A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
+ COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
+ A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
+ }
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
tu_cs_emit(cs, 0x7);
- tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(frontfacing_regid) |
- A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(sampleid_regid) |
- A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(samplemaskin_regid) |
- A6XX_HLSQ_CONTROL_2_REG_SIZE(regid(63, 0)));
- tu_cs_emit(cs,
- A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(varyingcoord_regid) |
- A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(regid(63, 0)) |
- 0xfc00fc00);
- tu_cs_emit(cs,
- A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(fragcoord_xy_regid) |
- A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(fragcoord_zw_regid) |
- A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(regid(63, 0)) |
- 0x0000fc00);
+ tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
+ A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
+ A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
+ A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_size_regid));
+ tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(ij_pix_regid) |
+ A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(ij_cent_regid) |
+ 0xfc00fc00);
+ tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
+ A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
+ A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(ij_samp_regid) |
+ 0x0000fc00);
tu_cs_emit(cs, 0xfc);
-}
-static void
-tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
-{
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
- tu_cs_emit(cs, fs->total_in > 0 ? 3 : 1);
+ tu_cs_emit(cs, enable_varyings ? 3 : 1);
tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A982, 1);
tu_cs_emit(cs, 0); /* XXX */
tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
tu_cs_emit(cs, 0xff); /* XXX */
- uint32_t gras_cntl = 0;
- if (fs->total_in > 0)
- gras_cntl |= A6XX_GRAS_CNTL_VARYING;
- if (fs->frag_coord) {
- gras_cntl |= A6XX_GRAS_CNTL_SIZE | A6XX_GRAS_CNTL_XCOORD |
- A6XX_GRAS_CNTL_YCOORD | A6XX_GRAS_CNTL_ZCOORD |
- A6XX_GRAS_CNTL_WCOORD;
- }
-
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
- tu_cs_emit(cs, gras_cntl);
-
- uint32_t rb_render_control = 0;
- if (fs->total_in > 0) {
- rb_render_control =
- A6XX_RB_RENDER_CONTROL0_VARYING | A6XX_RB_RENDER_CONTROL0_UNK10;
- }
- if (fs->frag_coord) {
- rb_render_control |=
- A6XX_RB_RENDER_CONTROL0_SIZE | A6XX_RB_RENDER_CONTROL0_XCOORD |
- A6XX_RB_RENDER_CONTROL0_YCOORD | A6XX_RB_RENDER_CONTROL0_ZCOORD |
- A6XX_RB_RENDER_CONTROL0_WCOORD;
- }
+ tu_cs_emit(cs,
+ CONDREG(ij_pix_regid, A6XX_GRAS_CNTL_VARYING) |
+ CONDREG(ij_cent_regid, A6XX_GRAS_CNTL_CENTROID) |
+ CONDREG(ij_samp_regid, A6XX_GRAS_CNTL_PERSAMP_VARYING) |
+ COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_GRAS_CNTL_SIZE) |
+ COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
+ COND(fs->frag_coord,
+ A6XX_GRAS_CNTL_SIZE |
+ A6XX_GRAS_CNTL_XCOORD |
+ A6XX_GRAS_CNTL_YCOORD |
+ A6XX_GRAS_CNTL_ZCOORD |
+ A6XX_GRAS_CNTL_WCOORD) |
+ COND(fs->frag_face, A6XX_GRAS_CNTL_SIZE));
tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
- tu_cs_emit(cs, rb_render_control);
- tu_cs_emit(cs, (fs->frag_face ? A6XX_RB_RENDER_CONTROL1_FACENESS : 0));
+ tu_cs_emit(cs,
+ CONDREG(ij_pix_regid, A6XX_RB_RENDER_CONTROL0_VARYING) |
+ CONDREG(ij_cent_regid, A6XX_RB_RENDER_CONTROL0_CENTROID) |
+ CONDREG(ij_samp_regid, A6XX_RB_RENDER_CONTROL0_PERSAMP_VARYING) |
+ COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
+ COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE) |
+ COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
+ COND(fs->frag_coord,
+ A6XX_RB_RENDER_CONTROL0_SIZE |
+ A6XX_RB_RENDER_CONTROL0_XCOORD |
+ A6XX_RB_RENDER_CONTROL0_YCOORD |
+ A6XX_RB_RENDER_CONTROL0_ZCOORD |
+ A6XX_RB_RENDER_CONTROL0_WCOORD) |
+ COND(fs->frag_face, A6XX_RB_RENDER_CONTROL0_SIZE));
+ tu_cs_emit(cs,
+ CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
+ CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
+ CONDREG(ij_size_regid, A6XX_RB_RENDER_CONTROL1_SIZE) |
+ COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
+ tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
+ tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX
+
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
+ tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
}
static void
const struct ir3_shader_variant *fs,
uint32_t mrt_count)
{
- const uint32_t fragdepth_regid =
- ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
+ uint32_t smask_regid, posz_regid;
+
+ posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
+ smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);
+
uint32_t fragdata_regid[8];
if (fs->color0_mrt) {
fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
}
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
- tu_cs_emit(
- cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(fragdepth_regid) | 0xfcfc0000);
+ tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
+ A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
+ 0xfc000000);
tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
}
tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
- tu_cs_emit(cs, fs->writes_pos ? A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z : 0);
+ tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
+ COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK));
tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));
uint32_t gras_su_depth_plane_cntl = 0;
uint32_t rb_depth_plane_cntl = 0;
- if (fs->no_earlyz | fs->writes_pos) {
+ if (fs->no_earlyz || fs->writes_pos) {
gras_su_depth_plane_cntl |= A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
rb_depth_plane_cntl |= A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
}
}
}
+/* Upload a shader variant's immediate constants into the constant file with
+ * an inline CP_LOAD_STATE6 packet (one vec4, i.e. 4 dwords, per constant).
+ * `opcode` selects the GEOM/FRAG load-state packet and `block` the target
+ * shader state block.
+ */
+static void
+tu6_emit_immediates(struct tu_cs *cs, const struct ir3_shader_variant *v,
+ uint32_t opcode, enum a6xx_state_block block)
+{
+ /* dummy variant */
+ if (!v->shader)
+ return;
+
+ const struct ir3_const_state *const_state = &v->shader->const_state;
+ uint32_t base = const_state->offsets.immediate;
+ int size = const_state->immediates_count;
+
+ /* truncate size to avoid writing constants that shader
+ * does not use:
+ */
+ size = MIN2(size + base, v->constlen) - base;
+
+ if (size <= 0)
+ return;
+
+ tu_cs_emit_pkt7(cs, opcode, 3 + size * 4);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(block) |
+ CP_LOAD_STATE6_0_NUM_UNIT(size));
+ /* Direct (inline) data, so the external source address is unused. */
+ tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+ tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+
+ for (unsigned i = 0; i < size; i++) {
+ tu_cs_emit(cs, const_state->immediates[i].val[0]);
+ tu_cs_emit(cs, const_state->immediates[i].val[1]);
+ tu_cs_emit(cs, const_state->immediates[i].val[2]);
+ tu_cs_emit(cs, const_state->immediates[i].val[3]);
+ }
+}
+
static void
tu6_emit_program(struct tu_cs *cs,
const struct tu_pipeline_builder *builder,
fs = &dummy_variant;
}
- tu6_emit_vs_config(cs, vs);
- tu6_emit_hs_config(cs, hs);
- tu6_emit_ds_config(cs, ds);
- tu6_emit_gs_config(cs, gs);
- tu6_emit_fs_config(cs, fs);
+ tu6_emit_vs_config(cs, builder->shaders[MESA_SHADER_VERTEX], vs);
+ tu6_emit_hs_config(cs, builder->shaders[MESA_SHADER_TESS_CTRL], hs);
+ tu6_emit_ds_config(cs, builder->shaders[MESA_SHADER_TESS_EVAL], ds);
+ tu6_emit_gs_config(cs, builder->shaders[MESA_SHADER_GEOMETRY], gs);
+ tu6_emit_fs_config(cs, builder->shaders[MESA_SHADER_FRAGMENT], fs);
tu6_emit_vs_system_values(cs, vs);
tu6_emit_vpc(cs, vs, fs, binning_pass);
tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
- tu6_emit_fs_system_values(cs, fs);
tu6_emit_fs_inputs(cs, fs);
tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count);
tu6_emit_shader_object(cs, MESA_SHADER_VERTEX, vs, binary_bo,
- builder->shader_offsets[MESA_SHADER_VERTEX]);
+ binning_pass ? builder->binning_vs_offset : builder->shader_offsets[MESA_SHADER_VERTEX]);
tu6_emit_shader_object(cs, MESA_SHADER_FRAGMENT, fs, binary_bo,
builder->shader_offsets[MESA_SHADER_FRAGMENT]);
+
+ tu6_emit_immediates(cs, vs, CP_LOAD_STATE6_GEOM, SB6_VS_SHADER);
+ if (!binning_pass)
+ tu6_emit_immediates(cs, fs, CP_LOAD_STATE6_FRAG, SB6_FS_SHADER);
}
static void
{
uint32_t vfd_decode_idx = 0;
- /* why do we go beyond inputs_count? */
- assert(vs->inputs_count + 1 <= MAX_VERTEX_ATTRIBS);
- for (uint32_t i = 0; i <= vs->inputs_count; i++) {
+ for (uint32_t i = 0; i < vs->inputs_count; i++) {
if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
continue;
offsets[vfd_decode_idx] = vi_attr->offset;
vfd_decode_idx++;
+ assert(vfd_decode_idx <= MAX_VERTEX_ATTRIBS);
}
tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
guardband_adj.height = tu6_guardband_adj(max.y - min.y);
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0, 6);
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]));
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]));
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]));
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]));
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]));
- tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]));
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]).value);
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]).value);
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]).value);
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]).value);
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]).value);
+ tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]).value);
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min.x) |
static void
tu6_emit_gras_unknowns(struct tu_cs *cs)
{
- tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8000, 1);
- tu_cs_emit(cs, 0x80);
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_DISABLE_CNTL, 1);
+ tu_cs_emit(cs, A6XX_GRAS_DISABLE_CNTL_VP_CLIP_CODE_IGNORE);
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8001, 1);
tu_cs_emit(cs, 0x0);
- tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8004, 1);
+ tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LAYER_CNTL, 1);
tu_cs_emit(cs, 0x0);
}
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POINT_MINMAX, 2);
tu_cs_emit(cs, A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f / 16.0f) |
A6XX_GRAS_SU_POINT_MINMAX_MAX(4092.0f));
- tu_cs_emit(cs, A6XX_GRAS_SU_POINT_SIZE(1.0f));
+ tu_cs_emit(cs, A6XX_GRAS_SU_POINT_SIZE(1.0f).value);
}
static uint32_t
float slope_factor)
{
tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
- tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor));
- tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor));
- tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp));
+ tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
+ tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
+ tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
}
static void
uint32_t blend_enable_mask,
const VkPipelineMultisampleStateCreateInfo *msaa_info)
{
- assert(!msaa_info->sampleShadingEnable);
assert(!msaa_info->alphaToOneEnable);
uint32_t sp_blend_cntl = A6XX_SP_BLEND_CNTL_UNK8;
}
static VkResult
-tu_pipeline_builder_create_pipeline(struct tu_pipeline_builder *builder,
- struct tu_pipeline **out_pipeline)
+tu_pipeline_create(struct tu_device *dev,
+ const VkAllocationCallbacks *pAllocator,
+ struct tu_pipeline **out_pipeline)
{
- struct tu_device *dev = builder->device;
-
struct tu_pipeline *pipeline =
- vk_zalloc2(&dev->alloc, builder->alloc, sizeof(*pipeline), 8,
+ vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pipeline)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- tu_cs_init(&pipeline->cs, TU_CS_MODE_SUB_STREAM, 2048);
+ tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);
/* reserve the space now such that tu_cs_begin_sub_stream never fails */
- VkResult result = tu_cs_reserve_space(dev, &pipeline->cs, 2048);
+ VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048);
if (result != VK_SUCCESS) {
- vk_free2(&dev->alloc, builder->alloc, pipeline);
+ vk_free2(&dev->alloc, pAllocator, pipeline);
return result;
}
continue;
struct tu_shader *shader =
- tu_shader_create(builder->device, stage, stage_info, builder->alloc);
+ tu_shader_create(builder->device, stage, stage_info, builder->layout,
+ builder->alloc);
if (!shader)
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
+/* Snapshot the descriptor-related state of a compiled shader variant into
+ * the pipeline's per-stage linkage. Everything is copied by value so later
+ * consumers do not need the tu_shader / ir3 objects to stay alive.
+ */
+static void
+tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
+ struct tu_shader *shader,
+ struct ir3_shader_variant *v)
+{
+ link->ubo_state = v->shader->ubo_state;
+ link->const_state = v->shader->const_state;
+ link->constlen = v->constlen;
+ link->texture_map = shader->texture_map;
+ link->sampler_map = shader->sampler_map;
+ link->ubo_map = shader->ubo_map;
+ link->ssbo_map = shader->ssbo_map;
+ link->image_map = shader->image_map;
+}
+
static void
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
struct tu_pipeline *pipeline)
{
struct tu_cs prog_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false);
pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true);
pipeline->program.binning_state_ib =
tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (!builder->shaders[i])
+ continue;
+
+ tu_pipeline_set_linkage(&pipeline->program.link[i],
+ builder->shaders[i],
+ &builder->shaders[i]->variants[0]);
+ }
}
static void
const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
struct tu_cs vi_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+ tu_cs_begin_sub_stream(&pipeline->cs,
MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
pipeline->vi.bindings, pipeline->vi.strides,
pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
if (vs->has_binning_pass) {
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+ tu_cs_begin_sub_stream(&pipeline->cs,
MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
tu6_emit_vertex_input(
&vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
builder->create_info->pViewportState;
struct tu_cs vp_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 15, &vp_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 15, &vp_cs);
if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
assert(vp_info->viewportCount == 1);
assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);
struct tu_cs rast_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 20, &rast_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 20, &rast_cs);
/* move to hw ctx init? */
tu6_emit_gras_unknowns(&rast_cs);
: &dummy_ds_info;
struct tu_cs ds_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 12, &ds_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 12, &ds_cs);
/* move to hw ctx init? */
tu6_emit_alpha_control_disable(&ds_cs);
: &dummy_blend_info;
struct tu_cs blend_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, MAX_RTS * 3 + 9,
- &blend_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 9, &blend_cs);
uint32_t blend_enable_mask;
tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
struct tu_device *dev,
const VkAllocationCallbacks *alloc)
{
- tu_cs_finish(dev, &pipeline->cs);
+ tu_cs_finish(&pipeline->cs);
if (pipeline->program.binary_bo.gem_handle)
tu_bo_finish(dev, &pipeline->program.binary_bo);
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
struct tu_pipeline **pipeline)
{
- VkResult result = tu_pipeline_builder_create_pipeline(builder, pipeline);
+ VkResult result = tu_pipeline_create(builder->device, builder->alloc,
+ pipeline);
if (result != VK_SUCCESS)
return result;
const VkGraphicsPipelineCreateInfo *create_info,
const VkAllocationCallbacks *alloc)
{
+ TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);
+
*builder = (struct tu_pipeline_builder) {
.device = dev,
.cache = cache,
.create_info = create_info,
.alloc = alloc,
+ .layout = layout,
};
builder->rasterizer_discard =
builder->use_depth_stencil_attachment =
subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED;
- assert(subpass->color_count ==
- create_info->pColorBlendState->attachmentCount);
+ assert(subpass->color_count == 0 ||
+ !create_info->pColorBlendState ||
+ subpass->color_count == create_info->pColorBlendState->attachmentCount);
builder->color_attachment_count = subpass->color_count;
for (uint32_t i = 0; i < subpass->color_count; i++) {
const uint32_t a = subpass->color_attachments[i].attachment;
}
}
+/* Create a single graphics pipeline via the pipeline builder. On failure
+ * *pPipeline is set to VK_NULL_HANDLE and the error code is returned.
+ */
+static VkResult
+tu_graphics_pipeline_create(VkDevice device,
+ VkPipelineCache pipelineCache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ TU_FROM_HANDLE(tu_device, dev, device);
+ TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);
+
+ struct tu_pipeline_builder builder;
+ tu_pipeline_builder_init_graphics(&builder, dev, cache,
+ pCreateInfo, pAllocator);
+
+ struct tu_pipeline *pipeline = NULL;
+ VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
+ tu_pipeline_builder_finish(&builder);
+
+ if (result == VK_SUCCESS)
+ *pPipeline = tu_pipeline_to_handle(pipeline);
+ else
+ *pPipeline = VK_NULL_HANDLE;
+
+ return result;
+}
+
+/* Public entry point: create `count` graphics pipelines. Per the Vulkan
+ * spec, keep creating the remaining pipelines even if one fails and return
+ * (one of) the error code(s); failed slots are left as VK_NULL_HANDLE by
+ * tu_graphics_pipeline_create.
+ */
VkResult
tu_CreateGraphicsPipelines(VkDevice device,
VkPipelineCache pipelineCache,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
- TU_FROM_HANDLE(tu_device, dev, device);
- TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);
+ VkResult final_result = VK_SUCCESS;
for (uint32_t i = 0; i < count; i++) {
- struct tu_pipeline_builder builder;
- tu_pipeline_builder_init_graphics(&builder, dev, cache,
- &pCreateInfos[i], pAllocator);
-
- struct tu_pipeline *pipeline;
- VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
- tu_pipeline_builder_finish(&builder);
-
- if (result != VK_SUCCESS) {
- for (uint32_t j = 0; j < i; j++) {
- tu_DestroyPipeline(device, pPipelines[j], pAllocator);
- pPipelines[j] = VK_NULL_HANDLE;
- }
+ VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
+ &pCreateInfos[i], pAllocator,
+ &pPipelines[i]);
- return result;
- }
-
- pPipelines[i] = tu_pipeline_to_handle(pipeline);
+ if (result != VK_SUCCESS)
+ final_result = result;
}
+ return final_result;
+}
+
+/* Emit the full compute program state (config, shader object, immediates)
+ * into the given command stream.
+ */
+static void
+tu6_emit_compute_program(struct tu_cs *cs,
+ struct tu_shader *shader,
+ const struct tu_bo *binary_bo)
+{
+ const struct ir3_shader_variant *v = &shader->variants[0];
+
+ tu6_emit_cs_config(cs, shader, v);
+
+ /* The compute program is the only one in the pipeline, so 0 offset. */
+ tu6_emit_shader_object(cs, MESA_SHADER_COMPUTE, v, binary_bo, 0);
+
+ /* NOTE(review): CP_LOAD_STATE6_FRAG is paired with the CS state block
+ * here — presumably the required opcode for compute const loads on a6xx;
+ * confirm against the packet definitions.
+ */
+ tu6_emit_immediates(cs, v, CP_LOAD_STATE6_FRAG, SB6_CS_SHADER);
+}
+
+/* Allocate and map a buffer object for the compute shader binary and copy
+ * the binary into it. The BO lives in pipeline->program.binary_bo, so on
+ * error the caller's fail path (tu_pipeline_finish) releases it.
+ */
+static VkResult
+tu_compute_upload_shader(VkDevice device,
+ struct tu_pipeline *pipeline,
+ struct tu_shader *shader)
+{
+ TU_FROM_HANDLE(tu_device, dev, device);
+ struct tu_bo *bo = &pipeline->program.binary_bo;
+ struct ir3_shader_variant *v = &shader->variants[0];
+
+ uint32_t shader_size = sizeof(uint32_t) * v->info.sizedwords;
+ VkResult result =
+ tu_bo_init_new(dev, bo, shader_size);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = tu_bo_map(dev, bo);
+ if (result != VK_SUCCESS)
+ return result;
+
+ memcpy(bo->map, shader->binary, shader_size);
+
+ return VK_SUCCESS;
}
+
+/* Create one compute pipeline: compile the shader, record its descriptor
+ * linkage, upload the binary, and emit the program state into the
+ * pipeline's sub-stream. On failure the partially-created shader and
+ * pipeline are destroyed and *pPipeline remains VK_NULL_HANDLE.
+ */
static VkResult
-tu_compute_pipeline_create(VkDevice _device,
+tu_compute_pipeline_create(VkDevice device,
VkPipelineCache _cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
+ TU_FROM_HANDLE(tu_device, dev, device);
+ TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
+ const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
+ VkResult result;
+
+ struct tu_pipeline *pipeline;
+
+ *pPipeline = VK_NULL_HANDLE;
+
+ result = tu_pipeline_create(dev, pAllocator, &pipeline);
+ if (result != VK_SUCCESS)
+ return result;
+
+ pipeline->layout = layout;
+
+ struct tu_shader_compile_options options;
+ tu_shader_compile_options_init(&options, NULL);
+
+ struct tu_shader *shader =
+ tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
+ if (!shader) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail;
+ }
+
+ result = tu_shader_compile(dev, shader, NULL, &options, pAllocator);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ struct ir3_shader_variant *v = &shader->variants[0];
+
+ tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
+ shader, v);
+
+ result = tu_compute_upload_shader(device, pipeline, shader);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ /* Record the workgroup size from the compiled NIR for vkCmdDispatch. */
+ for (int i = 0; i < 3; i++)
+ pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];
+
+ struct tu_cs prog_cs;
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
+ tu6_emit_compute_program(&prog_cs, shader, &pipeline->program.binary_bo);
+ pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
+
+ *pPipeline = tu_pipeline_to_handle(pipeline);
return VK_SUCCESS;
+
+fail:
+ if (shader)
+ tu_shader_destroy(dev, shader, pAllocator);
+
+ tu_pipeline_finish(pipeline, dev, pAllocator);
+ vk_free2(&dev->alloc, pAllocator, pipeline);
+
+ return result;
}
+/* Public entry point: create `count` compute pipelines. Per the Vulkan
+ * spec, attempt every pipeline even when an earlier one fails and return
+ * (one of) the error code(s); failed slots are set to VK_NULL_HANDLE by
+ * tu_compute_pipeline_create.
+ */
VkResult
-tu_CreateComputePipelines(VkDevice _device,
+tu_CreateComputePipelines(VkDevice device,
VkPipelineCache pipelineCache,
uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
- VkResult result = VK_SUCCESS;
+ VkResult final_result = VK_SUCCESS;
- unsigned i = 0;
- for (; i < count; i++) {
- VkResult r;
- r = tu_compute_pipeline_create(_device, pipelineCache, &pCreateInfos[i],
- pAllocator, &pPipelines[i]);
- if (r != VK_SUCCESS) {
- result = r;
- pPipelines[i] = VK_NULL_HANDLE;
- }
+ for (uint32_t i = 0; i < count; i++) {
+ VkResult result = tu_compute_pipeline_create(device, pipelineCache,
+ &pCreateInfos[i],
+ pAllocator, &pPipelines[i]);
+ if (result != VK_SUCCESS)
+ final_result = result;
}
- return result;
+ return final_result;
}
void