2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 #include "tu_private.h"
30 #include "main/menums.h"
32 #include "nir/nir_builder.h"
33 #include "spirv/nir_spirv.h"
34 #include "util/debug.h"
35 #include "util/mesa-sha1.h"
36 #include "util/u_atomic.h"
37 #include "vk_format.h"
/* Transient state used while translating a VkGraphicsPipelineCreateInfo
 * into a tu_pipeline.  A builder lives only for the duration of a single
 * tu_CreateGraphicsPipelines entry.
 */
struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   /* Compiled shaders indexed by gl_shader_stage; NULL for unused stages. */
   struct tu_shader *shaders[MESA_SHADER_STAGES];
   /* Byte offset of each stage's binary within the upload BO. */
   uint32_t shader_offsets[MESA_SHADER_STAGES];
   /* Byte offset of the binning-pass vertex shader variant, if any. */
   uint32_t binning_vs_offset;
   /* Total size in bytes of all shader binaries to upload. */
   uint32_t shader_total_size;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_depth_stencil_attachment;
   bool use_color_attachments;
   VkFormat color_attachment_formats[MAX_RTS];
};
62 static enum tu_dynamic_state_bits
63 tu_dynamic_state_bit(VkDynamicState state
)
66 case VK_DYNAMIC_STATE_VIEWPORT
:
67 return TU_DYNAMIC_VIEWPORT
;
68 case VK_DYNAMIC_STATE_SCISSOR
:
69 return TU_DYNAMIC_SCISSOR
;
70 case VK_DYNAMIC_STATE_LINE_WIDTH
:
71 return TU_DYNAMIC_LINE_WIDTH
;
72 case VK_DYNAMIC_STATE_DEPTH_BIAS
:
73 return TU_DYNAMIC_DEPTH_BIAS
;
74 case VK_DYNAMIC_STATE_BLEND_CONSTANTS
:
75 return TU_DYNAMIC_BLEND_CONSTANTS
;
76 case VK_DYNAMIC_STATE_DEPTH_BOUNDS
:
77 return TU_DYNAMIC_DEPTH_BOUNDS
;
78 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
:
79 return TU_DYNAMIC_STENCIL_COMPARE_MASK
;
80 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
:
81 return TU_DYNAMIC_STENCIL_WRITE_MASK
;
82 case VK_DYNAMIC_STATE_STENCIL_REFERENCE
:
83 return TU_DYNAMIC_STENCIL_REFERENCE
;
85 unreachable("invalid dynamic state");
90 static gl_shader_stage
91 tu_shader_stage(VkShaderStageFlagBits stage
)
94 case VK_SHADER_STAGE_VERTEX_BIT
:
95 return MESA_SHADER_VERTEX
;
96 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
:
97 return MESA_SHADER_TESS_CTRL
;
98 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
:
99 return MESA_SHADER_TESS_EVAL
;
100 case VK_SHADER_STAGE_GEOMETRY_BIT
:
101 return MESA_SHADER_GEOMETRY
;
102 case VK_SHADER_STAGE_FRAGMENT_BIT
:
103 return MESA_SHADER_FRAGMENT
;
104 case VK_SHADER_STAGE_COMPUTE_BIT
:
105 return MESA_SHADER_COMPUTE
;
107 unreachable("invalid VkShaderStageFlagBits");
108 return MESA_SHADER_NONE
;
113 tu_logic_op_reads_dst(VkLogicOp op
)
116 case VK_LOGIC_OP_CLEAR
:
117 case VK_LOGIC_OP_COPY
:
118 case VK_LOGIC_OP_COPY_INVERTED
:
119 case VK_LOGIC_OP_SET
:
127 tu_blend_factor_no_dst_alpha(VkBlendFactor factor
)
129 /* treat dst alpha as 1.0 and avoid reading it */
131 case VK_BLEND_FACTOR_DST_ALPHA
:
132 return VK_BLEND_FACTOR_ONE
;
133 case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA
:
134 return VK_BLEND_FACTOR_ZERO
;
140 static enum pc_di_primtype
141 tu6_primtype(VkPrimitiveTopology topology
)
144 case VK_PRIMITIVE_TOPOLOGY_POINT_LIST
:
145 return DI_PT_POINTLIST
;
146 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST
:
147 return DI_PT_LINELIST
;
148 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
:
149 return DI_PT_LINESTRIP
;
150 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
:
151 return DI_PT_TRILIST
;
152 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
:
153 return DI_PT_TRILIST
;
154 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
:
156 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY
:
157 return DI_PT_LINE_ADJ
;
158 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY
:
159 return DI_PT_LINESTRIP_ADJ
;
160 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY
:
161 return DI_PT_TRI_ADJ
;
162 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY
:
163 return DI_PT_TRISTRIP_ADJ
;
164 case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
:
166 unreachable("invalid primitive topology");
171 static enum adreno_compare_func
172 tu6_compare_func(VkCompareOp op
)
175 case VK_COMPARE_OP_NEVER
:
177 case VK_COMPARE_OP_LESS
:
179 case VK_COMPARE_OP_EQUAL
:
181 case VK_COMPARE_OP_LESS_OR_EQUAL
:
183 case VK_COMPARE_OP_GREATER
:
185 case VK_COMPARE_OP_NOT_EQUAL
:
186 return FUNC_NOTEQUAL
;
187 case VK_COMPARE_OP_GREATER_OR_EQUAL
:
189 case VK_COMPARE_OP_ALWAYS
:
192 unreachable("invalid VkCompareOp");
197 static enum adreno_stencil_op
198 tu6_stencil_op(VkStencilOp op
)
201 case VK_STENCIL_OP_KEEP
:
203 case VK_STENCIL_OP_ZERO
:
205 case VK_STENCIL_OP_REPLACE
:
206 return STENCIL_REPLACE
;
207 case VK_STENCIL_OP_INCREMENT_AND_CLAMP
:
208 return STENCIL_INCR_CLAMP
;
209 case VK_STENCIL_OP_DECREMENT_AND_CLAMP
:
210 return STENCIL_DECR_CLAMP
;
211 case VK_STENCIL_OP_INVERT
:
212 return STENCIL_INVERT
;
213 case VK_STENCIL_OP_INCREMENT_AND_WRAP
:
214 return STENCIL_INCR_WRAP
;
215 case VK_STENCIL_OP_DECREMENT_AND_WRAP
:
216 return STENCIL_DECR_WRAP
;
218 unreachable("invalid VkStencilOp");
223 static enum a3xx_rop_code
224 tu6_rop(VkLogicOp op
)
227 case VK_LOGIC_OP_CLEAR
:
229 case VK_LOGIC_OP_AND
:
231 case VK_LOGIC_OP_AND_REVERSE
:
232 return ROP_AND_REVERSE
;
233 case VK_LOGIC_OP_COPY
:
235 case VK_LOGIC_OP_AND_INVERTED
:
236 return ROP_AND_INVERTED
;
237 case VK_LOGIC_OP_NO_OP
:
239 case VK_LOGIC_OP_XOR
:
243 case VK_LOGIC_OP_NOR
:
245 case VK_LOGIC_OP_EQUIVALENT
:
247 case VK_LOGIC_OP_INVERT
:
249 case VK_LOGIC_OP_OR_REVERSE
:
250 return ROP_OR_REVERSE
;
251 case VK_LOGIC_OP_COPY_INVERTED
:
252 return ROP_COPY_INVERTED
;
253 case VK_LOGIC_OP_OR_INVERTED
:
254 return ROP_OR_INVERTED
;
255 case VK_LOGIC_OP_NAND
:
257 case VK_LOGIC_OP_SET
:
260 unreachable("invalid VkLogicOp");
265 static enum adreno_rb_blend_factor
266 tu6_blend_factor(VkBlendFactor factor
)
269 case VK_BLEND_FACTOR_ZERO
:
271 case VK_BLEND_FACTOR_ONE
:
273 case VK_BLEND_FACTOR_SRC_COLOR
:
274 return FACTOR_SRC_COLOR
;
275 case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR
:
276 return FACTOR_ONE_MINUS_SRC_COLOR
;
277 case VK_BLEND_FACTOR_DST_COLOR
:
278 return FACTOR_DST_COLOR
;
279 case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR
:
280 return FACTOR_ONE_MINUS_DST_COLOR
;
281 case VK_BLEND_FACTOR_SRC_ALPHA
:
282 return FACTOR_SRC_ALPHA
;
283 case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA
:
284 return FACTOR_ONE_MINUS_SRC_ALPHA
;
285 case VK_BLEND_FACTOR_DST_ALPHA
:
286 return FACTOR_DST_ALPHA
;
287 case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA
:
288 return FACTOR_ONE_MINUS_DST_ALPHA
;
289 case VK_BLEND_FACTOR_CONSTANT_COLOR
:
290 return FACTOR_CONSTANT_COLOR
;
291 case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR
:
292 return FACTOR_ONE_MINUS_CONSTANT_COLOR
;
293 case VK_BLEND_FACTOR_CONSTANT_ALPHA
:
294 return FACTOR_CONSTANT_ALPHA
;
295 case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA
:
296 return FACTOR_ONE_MINUS_CONSTANT_ALPHA
;
297 case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE
:
298 return FACTOR_SRC_ALPHA_SATURATE
;
299 case VK_BLEND_FACTOR_SRC1_COLOR
:
300 return FACTOR_SRC1_COLOR
;
301 case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR
:
302 return FACTOR_ONE_MINUS_SRC1_COLOR
;
303 case VK_BLEND_FACTOR_SRC1_ALPHA
:
304 return FACTOR_SRC1_ALPHA
;
305 case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA
:
306 return FACTOR_ONE_MINUS_SRC1_ALPHA
;
308 unreachable("invalid VkBlendFactor");
313 static enum a3xx_rb_blend_opcode
314 tu6_blend_op(VkBlendOp op
)
317 case VK_BLEND_OP_ADD
:
318 return BLEND_DST_PLUS_SRC
;
319 case VK_BLEND_OP_SUBTRACT
:
320 return BLEND_SRC_MINUS_DST
;
321 case VK_BLEND_OP_REVERSE_SUBTRACT
:
322 return BLEND_DST_MINUS_SRC
;
323 case VK_BLEND_OP_MIN
:
324 return BLEND_MIN_DST_SRC
;
325 case VK_BLEND_OP_MAX
:
326 return BLEND_MAX_DST_SRC
;
328 unreachable("invalid VkBlendOp");
329 return BLEND_DST_PLUS_SRC
;
334 tu6_guardband_adj(uint32_t v
)
337 return (uint32_t)(511.0 - 65.0 * (log2(v
) - 8.0));
343 tu6_emit_viewport(struct tu_cs
*cs
, const VkViewport
*viewport
)
347 scales
[0] = viewport
->width
/ 2.0f
;
348 scales
[1] = viewport
->height
/ 2.0f
;
349 scales
[2] = viewport
->maxDepth
- viewport
->minDepth
;
350 offsets
[0] = viewport
->x
+ scales
[0];
351 offsets
[1] = viewport
->y
+ scales
[1];
352 offsets
[2] = viewport
->minDepth
;
356 min
.x
= (int32_t) viewport
->x
;
357 max
.x
= (int32_t) ceilf(viewport
->x
+ viewport
->width
);
358 if (viewport
->height
>= 0.0f
) {
359 min
.y
= (int32_t) viewport
->y
;
360 max
.y
= (int32_t) ceilf(viewport
->y
+ viewport
->height
);
362 min
.y
= (int32_t)(viewport
->y
+ viewport
->height
);
363 max
.y
= (int32_t) ceilf(viewport
->y
);
365 /* the spec allows viewport->height to be 0.0f */
368 assert(min
.x
>= 0 && min
.x
< max
.x
);
369 assert(min
.y
>= 0 && min
.y
< max
.y
);
371 VkExtent2D guardband_adj
;
372 guardband_adj
.width
= tu6_guardband_adj(max
.x
- min
.x
);
373 guardband_adj
.height
= tu6_guardband_adj(max
.y
- min
.y
);
375 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0
, 6);
376 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets
[0]));
377 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_XSCALE_0(scales
[0]));
378 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets
[1]));
379 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_YSCALE_0(scales
[1]));
380 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets
[2]));
381 tu_cs_emit(cs
, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales
[2]));
383 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0
, 2);
384 tu_cs_emit(cs
, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min
.x
) |
385 A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(min
.y
));
386 tu_cs_emit(cs
, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(max
.x
- 1) |
387 A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(max
.y
- 1));
389 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ
, 1);
391 A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj
.width
) |
392 A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj
.height
));
396 tu6_emit_scissor(struct tu_cs
*cs
, const VkRect2D
*scissor
)
398 const VkOffset2D min
= scissor
->offset
;
399 const VkOffset2D max
= {
400 scissor
->offset
.x
+ scissor
->extent
.width
,
401 scissor
->offset
.y
+ scissor
->extent
.height
,
404 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0
, 2);
405 tu_cs_emit(cs
, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(min
.x
) |
406 A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(min
.y
));
407 tu_cs_emit(cs
, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(max
.x
- 1) |
408 A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max
.y
- 1));
412 tu6_emit_gras_unknowns(struct tu_cs
*cs
)
414 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_UNKNOWN_8000
, 1);
415 tu_cs_emit(cs
, 0x80);
416 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_UNKNOWN_8001
, 1);
418 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_UNKNOWN_8004
, 1);
423 tu6_emit_point_size(struct tu_cs
*cs
)
425 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_SU_POINT_MINMAX
, 2);
426 tu_cs_emit(cs
, A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f
/ 16.0f
) |
427 A6XX_GRAS_SU_POINT_MINMAX_MAX(4092.0f
));
428 tu_cs_emit(cs
, A6XX_GRAS_SU_POINT_SIZE(1.0f
));
432 tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo
*rast_info
,
433 VkSampleCountFlagBits samples
)
435 uint32_t gras_su_cntl
= 0;
437 if (rast_info
->cullMode
& VK_CULL_MODE_FRONT_BIT
)
438 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_CULL_FRONT
;
439 if (rast_info
->cullMode
& VK_CULL_MODE_BACK_BIT
)
440 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_CULL_BACK
;
442 if (rast_info
->frontFace
== VK_FRONT_FACE_CLOCKWISE
)
443 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_FRONT_CW
;
445 /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH */
447 if (rast_info
->depthBiasEnable
)
448 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_POLY_OFFSET
;
450 if (samples
> VK_SAMPLE_COUNT_1_BIT
)
451 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_MSAA_ENABLE
;
457 tu6_emit_gras_su_cntl(struct tu_cs
*cs
,
458 uint32_t gras_su_cntl
,
461 assert((gras_su_cntl
& A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK
) == 0);
462 gras_su_cntl
|= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(line_width
/ 2.0f
);
464 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_SU_CNTL
, 1);
465 tu_cs_emit(cs
, gras_su_cntl
);
469 tu6_emit_depth_bias(struct tu_cs
*cs
,
470 float constant_factor
,
474 tu_cs_emit_pkt4(cs
, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE
, 3);
475 tu_cs_emit(cs
, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor
));
476 tu_cs_emit(cs
, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor
));
477 tu_cs_emit(cs
, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp
));
481 tu6_emit_alpha_control_disable(struct tu_cs
*cs
)
483 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_ALPHA_CONTROL
, 1);
488 tu6_emit_depth_control(struct tu_cs
*cs
,
489 const VkPipelineDepthStencilStateCreateInfo
*ds_info
)
491 assert(!ds_info
->depthBoundsTestEnable
);
493 uint32_t rb_depth_cntl
= 0;
494 if (ds_info
->depthTestEnable
) {
496 A6XX_RB_DEPTH_CNTL_Z_ENABLE
|
497 A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info
->depthCompareOp
)) |
498 A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE
;
500 if (ds_info
->depthWriteEnable
)
501 rb_depth_cntl
|= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE
;
504 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_DEPTH_CNTL
, 1);
505 tu_cs_emit(cs
, rb_depth_cntl
);
509 tu6_emit_stencil_control(struct tu_cs
*cs
,
510 const VkPipelineDepthStencilStateCreateInfo
*ds_info
)
512 uint32_t rb_stencil_control
= 0;
513 if (ds_info
->stencilTestEnable
) {
514 const VkStencilOpState
*front
= &ds_info
->front
;
515 const VkStencilOpState
*back
= &ds_info
->back
;
516 rb_stencil_control
|=
517 A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE
|
518 A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF
|
519 A6XX_RB_STENCIL_CONTROL_STENCIL_READ
|
520 A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front
->compareOp
)) |
521 A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front
->failOp
)) |
522 A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front
->passOp
)) |
523 A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front
->depthFailOp
)) |
524 A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back
->compareOp
)) |
525 A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back
->failOp
)) |
526 A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back
->passOp
)) |
527 A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back
->depthFailOp
));
530 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_STENCIL_CONTROL
, 1);
531 tu_cs_emit(cs
, rb_stencil_control
);
535 tu6_emit_stencil_compare_mask(struct tu_cs
*cs
, uint32_t front
, uint32_t back
)
537 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_STENCILMASK
, 1);
539 cs
, A6XX_RB_STENCILMASK_MASK(front
) | A6XX_RB_STENCILMASK_BFMASK(back
));
543 tu6_emit_stencil_write_mask(struct tu_cs
*cs
, uint32_t front
, uint32_t back
)
545 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_STENCILWRMASK
, 1);
546 tu_cs_emit(cs
, A6XX_RB_STENCILWRMASK_WRMASK(front
) |
547 A6XX_RB_STENCILWRMASK_BFWRMASK(back
));
551 tu6_emit_stencil_reference(struct tu_cs
*cs
, uint32_t front
, uint32_t back
)
553 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_STENCILREF
, 1);
555 A6XX_RB_STENCILREF_REF(front
) | A6XX_RB_STENCILREF_BFREF(back
));
559 tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState
*att
,
562 const enum a3xx_rb_blend_opcode color_op
= tu6_blend_op(att
->colorBlendOp
);
563 const enum adreno_rb_blend_factor src_color_factor
= tu6_blend_factor(
564 has_alpha
? att
->srcColorBlendFactor
565 : tu_blend_factor_no_dst_alpha(att
->srcColorBlendFactor
));
566 const enum adreno_rb_blend_factor dst_color_factor
= tu6_blend_factor(
567 has_alpha
? att
->dstColorBlendFactor
568 : tu_blend_factor_no_dst_alpha(att
->dstColorBlendFactor
));
569 const enum a3xx_rb_blend_opcode alpha_op
= tu6_blend_op(att
->alphaBlendOp
);
570 const enum adreno_rb_blend_factor src_alpha_factor
=
571 tu6_blend_factor(att
->srcAlphaBlendFactor
);
572 const enum adreno_rb_blend_factor dst_alpha_factor
=
573 tu6_blend_factor(att
->dstAlphaBlendFactor
);
575 return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor
) |
576 A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op
) |
577 A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor
) |
578 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor
) |
579 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op
) |
580 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor
);
584 tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState
*att
,
585 uint32_t rb_mrt_control_rop
,
589 uint32_t rb_mrt_control
=
590 A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att
->colorWriteMask
);
592 /* ignore blending and logic op for integer attachments */
594 rb_mrt_control
|= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY
);
595 return rb_mrt_control
;
598 rb_mrt_control
|= rb_mrt_control_rop
;
600 if (att
->blendEnable
) {
601 rb_mrt_control
|= A6XX_RB_MRT_CONTROL_BLEND
;
604 rb_mrt_control
|= A6XX_RB_MRT_CONTROL_BLEND2
;
607 return rb_mrt_control
;
611 tu6_emit_rb_mrt_controls(struct tu_cs
*cs
,
612 const VkPipelineColorBlendStateCreateInfo
*blend_info
,
613 const VkFormat attachment_formats
[MAX_RTS
],
614 uint32_t *blend_enable_mask
)
616 *blend_enable_mask
= 0;
618 bool rop_reads_dst
= false;
619 uint32_t rb_mrt_control_rop
= 0;
620 if (blend_info
->logicOpEnable
) {
621 rop_reads_dst
= tu_logic_op_reads_dst(blend_info
->logicOp
);
623 A6XX_RB_MRT_CONTROL_ROP_ENABLE
|
624 A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info
->logicOp
));
627 for (uint32_t i
= 0; i
< blend_info
->attachmentCount
; i
++) {
628 const VkPipelineColorBlendAttachmentState
*att
=
629 &blend_info
->pAttachments
[i
];
630 const VkFormat format
= attachment_formats
[i
];
632 uint32_t rb_mrt_control
= 0;
633 uint32_t rb_mrt_blend_control
= 0;
634 if (format
!= VK_FORMAT_UNDEFINED
) {
635 const bool is_int
= vk_format_is_int(format
);
636 const bool has_alpha
= vk_format_has_alpha(format
);
639 tu6_rb_mrt_control(att
, rb_mrt_control_rop
, is_int
, has_alpha
);
640 rb_mrt_blend_control
= tu6_rb_mrt_blend_control(att
, has_alpha
);
642 if (att
->blendEnable
|| rop_reads_dst
)
643 *blend_enable_mask
|= 1 << i
;
646 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_MRT_CONTROL(i
), 2);
647 tu_cs_emit(cs
, rb_mrt_control
);
648 tu_cs_emit(cs
, rb_mrt_blend_control
);
651 for (uint32_t i
= blend_info
->attachmentCount
; i
< MAX_RTS
; i
++) {
652 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_MRT_CONTROL(i
), 2);
659 tu6_emit_blend_control(struct tu_cs
*cs
,
660 uint32_t blend_enable_mask
,
661 const VkPipelineMultisampleStateCreateInfo
*msaa_info
)
663 assert(!msaa_info
->sampleShadingEnable
);
664 assert(!msaa_info
->alphaToOneEnable
);
666 uint32_t sp_blend_cntl
= A6XX_SP_BLEND_CNTL_UNK8
;
667 if (blend_enable_mask
)
668 sp_blend_cntl
|= A6XX_SP_BLEND_CNTL_ENABLED
;
669 if (msaa_info
->alphaToCoverageEnable
)
670 sp_blend_cntl
|= A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE
;
672 const uint32_t sample_mask
=
673 msaa_info
->pSampleMask
? *msaa_info
->pSampleMask
674 : ((1 << msaa_info
->rasterizationSamples
) - 1);
676 /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
677 uint32_t rb_blend_cntl
=
678 A6XX_RB_BLEND_CNTL_ENABLE_BLEND(blend_enable_mask
) |
679 A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND
|
680 A6XX_RB_BLEND_CNTL_SAMPLE_MASK(sample_mask
);
681 if (msaa_info
->alphaToCoverageEnable
)
682 rb_blend_cntl
|= A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE
;
684 tu_cs_emit_pkt4(cs
, REG_A6XX_SP_BLEND_CNTL
, 1);
685 tu_cs_emit(cs
, sp_blend_cntl
);
687 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_BLEND_CNTL
, 1);
688 tu_cs_emit(cs
, rb_blend_cntl
);
692 tu6_emit_blend_constants(struct tu_cs
*cs
, const float constants
[4])
694 tu_cs_emit_pkt4(cs
, REG_A6XX_RB_BLEND_RED_F32
, 4);
695 tu_cs_emit_array(cs
, (const uint32_t *) constants
, 4);
699 tu_pipeline_builder_create_pipeline(struct tu_pipeline_builder
*builder
,
700 struct tu_pipeline
**out_pipeline
)
702 struct tu_device
*dev
= builder
->device
;
704 struct tu_pipeline
*pipeline
=
705 vk_zalloc2(&dev
->alloc
, builder
->alloc
, sizeof(*pipeline
), 8,
706 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
708 return VK_ERROR_OUT_OF_HOST_MEMORY
;
710 tu_cs_init(&pipeline
->cs
, TU_CS_MODE_SUB_STREAM
, 2048);
712 /* reserve the space now such that tu_cs_begin_sub_stream never fails */
713 VkResult result
= tu_cs_reserve_space(dev
, &pipeline
->cs
, 2048);
714 if (result
!= VK_SUCCESS
) {
715 vk_free2(&dev
->alloc
, builder
->alloc
, pipeline
);
719 *out_pipeline
= pipeline
;
725 tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder
*builder
)
727 const VkPipelineShaderStageCreateInfo
*stage_infos
[MESA_SHADER_STAGES
] = {
730 for (uint32_t i
= 0; i
< builder
->create_info
->stageCount
; i
++) {
731 gl_shader_stage stage
=
732 tu_shader_stage(builder
->create_info
->pStages
[i
].stage
);
733 stage_infos
[stage
] = &builder
->create_info
->pStages
[i
];
736 struct tu_shader_compile_options options
;
737 tu_shader_compile_options_init(&options
, builder
->create_info
);
739 /* compile shaders in reverse order */
740 struct tu_shader
*next_stage_shader
= NULL
;
741 for (gl_shader_stage stage
= MESA_SHADER_STAGES
- 1;
742 stage
> MESA_SHADER_NONE
; stage
--) {
743 const VkPipelineShaderStageCreateInfo
*stage_info
= stage_infos
[stage
];
747 struct tu_shader
*shader
=
748 tu_shader_create(builder
->device
, stage
, stage_info
, builder
->alloc
);
750 return VK_ERROR_OUT_OF_HOST_MEMORY
;
753 tu_shader_compile(builder
->device
, shader
, next_stage_shader
,
754 &options
, builder
->alloc
);
755 if (result
!= VK_SUCCESS
)
758 builder
->shaders
[stage
] = shader
;
759 builder
->shader_offsets
[stage
] = builder
->shader_total_size
;
760 builder
->shader_total_size
+=
761 sizeof(uint32_t) * shader
->variants
[0].info
.sizedwords
;
763 next_stage_shader
= shader
;
766 if (builder
->shaders
[MESA_SHADER_VERTEX
]->has_binning_pass
) {
767 const struct tu_shader
*vs
= builder
->shaders
[MESA_SHADER_VERTEX
];
768 builder
->binning_vs_offset
= builder
->shader_total_size
;
769 builder
->shader_total_size
+=
770 sizeof(uint32_t) * vs
->variants
[1].info
.sizedwords
;
777 tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder
*builder
,
778 struct tu_pipeline
*pipeline
)
780 struct tu_bo
*bo
= &pipeline
->program
.binary_bo
;
783 tu_bo_init_new(builder
->device
, bo
, builder
->shader_total_size
);
784 if (result
!= VK_SUCCESS
)
787 result
= tu_bo_map(builder
->device
, bo
);
788 if (result
!= VK_SUCCESS
)
791 for (uint32_t i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
792 const struct tu_shader
*shader
= builder
->shaders
[i
];
796 memcpy(bo
->map
+ builder
->shader_offsets
[i
], shader
->binary
,
797 sizeof(uint32_t) * shader
->variants
[0].info
.sizedwords
);
800 if (builder
->shaders
[MESA_SHADER_VERTEX
]->has_binning_pass
) {
801 const struct tu_shader
*vs
= builder
->shaders
[MESA_SHADER_VERTEX
];
802 memcpy(bo
->map
+ builder
->binning_vs_offset
, vs
->binning_binary
,
803 sizeof(uint32_t) * vs
->variants
[1].info
.sizedwords
);
810 tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder
*builder
,
811 struct tu_pipeline
*pipeline
)
813 const VkPipelineDynamicStateCreateInfo
*dynamic_info
=
814 builder
->create_info
->pDynamicState
;
819 for (uint32_t i
= 0; i
< dynamic_info
->dynamicStateCount
; i
++) {
820 pipeline
->dynamic_state
.mask
|=
821 tu_dynamic_state_bit(dynamic_info
->pDynamicStates
[i
]);
826 tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder
*builder
,
827 struct tu_pipeline
*pipeline
)
829 const VkPipelineInputAssemblyStateCreateInfo
*ia_info
=
830 builder
->create_info
->pInputAssemblyState
;
832 pipeline
->ia
.primtype
= tu6_primtype(ia_info
->topology
);
833 pipeline
->ia
.primitive_restart
= ia_info
->primitiveRestartEnable
;
837 tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder
*builder
,
838 struct tu_pipeline
*pipeline
)
842 * pViewportState is a pointer to an instance of the
843 * VkPipelineViewportStateCreateInfo structure, and is ignored if the
844 * pipeline has rasterization disabled."
846 * We leave the relevant registers stale in that case.
848 if (builder
->rasterizer_discard
)
851 const VkPipelineViewportStateCreateInfo
*vp_info
=
852 builder
->create_info
->pViewportState
;
855 tu_cs_begin_sub_stream(builder
->device
, &pipeline
->cs
, 15, &vp_cs
);
857 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_VIEWPORT
)) {
858 assert(vp_info
->viewportCount
== 1);
859 tu6_emit_viewport(&vp_cs
, vp_info
->pViewports
);
862 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_SCISSOR
)) {
863 assert(vp_info
->scissorCount
== 1);
864 tu6_emit_scissor(&vp_cs
, vp_info
->pScissors
);
867 pipeline
->vp
.state_ib
= tu_cs_end_sub_stream(&pipeline
->cs
, &vp_cs
);
871 tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder
*builder
,
872 struct tu_pipeline
*pipeline
)
874 const VkPipelineRasterizationStateCreateInfo
*rast_info
=
875 builder
->create_info
->pRasterizationState
;
877 assert(!rast_info
->depthClampEnable
);
878 assert(rast_info
->polygonMode
== VK_POLYGON_MODE_FILL
);
880 struct tu_cs rast_cs
;
881 tu_cs_begin_sub_stream(builder
->device
, &pipeline
->cs
, 20, &rast_cs
);
883 /* move to hw ctx init? */
884 tu6_emit_gras_unknowns(&rast_cs
);
885 tu6_emit_point_size(&rast_cs
);
887 const uint32_t gras_su_cntl
=
888 tu6_gras_su_cntl(rast_info
, builder
->samples
);
890 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_LINE_WIDTH
))
891 tu6_emit_gras_su_cntl(&rast_cs
, gras_su_cntl
, rast_info
->lineWidth
);
893 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_DEPTH_BIAS
)) {
894 tu6_emit_depth_bias(&rast_cs
, rast_info
->depthBiasConstantFactor
,
895 rast_info
->depthBiasClamp
,
896 rast_info
->depthBiasSlopeFactor
);
899 pipeline
->rast
.state_ib
= tu_cs_end_sub_stream(&pipeline
->cs
, &rast_cs
);
901 pipeline
->rast
.gras_su_cntl
= gras_su_cntl
;
905 tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder
*builder
,
906 struct tu_pipeline
*pipeline
)
910 * pDepthStencilState is a pointer to an instance of the
911 * VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
912 * the pipeline has rasterization disabled or if the subpass of the
913 * render pass the pipeline is created against does not use a
914 * depth/stencil attachment.
916 * We disable both depth and stenil tests in those cases.
918 static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info
;
919 const VkPipelineDepthStencilStateCreateInfo
*ds_info
=
920 builder
->use_depth_stencil_attachment
921 ? builder
->create_info
->pDepthStencilState
925 tu_cs_begin_sub_stream(builder
->device
, &pipeline
->cs
, 12, &ds_cs
);
927 /* move to hw ctx init? */
928 tu6_emit_alpha_control_disable(&ds_cs
);
930 tu6_emit_depth_control(&ds_cs
, ds_info
);
931 tu6_emit_stencil_control(&ds_cs
, ds_info
);
933 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
)) {
934 tu6_emit_stencil_compare_mask(&ds_cs
, ds_info
->front
.compareMask
,
935 ds_info
->back
.compareMask
);
937 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
)) {
938 tu6_emit_stencil_write_mask(&ds_cs
, ds_info
->front
.writeMask
,
939 ds_info
->back
.writeMask
);
941 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_STENCIL_REFERENCE
)) {
942 tu6_emit_stencil_reference(&ds_cs
, ds_info
->front
.reference
,
943 ds_info
->back
.reference
);
946 pipeline
->ds
.state_ib
= tu_cs_end_sub_stream(&pipeline
->cs
, &ds_cs
);
950 tu_pipeline_builder_parse_multisample_and_color_blend(
951 struct tu_pipeline_builder
*builder
, struct tu_pipeline
*pipeline
)
955 * pMultisampleState is a pointer to an instance of the
956 * VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
957 * has rasterization disabled.
961 * pColorBlendState is a pointer to an instance of the
962 * VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
963 * pipeline has rasterization disabled or if the subpass of the render
964 * pass the pipeline is created against does not use any color
967 * We leave the relevant registers stale when rasterization is disabled.
969 if (builder
->rasterizer_discard
)
972 static const VkPipelineColorBlendStateCreateInfo dummy_blend_info
;
973 const VkPipelineMultisampleStateCreateInfo
*msaa_info
=
974 builder
->create_info
->pMultisampleState
;
975 const VkPipelineColorBlendStateCreateInfo
*blend_info
=
976 builder
->use_color_attachments
? builder
->create_info
->pColorBlendState
979 struct tu_cs blend_cs
;
980 tu_cs_begin_sub_stream(builder
->device
, &pipeline
->cs
, MAX_RTS
* 3 + 9,
983 uint32_t blend_enable_mask
;
984 tu6_emit_rb_mrt_controls(&blend_cs
, blend_info
,
985 builder
->color_attachment_formats
,
988 if (!(pipeline
->dynamic_state
.mask
& TU_DYNAMIC_BLEND_CONSTANTS
))
989 tu6_emit_blend_constants(&blend_cs
, blend_info
->blendConstants
);
991 tu6_emit_blend_control(&blend_cs
, blend_enable_mask
, msaa_info
);
993 pipeline
->blend
.state_ib
= tu_cs_end_sub_stream(&pipeline
->cs
, &blend_cs
);
997 tu_pipeline_finish(struct tu_pipeline
*pipeline
,
998 struct tu_device
*dev
,
999 const VkAllocationCallbacks
*alloc
)
1001 tu_cs_finish(dev
, &pipeline
->cs
);
1003 if (pipeline
->program
.binary_bo
.gem_handle
)
1004 tu_bo_finish(dev
, &pipeline
->program
.binary_bo
);
1008 tu_pipeline_builder_build(struct tu_pipeline_builder
*builder
,
1009 struct tu_pipeline
**pipeline
)
1011 VkResult result
= tu_pipeline_builder_create_pipeline(builder
, pipeline
);
1012 if (result
!= VK_SUCCESS
)
1015 /* compile and upload shaders */
1016 result
= tu_pipeline_builder_compile_shaders(builder
);
1017 if (result
== VK_SUCCESS
)
1018 result
= tu_pipeline_builder_upload_shaders(builder
, *pipeline
);
1019 if (result
!= VK_SUCCESS
) {
1020 tu_pipeline_finish(*pipeline
, builder
->device
, builder
->alloc
);
1021 vk_free2(&builder
->device
->alloc
, builder
->alloc
, *pipeline
);
1022 *pipeline
= VK_NULL_HANDLE
;
1027 tu_pipeline_builder_parse_dynamic(builder
, *pipeline
);
1028 tu_pipeline_builder_parse_input_assembly(builder
, *pipeline
);
1029 tu_pipeline_builder_parse_viewport(builder
, *pipeline
);
1030 tu_pipeline_builder_parse_rasterization(builder
, *pipeline
);
1031 tu_pipeline_builder_parse_depth_stencil(builder
, *pipeline
);
1032 tu_pipeline_builder_parse_multisample_and_color_blend(builder
, *pipeline
);
1034 /* we should have reserved enough space upfront such that the CS never
1037 assert((*pipeline
)->cs
.bo_count
== 1);
1043 tu_pipeline_builder_finish(struct tu_pipeline_builder
*builder
)
1045 for (uint32_t i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
1046 if (!builder
->shaders
[i
])
1048 tu_shader_destroy(builder
->device
, builder
->shaders
[i
], builder
->alloc
);
1053 tu_pipeline_builder_init_graphics(
1054 struct tu_pipeline_builder
*builder
,
1055 struct tu_device
*dev
,
1056 struct tu_pipeline_cache
*cache
,
1057 const VkGraphicsPipelineCreateInfo
*create_info
,
1058 const VkAllocationCallbacks
*alloc
)
1060 *builder
= (struct tu_pipeline_builder
) {
1063 .create_info
= create_info
,
1067 builder
->rasterizer_discard
=
1068 create_info
->pRasterizationState
->rasterizerDiscardEnable
;
1070 if (builder
->rasterizer_discard
) {
1071 builder
->samples
= VK_SAMPLE_COUNT_1_BIT
;
1073 builder
->samples
= create_info
->pMultisampleState
->rasterizationSamples
;
1075 const struct tu_render_pass
*pass
=
1076 tu_render_pass_from_handle(create_info
->renderPass
);
1077 const struct tu_subpass
*subpass
=
1078 &pass
->subpasses
[create_info
->subpass
];
1080 builder
->use_depth_stencil_attachment
=
1081 subpass
->depth_stencil_attachment
.attachment
!= VK_ATTACHMENT_UNUSED
;
1083 for (uint32_t i
= 0; i
< subpass
->color_count
; i
++) {
1084 const uint32_t a
= subpass
->color_attachments
[i
].attachment
;
1085 if (a
== VK_ATTACHMENT_UNUSED
)
1088 builder
->color_attachment_formats
[i
] = pass
->attachments
[a
].format
;
1089 builder
->use_color_attachments
= true;
1095 tu_CreateGraphicsPipelines(VkDevice device
,
1096 VkPipelineCache pipelineCache
,
1098 const VkGraphicsPipelineCreateInfo
*pCreateInfos
,
1099 const VkAllocationCallbacks
*pAllocator
,
1100 VkPipeline
*pPipelines
)
1102 TU_FROM_HANDLE(tu_device
, dev
, device
);
1103 TU_FROM_HANDLE(tu_pipeline_cache
, cache
, pipelineCache
);
1105 for (uint32_t i
= 0; i
< count
; i
++) {
1106 struct tu_pipeline_builder builder
;
1107 tu_pipeline_builder_init_graphics(&builder
, dev
, cache
,
1108 &pCreateInfos
[i
], pAllocator
);
1110 struct tu_pipeline
*pipeline
;
1111 VkResult result
= tu_pipeline_builder_build(&builder
, &pipeline
);
1112 tu_pipeline_builder_finish(&builder
);
1114 if (result
!= VK_SUCCESS
) {
1115 for (uint32_t j
= 0; j
< i
; j
++) {
1116 tu_DestroyPipeline(device
, pPipelines
[j
], pAllocator
);
1117 pPipelines
[j
] = VK_NULL_HANDLE
;
1123 pPipelines
[i
] = tu_pipeline_to_handle(pipeline
);
1130 tu_compute_pipeline_create(VkDevice _device
,
1131 VkPipelineCache _cache
,
1132 const VkComputePipelineCreateInfo
*pCreateInfo
,
1133 const VkAllocationCallbacks
*pAllocator
,
1134 VkPipeline
*pPipeline
)
1140 tu_CreateComputePipelines(VkDevice _device
,
1141 VkPipelineCache pipelineCache
,
1143 const VkComputePipelineCreateInfo
*pCreateInfos
,
1144 const VkAllocationCallbacks
*pAllocator
,
1145 VkPipeline
*pPipelines
)
1147 VkResult result
= VK_SUCCESS
;
1150 for (; i
< count
; i
++) {
1152 r
= tu_compute_pipeline_create(_device
, pipelineCache
, &pCreateInfos
[i
],
1153 pAllocator
, &pPipelines
[i
]);
1154 if (r
!= VK_SUCCESS
) {
1156 pPipelines
[i
] = VK_NULL_HANDLE
;
1164 tu_DestroyPipeline(VkDevice _device
,
1165 VkPipeline _pipeline
,
1166 const VkAllocationCallbacks
*pAllocator
)
1168 TU_FROM_HANDLE(tu_device
, dev
, _device
);
1169 TU_FROM_HANDLE(tu_pipeline
, pipeline
, _pipeline
);
1174 tu_pipeline_finish(pipeline
, dev
, pAllocator
);
1175 vk_free2(&dev
->alloc
, pAllocator
, pipeline
);