tu: Use common guardband helper
[mesa.git] / src / freedreno / vulkan / tu_pipeline.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "common/freedreno_guardband.h"
29 #include "tu_private.h"
30
31 #include "ir3/ir3_nir.h"
32 #include "main/menums.h"
33 #include "nir/nir.h"
34 #include "nir/nir_builder.h"
35 #include "spirv/nir_spirv.h"
36 #include "util/debug.h"
37 #include "util/mesa-sha1.h"
38 #include "util/u_atomic.h"
39 #include "vk_format.h"
40 #include "vk_util.h"
41
42 #include "tu_cs.h"
43
44 /* Emit IB that preloads the descriptors that the shader uses */
45
46 static void
47 emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
48 enum a6xx_state_block sb, unsigned base, unsigned offset,
49 unsigned count)
50 {
51 /* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
52 * clear if emitting more packets will even help anything. Presumably the
53 * descriptor cache is relatively small, and these packets stop doing
54 * anything when there are too many descriptors.
55 */
56 tu_cs_emit_pkt7(cs, opcode, 3);
57 tu_cs_emit(cs,
58 CP_LOAD_STATE6_0_STATE_TYPE(st) |
59 CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
60 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
61 CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
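   /* With STATE_SRC = SS6_BINDLESS the two "address" dwords below are not a
    * GPU address: the low dword packs the descriptor offset together with the
    * set index in bits [31:28] (base << 28), which presumably selects the
    * BINDLESS_BASE register the offset is relative to.
    */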
62 tu_cs_emit_qw(cs, offset | (base << 28));
63 }
64
65 static unsigned
66 tu6_load_state_size(struct tu_pipeline *pipeline, bool compute)
67 {
68 const unsigned load_state_size = 4;
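   /* Each emit_load_state() call emits exactly 4 dwords (one pkt7 header plus
    * the 3 payload dwords), so the worst case is 4 dwords per prefetch packet
    * counted below.
    */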
69 unsigned size = 0;
70 for (unsigned i = 0; i < pipeline->layout->num_sets; i++) {
71 if (pipeline && !(pipeline->active_desc_sets & (1u << i)))
72 continue;
73
74 struct tu_descriptor_set_layout *set_layout = pipeline->layout->set[i].layout;
75 for (unsigned j = 0; j < set_layout->binding_count; j++) {
76 struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
77 unsigned count = 0;
78 /* Note: some users (amber, for example) pass in
79 * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
80 * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
81 */
82 VkShaderStageFlags stages = compute ?
83 binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
84 binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
85 unsigned stage_count = util_bitcount(stages);
86
87 if (!binding->array_size)
88 continue;
89
90 switch (binding->type) {
91 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
92 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
93 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
94 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
95 /* IBO-backed resources only need one packet for all graphics stages */
96 if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
97 count += 1;
98 if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
99 count += 1;
100 break;
101 case VK_DESCRIPTOR_TYPE_SAMPLER:
102 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
103 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
104 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
105 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
106 /* Textures and UBOs need a packet for each stage */
107 count = stage_count;
108 break;
109 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
110 /* Because of how we pack combined images and samplers, we
111 * currently can't use one packet for the whole array.
112 */
113 count = stage_count * binding->array_size * 2;
114 break;
115 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
116 break;
117 default:
118 unreachable("bad descriptor type");
119 }
120 size += count * load_state_size;
121 }
122 }
123 return size;
124 }
125
126 static void
127 tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
128 {
129 unsigned size = tu6_load_state_size(pipeline, compute);
130 if (size == 0)
131 return;
132
133 struct tu_cs cs;
134 tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);
135
136 struct tu_pipeline_layout *layout = pipeline->layout;
137 for (unsigned i = 0; i < layout->num_sets; i++) {
138 /* From 13.2.7. Descriptor Set Binding:
139 *
140 * A compatible descriptor set must be bound for all set numbers that
141 * any shaders in a pipeline access, at the time that a draw or
142 * dispatch command is recorded to execute using that pipeline.
143 * However, if none of the shaders in a pipeline statically use any
144 * bindings with a particular set number, then no descriptor set need
145 * be bound for that set number, even if the pipeline layout includes
146 * a non-trivial descriptor set layout for that set number.
147 *
148 * This means that descriptor sets unused by the pipeline may have a
149 * garbage or 0 BINDLESS_BASE register, which will cause context faults
150 * when prefetching descriptors from these sets. Skip prefetching for
151 * descriptors from them to avoid this. This is also an optimization,
152 * since these prefetches would be useless.
153 */
154 if (!(pipeline->active_desc_sets & (1u << i)))
155 continue;
156
157 struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
158 for (unsigned j = 0; j < set_layout->binding_count; j++) {
159 struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
160 unsigned base = i;
161 unsigned offset = binding->offset / 4;
162 /* Note: some users (amber, for example) pass in
163 * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
164 * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
165 */
166 VkShaderStageFlags stages = compute ?
167 binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
168 binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
169 unsigned count = binding->array_size;
170 if (count == 0 || stages == 0)
171 continue;
172 switch (binding->type) {
173 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
174 base = MAX_SETS;
175 offset = (layout->set[i].dynamic_offset_start +
176 binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
177 /* fallthrough */
178 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
179 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
180 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
181 /* IBO-backed resources only need one packet for all graphics stages */
182 if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
183 emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
184 base, offset, count);
185 }
186 if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
187 emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
188 base, offset, count);
189 }
190 break;
191 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
192 /* nothing - input attachment doesn't use bindless */
193 break;
194 case VK_DESCRIPTOR_TYPE_SAMPLER:
195 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
196 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
197 tu_foreach_stage(stage, stages) {
198 emit_load_state(&cs, tu6_stage2opcode(stage),
199 binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
200 ST6_SHADER : ST6_CONSTANTS,
201 tu6_stage2texsb(stage), base, offset, count);
202 }
203 break;
204 }
205 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
206 base = MAX_SETS;
207 offset = (layout->set[i].dynamic_offset_start +
208 binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
209 /* fallthrough */
210 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
211 tu_foreach_stage(stage, stages) {
212 emit_load_state(&cs, tu6_stage2opcode(stage), ST6_UBO,
213 tu6_stage2shadersb(stage), base, offset, count);
214 }
215 break;
216 }
217 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
218 tu_foreach_stage(stage, stages) {
219 /* TODO: We could emit less CP_LOAD_STATE6 if we used
220 * struct-of-arrays instead of array-of-structs.
221 */
222 for (unsigned i = 0; i < count; i++) {
223 unsigned tex_offset = offset + 2 * i * A6XX_TEX_CONST_DWORDS;
224 unsigned sam_offset = offset + (2 * i + 1) * A6XX_TEX_CONST_DWORDS;
225 emit_load_state(&cs, tu6_stage2opcode(stage),
226 ST6_CONSTANTS, tu6_stage2texsb(stage),
227 base, tex_offset, 1);
228 emit_load_state(&cs, tu6_stage2opcode(stage),
229 ST6_SHADER, tu6_stage2texsb(stage),
230 base, sam_offset, 1);
231 }
232 }
233 break;
234 }
235 default:
236 unreachable("bad descriptor type");
237 }
238 }
239 }
240
241 pipeline->load_state = tu_cs_end_draw_state(&pipeline->cs, &cs);
242 }
243
244 struct tu_pipeline_builder
245 {
246 struct tu_device *device;
247 struct tu_pipeline_cache *cache;
248 struct tu_pipeline_layout *layout;
249 const VkAllocationCallbacks *alloc;
250 const VkGraphicsPipelineCreateInfo *create_info;
251
252 struct tu_shader *shaders[MESA_SHADER_STAGES];
253 struct ir3_shader_variant *variants[MESA_SHADER_STAGES];
254 struct ir3_shader_variant *binning_variant;
255 uint64_t shader_iova[MESA_SHADER_STAGES];
256 uint64_t binning_vs_iova;
257
258 bool rasterizer_discard;
259 /* these states are affected by rasterizer_discard */
260 VkSampleCountFlagBits samples;
261 bool use_color_attachments;
262 bool use_dual_src_blend;
263 uint32_t color_attachment_count;
264 VkFormat color_attachment_formats[MAX_RTS];
265 VkFormat depth_attachment_format;
266 uint32_t render_components;
267 };
268
269 static bool
270 tu_logic_op_reads_dst(VkLogicOp op)
271 {
272 switch (op) {
273 case VK_LOGIC_OP_CLEAR:
274 case VK_LOGIC_OP_COPY:
275 case VK_LOGIC_OP_COPY_INVERTED:
276 case VK_LOGIC_OP_SET:
277 return false;
278 default:
279 return true;
280 }
281 }
282
283 static VkBlendFactor
284 tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
285 {
286 /* treat dst alpha as 1.0 and avoid reading it */
287 switch (factor) {
288 case VK_BLEND_FACTOR_DST_ALPHA:
289 return VK_BLEND_FACTOR_ONE;
290 case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
291 return VK_BLEND_FACTOR_ZERO;
292 default:
293 return factor;
294 }
295 }
296
297 static bool tu_blend_factor_is_dual_src(VkBlendFactor factor)
298 {
299 switch (factor) {
300 case VK_BLEND_FACTOR_SRC1_COLOR:
301 case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
302 case VK_BLEND_FACTOR_SRC1_ALPHA:
303 case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
304 return true;
305 default:
306 return false;
307 }
308 }
309
310 static bool
311 tu_blend_state_is_dual_src(const VkPipelineColorBlendStateCreateInfo *info)
312 {
313 if (!info)
314 return false;
315
316 for (unsigned i = 0; i < info->attachmentCount; i++) {
317 const VkPipelineColorBlendAttachmentState *blend = &info->pAttachments[i];
318 if (tu_blend_factor_is_dual_src(blend->srcColorBlendFactor) ||
319 tu_blend_factor_is_dual_src(blend->dstColorBlendFactor) ||
320 tu_blend_factor_is_dual_src(blend->srcAlphaBlendFactor) ||
321 tu_blend_factor_is_dual_src(blend->dstAlphaBlendFactor))
322 return true;
323 }
324
325 return false;
326 }
327
328 void
329 tu6_emit_xs_config(struct tu_cs *cs,
330 gl_shader_stage stage, /* xs->type, but xs may be NULL */
331 const struct ir3_shader_variant *xs,
332 uint64_t binary_iova)
333 {
334 static const struct xs_config {
335 uint16_t reg_sp_xs_ctrl;
336 uint16_t reg_sp_xs_config;
337 uint16_t reg_hlsq_xs_ctrl;
338 uint16_t reg_sp_vs_obj_start;
339 } xs_config[] = {
340 [MESA_SHADER_VERTEX] = {
341 REG_A6XX_SP_VS_CTRL_REG0,
342 REG_A6XX_SP_VS_CONFIG,
343 REG_A6XX_HLSQ_VS_CNTL,
344 REG_A6XX_SP_VS_OBJ_START_LO,
345 },
346 [MESA_SHADER_TESS_CTRL] = {
347 REG_A6XX_SP_HS_CTRL_REG0,
348 REG_A6XX_SP_HS_CONFIG,
349 REG_A6XX_HLSQ_HS_CNTL,
350 REG_A6XX_SP_HS_OBJ_START_LO,
351 },
352 [MESA_SHADER_TESS_EVAL] = {
353 REG_A6XX_SP_DS_CTRL_REG0,
354 REG_A6XX_SP_DS_CONFIG,
355 REG_A6XX_HLSQ_DS_CNTL,
356 REG_A6XX_SP_DS_OBJ_START_LO,
357 },
358 [MESA_SHADER_GEOMETRY] = {
359 REG_A6XX_SP_GS_CTRL_REG0,
360 REG_A6XX_SP_GS_CONFIG,
361 REG_A6XX_HLSQ_GS_CNTL,
362 REG_A6XX_SP_GS_OBJ_START_LO,
363 },
364 [MESA_SHADER_FRAGMENT] = {
365 REG_A6XX_SP_FS_CTRL_REG0,
366 REG_A6XX_SP_FS_CONFIG,
367 REG_A6XX_HLSQ_FS_CNTL,
368 REG_A6XX_SP_FS_OBJ_START_LO,
369 },
370 [MESA_SHADER_COMPUTE] = {
371 REG_A6XX_SP_CS_CTRL_REG0,
372 REG_A6XX_SP_CS_CONFIG,
373 REG_A6XX_HLSQ_CS_CNTL,
374 REG_A6XX_SP_CS_OBJ_START_LO,
375 },
376 };
377 const struct xs_config *cfg = &xs_config[stage];
378
379 if (!xs) {
380 /* shader stage disabled */
381 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
382 tu_cs_emit(cs, 0);
383
384 tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
385 tu_cs_emit(cs, 0);
386 return;
387 }
388
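   /* The A6XX_SP_VS_* / A6XX_HLSQ_VS_* field macros are reused for every
    * stage below; the per-stage registers evidently share the same bitfield
    * layout and only the register offsets in xs_config[] differ.
    */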
389 bool is_fs = xs->type == MESA_SHADER_FRAGMENT;
390 enum a3xx_threadsize threadsize = FOUR_QUADS;
391
392 /* TODO:
393 * the "threadsize" field may have nothing to do with threadsize,
394 * use a value that matches the blob until it is figured out
395 */
396 if (xs->type == MESA_SHADER_GEOMETRY)
397 threadsize = TWO_QUADS;
398
399 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_ctrl, 1);
400 tu_cs_emit(cs,
401 A6XX_SP_VS_CTRL_REG0_THREADSIZE(threadsize) |
402 A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(xs->info.max_reg + 1) |
403 A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(xs->info.max_half_reg + 1) |
404 COND(xs->mergedregs, A6XX_SP_VS_CTRL_REG0_MERGEDREGS) |
405 A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(xs->branchstack) |
406 COND(xs->need_pixlod, A6XX_SP_VS_CTRL_REG0_PIXLODENABLE) |
407 COND(xs->need_fine_derivatives, A6XX_SP_VS_CTRL_REG0_DIFF_FINE) |
408 /* only fragment shader sets VARYING bit */
409 COND(xs->total_in && is_fs, A6XX_SP_FS_CTRL_REG0_VARYING) |
410 /* unknown bit, seems unnecessary */
411 COND(is_fs, 0x1000000));
412
413 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 2);
414 tu_cs_emit(cs, A6XX_SP_VS_CONFIG_ENABLED |
415 COND(xs->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
416 COND(xs->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
417 COND(xs->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO) |
418 COND(xs->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO) |
419 A6XX_SP_VS_CONFIG_NTEX(xs->num_samp) |
420 A6XX_SP_VS_CONFIG_NSAMP(xs->num_samp));
421 tu_cs_emit(cs, xs->instrlen);
422
423 tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
424 tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(xs->constlen) |
425 A6XX_HLSQ_VS_CNTL_ENABLED);
426
427 /* emit program binary
428 * binary_iova should be aligned to 1 instrlen unit (128 bytes)
429 */
430
431 assert((binary_iova & 0x7f) == 0);
432
433 tu_cs_emit_pkt4(cs, cfg->reg_sp_vs_obj_start, 2);
434 tu_cs_emit_qw(cs, binary_iova);
435
436 tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
437 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
438 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
439 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
440 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
441 CP_LOAD_STATE6_0_NUM_UNIT(xs->instrlen));
442 tu_cs_emit_qw(cs, binary_iova);
443
444 /* emit immediates */
445
446 const struct ir3_const_state *const_state = ir3_const_state(xs);
447 uint32_t base = const_state->offsets.immediate;
448 int size = const_state->immediates_count;
449
450 /* truncate size to avoid writing constants that the shader
451 * does not use:
452 */
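   /* Illustrative example: with base = 16, immediates_count = 8 and
    * constlen = 20, the MIN2() below keeps only MIN2(24, 20) - 16 = 4 of the
    * 8 vec4 immediates.
    */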
453 size = MIN2(size + base, xs->constlen) - base;
454
455 if (size <= 0)
456 return;
457
458 tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3 + size * 4);
459 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
460 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
461 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
462 CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
463 CP_LOAD_STATE6_0_NUM_UNIT(size));
464 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
465 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
466
467 for (unsigned i = 0; i < size; i++) {
468 tu_cs_emit(cs, const_state->immediates[i].val[0]);
469 tu_cs_emit(cs, const_state->immediates[i].val[1]);
470 tu_cs_emit(cs, const_state->immediates[i].val[2]);
471 tu_cs_emit(cs, const_state->immediates[i].val[3]);
472 }
473 }
474
475 static void
476 tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
477 const struct ir3_shader_variant *v,
478 uint32_t binary_iova)
479 {
480 tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
481 .cs_state = true,
482 .cs_ibo = true));
483
484 tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v, binary_iova);
485
486 tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
487 tu_cs_emit(cs, 0x41);
488
489 uint32_t local_invocation_id =
490 ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
491 uint32_t work_group_id =
492 ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);
493
494 tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
495 tu_cs_emit(cs,
496 A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
497 A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
498 A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
499 A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
500 tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */
501 }
502
503 static void
504 tu6_emit_vs_system_values(struct tu_cs *cs,
505 const struct ir3_shader_variant *vs,
506 const struct ir3_shader_variant *hs,
507 const struct ir3_shader_variant *ds,
508 const struct ir3_shader_variant *gs,
509 bool primid_passthru)
510 {
511 const uint32_t vertexid_regid =
512 ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
513 const uint32_t instanceid_regid =
514 ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
515 const uint32_t tess_coord_x_regid = hs ?
516 ir3_find_sysval_regid(ds, SYSTEM_VALUE_TESS_COORD) :
517 regid(63, 0);
518 const uint32_t tess_coord_y_regid = VALIDREG(tess_coord_x_regid) ?
519 tess_coord_x_regid + 1 :
520 regid(63, 0);
521 const uint32_t hs_patch_regid = hs ?
522 ir3_find_sysval_regid(hs, SYSTEM_VALUE_PRIMITIVE_ID) :
523 regid(63, 0);
524 const uint32_t ds_patch_regid = hs ?
525 ir3_find_sysval_regid(ds, SYSTEM_VALUE_PRIMITIVE_ID) :
526 regid(63, 0);
527 const uint32_t hs_invocation_regid = hs ?
528 ir3_find_sysval_regid(hs, SYSTEM_VALUE_TCS_HEADER_IR3) :
529 regid(63, 0);
530 const uint32_t primitiveid_regid = gs ?
531 ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
532 regid(63, 0);
533 const uint32_t gsheader_regid = gs ?
534 ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
535 regid(63, 0);
536
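   /* regid(63, 0) (0xfc) is what ir3_find_sysval_regid() returns when the
    * shader does not use a sysval; writing it to the REGID fields below
    * presumably tells the hardware not to populate that value.
    */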
537 tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
538 tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
539 A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
540 A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
541 0xfc000000);
542 tu_cs_emit(cs, A6XX_VFD_CONTROL_2_REGID_HSPATCHID(hs_patch_regid) |
543 A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(hs_invocation_regid));
544 tu_cs_emit(cs, A6XX_VFD_CONTROL_3_REGID_DSPATCHID(ds_patch_regid) |
545 A6XX_VFD_CONTROL_3_REGID_TESSX(tess_coord_x_regid) |
546 A6XX_VFD_CONTROL_3_REGID_TESSY(tess_coord_y_regid) |
547 0xfc);
548 tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
549 tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
550 0xfc00); /* VFD_CONTROL_5 */
551 tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU)); /* VFD_CONTROL_6 */
552 }
553
554 /* Add any missing varyings needed for stream-out. Otherwise varyings not
555 * used by the fragment shader will be stripped out.
556 */
557 static void
558 tu6_link_streamout(struct ir3_shader_linkage *l,
559 const struct ir3_shader_variant *v)
560 {
561 const struct ir3_stream_output_info *info = &v->shader->stream_output;
562
563 /*
564 * First, any stream-out varyings not already in linkage map (ie. also
565 * consumed by frag shader) need to be added:
566 */
567 for (unsigned i = 0; i < info->num_outputs; i++) {
568 const struct ir3_stream_output *out = &info->output[i];
569 unsigned compmask =
570 (1 << (out->num_components + out->start_component)) - 1;
571 unsigned k = out->register_index;
572 unsigned idx, nextloc = 0;
573
574 /* psize/pos need to be the last entries in the linkage map, and will
575 * get added after stream-out linking, so skip over them:
576 */
577 if (v->outputs[k].slot == VARYING_SLOT_PSIZ ||
578 v->outputs[k].slot == VARYING_SLOT_POS)
579 continue;
580
581 for (idx = 0; idx < l->cnt; idx++) {
582 if (l->var[idx].regid == v->outputs[k].regid)
583 break;
584 nextloc = MAX2(nextloc, l->var[idx].loc + 4);
585 }
586
587 /* add if not already in linkage map: */
588 if (idx == l->cnt)
589 ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);
590
591 /* expand component-mask if needed, ie streaming out all components
592 * but frag shader doesn't consume all components:
593 */
594 if (compmask & ~l->var[idx].compmask) {
595 l->var[idx].compmask |= compmask;
596 l->max_loc = MAX2(l->max_loc, l->var[idx].loc +
597 util_last_bit(l->var[idx].compmask));
598 }
599 }
600 }
601
602 static void
603 tu6_setup_streamout(struct tu_cs *cs,
604 const struct ir3_shader_variant *v,
605 struct ir3_shader_linkage *l)
606 {
607 const struct ir3_stream_output_info *info = &v->shader->stream_output;
608 uint32_t prog[IR3_MAX_SO_OUTPUTS * 2] = {};
609 uint32_t ncomp[IR3_MAX_SO_BUFFERS] = {};
610 uint32_t prog_count = align(l->max_loc, 2) / 2;
611
612 /* TODO: streamout state should be in a non-GMEM draw state */
613
614 /* no streamout: */
615 if (info->num_outputs == 0) {
616 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
617 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
618 tu_cs_emit(cs, 0);
619 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
620 tu_cs_emit(cs, 0);
621 return;
622 }
623
624 /* is there something to do with info->stride[i]? */
625
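   /* Each A6XX_VPC_SO_PROG dword describes two consecutive VPC output
    * locations: the even location in the A half and the odd location in the
    * B half, hence prog[loc/2] and the (loc & 1) test below.
    */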
626 for (unsigned i = 0; i < info->num_outputs; i++) {
627 const struct ir3_stream_output *out = &info->output[i];
628 unsigned k = out->register_index;
629 unsigned idx;
630
631 /* Skip it, if there's an unused reg in the middle of outputs. */
632 if (v->outputs[k].regid == INVALID_REG)
633 continue;
634
635 ncomp[out->output_buffer] += out->num_components;
636
637 /* the linkage map is sorted in the order the frag shader wants things,
638 * so this lookup is a bit less ideal here..
639 */
640 for (idx = 0; idx < l->cnt; idx++)
641 if (l->var[idx].regid == v->outputs[k].regid)
642 break;
643
644 debug_assert(idx < l->cnt);
645
646 for (unsigned j = 0; j < out->num_components; j++) {
647 unsigned c = j + out->start_component;
648 unsigned loc = l->var[idx].loc + c;
649 unsigned off = j + out->dst_offset; /* in dwords */
650
651 if (loc & 1) {
652 prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
653 A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
654 A6XX_VPC_SO_PROG_B_OFF(off * 4);
655 } else {
656 prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
657 A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
658 A6XX_VPC_SO_PROG_A_OFF(off * 4);
659 }
660 }
661 }
662
663 tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + 2 * prog_count);
664 tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
665 tu_cs_emit(cs, A6XX_VPC_SO_BUF_CNTL_ENABLE |
666 COND(ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
667 COND(ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
668 COND(ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
669 COND(ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3));
670 for (uint32_t i = 0; i < 4; i++) {
671 tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(i));
672 tu_cs_emit(cs, ncomp[i]);
673 }
674 /* note: "VPC_SO_CNTL" write seems to be responsible for resetting the SO_PROG */
675 tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
676 tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
677 for (uint32_t i = 0; i < prog_count; i++) {
678 tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
679 tu_cs_emit(cs, prog[i]);
680 }
681 }
682
683 static void
684 tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
685 enum a6xx_state_block block, uint32_t offset,
686 uint32_t size, uint32_t *dwords) {
687 assert(size % 4 == 0);
688
689 tu_cs_emit_pkt7(cs, opcode, 3 + size);
690 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
691 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
692 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
693 CP_LOAD_STATE6_0_STATE_BLOCK(block) |
694 CP_LOAD_STATE6_0_NUM_UNIT(size / 4));
695
696 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
697 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
698 dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
699
700 tu_cs_emit_array(cs, dwords, size);
701 }
702
703 static void
704 tu6_emit_link_map(struct tu_cs *cs,
705 const struct ir3_shader_variant *producer,
706 const struct ir3_shader_variant *consumer,
707 enum a6xx_state_block sb)
708 {
709 const struct ir3_const_state *const_state = ir3_const_state(consumer);
710 uint32_t base = const_state->offsets.primitive_map;
711 uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
712 num_loc = ir3_link_geometry_stages(producer, consumer, patch_locs);
713 int size = DIV_ROUND_UP(num_loc, 4);
714
715 size = (MIN2(size + base, consumer->constlen) - base) * 4;
716 if (size <= 0)
717 return;
718
719 tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, sb, 0, size,
720 patch_locs);
721 }
722
723 static uint16_t
724 gl_primitive_to_tess(uint16_t primitive) {
725 switch (primitive) {
726 case GL_POINTS:
727 return TESS_POINTS;
728 case GL_LINE_STRIP:
729 return TESS_LINES;
730 case GL_TRIANGLE_STRIP:
731 return TESS_CW_TRIS;
732 default:
733 unreachable("");
734 }
735 }
736
737 void
738 tu6_emit_vpc(struct tu_cs *cs,
739 const struct ir3_shader_variant *vs,
740 const struct ir3_shader_variant *hs,
741 const struct ir3_shader_variant *ds,
742 const struct ir3_shader_variant *gs,
743 const struct ir3_shader_variant *fs)
744 {
745 /* note: doesn't compile as static because of the array regs.. */
746 const struct reg_config {
747 uint16_t reg_sp_xs_out_reg;
748 uint16_t reg_sp_xs_vpc_dst_reg;
749 uint16_t reg_vpc_xs_pack;
750 uint16_t reg_vpc_xs_clip_cntl;
751 uint16_t reg_gras_xs_cl_cntl;
752 uint16_t reg_pc_xs_out_cntl;
753 uint16_t reg_sp_xs_primitive_cntl;
754 uint16_t reg_vpc_xs_layer_cntl;
755 uint16_t reg_gras_xs_layer_cntl;
756 } reg_config[] = {
757 [MESA_SHADER_VERTEX] = {
758 REG_A6XX_SP_VS_OUT_REG(0),
759 REG_A6XX_SP_VS_VPC_DST_REG(0),
760 REG_A6XX_VPC_VS_PACK,
761 REG_A6XX_VPC_VS_CLIP_CNTL,
762 REG_A6XX_GRAS_VS_CL_CNTL,
763 REG_A6XX_PC_VS_OUT_CNTL,
764 REG_A6XX_SP_VS_PRIMITIVE_CNTL,
765 REG_A6XX_VPC_VS_LAYER_CNTL,
766 REG_A6XX_GRAS_VS_LAYER_CNTL
767 },
768 [MESA_SHADER_TESS_EVAL] = {
769 REG_A6XX_SP_DS_OUT_REG(0),
770 REG_A6XX_SP_DS_VPC_DST_REG(0),
771 REG_A6XX_VPC_DS_PACK,
772 REG_A6XX_VPC_DS_CLIP_CNTL,
773 REG_A6XX_GRAS_DS_CL_CNTL,
774 REG_A6XX_PC_DS_OUT_CNTL,
775 REG_A6XX_SP_DS_PRIMITIVE_CNTL,
776 REG_A6XX_VPC_DS_LAYER_CNTL,
777 REG_A6XX_GRAS_DS_LAYER_CNTL
778 },
779 [MESA_SHADER_GEOMETRY] = {
780 REG_A6XX_SP_GS_OUT_REG(0),
781 REG_A6XX_SP_GS_VPC_DST_REG(0),
782 REG_A6XX_VPC_GS_PACK,
783 REG_A6XX_VPC_GS_CLIP_CNTL,
784 REG_A6XX_GRAS_GS_CL_CNTL,
785 REG_A6XX_PC_GS_OUT_CNTL,
786 REG_A6XX_SP_GS_PRIMITIVE_CNTL,
787 REG_A6XX_VPC_GS_LAYER_CNTL,
788 REG_A6XX_GRAS_GS_LAYER_CNTL
789 },
790 };
791
792 const struct ir3_shader_variant *last_shader;
793 if (gs) {
794 last_shader = gs;
795 } else if (hs) {
796 last_shader = ds;
797 } else {
798 last_shader = vs;
799 }
800
801 const struct reg_config *cfg = &reg_config[last_shader->type];
802
803 struct ir3_shader_linkage linkage = { .primid_loc = 0xff };
804 if (fs)
805 ir3_link_shaders(&linkage, last_shader, fs, true);
806
807 if (last_shader->shader->stream_output.num_outputs)
808 tu6_link_streamout(&linkage, last_shader);
809
810 /* We do this after linking shaders in order to know whether PrimID
811 * passthrough needs to be enabled.
812 */
813 bool primid_passthru = linkage.primid_loc != 0xff;
814 tu6_emit_vs_system_values(cs, vs, hs, ds, gs, primid_passthru);
815
816 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
817 tu_cs_emit(cs, ~linkage.varmask[0]);
818 tu_cs_emit(cs, ~linkage.varmask[1]);
819 tu_cs_emit(cs, ~linkage.varmask[2]);
820 tu_cs_emit(cs, ~linkage.varmask[3]);
821
822 /* a6xx finds position/pointsize at the end */
823 const uint32_t position_regid =
824 ir3_find_output_regid(last_shader, VARYING_SLOT_POS);
825 const uint32_t pointsize_regid =
826 ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
827 const uint32_t layer_regid =
828 ir3_find_output_regid(last_shader, VARYING_SLOT_LAYER);
829 uint32_t primitive_regid = gs ?
830 ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) : regid(63, 0);
831 uint32_t flags_regid = gs ?
832 ir3_find_output_regid(gs, VARYING_SLOT_GS_VERTEX_FLAGS_IR3) : 0;
833
834 uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff;
835 if (layer_regid != regid(63, 0)) {
836 layer_loc = linkage.max_loc;
837 ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
838 }
839 if (position_regid != regid(63, 0)) {
840 position_loc = linkage.max_loc;
841 ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
842 }
843 if (pointsize_regid != regid(63, 0)) {
844 pointsize_loc = linkage.max_loc;
845 ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
846 }
847
848 tu6_setup_streamout(cs, last_shader, &linkage);
849
850 /* map outputs of the last shader to VPC */
851 assert(linkage.cnt <= 32);
852 const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
853 const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
854 uint32_t sp_out[16];
855 uint32_t sp_vpc_dst[8];
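   /* Each SP_xS_OUT_REG dword packs two 16-bit {regid, compmask} entries and
    * each SP_xS_VPC_DST_REG dword packs four 8-bit OUTLOC fields, hence the
    * uint16_t/uint8_t casts and the divide-by-2/divide-by-4 counts above.
    */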
856 for (uint32_t i = 0; i < linkage.cnt; i++) {
857 ((uint16_t *) sp_out)[i] =
858 A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
859 A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
860 ((uint8_t *) sp_vpc_dst)[i] =
861 A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
862 }
863
864 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_out_reg, sp_out_count);
865 tu_cs_emit_array(cs, sp_out, sp_out_count);
866
867 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_vpc_dst_reg, sp_vpc_dst_count);
868 tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);
869
870 tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_pack, 1);
871 tu_cs_emit(cs, A6XX_VPC_VS_PACK_POSITIONLOC(position_loc) |
872 A6XX_VPC_VS_PACK_PSIZELOC(pointsize_loc) |
873 A6XX_VPC_VS_PACK_STRIDE_IN_VPC(linkage.max_loc));
874
875 tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_clip_cntl, 1);
876 tu_cs_emit(cs, 0xffff00);
877
878 tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_cl_cntl, 1);
879 tu_cs_emit(cs, 0);
880
881 tu_cs_emit_pkt4(cs, cfg->reg_pc_xs_out_cntl, 1);
882 tu_cs_emit(cs, A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(linkage.max_loc) |
883 CONDREG(pointsize_regid, A6XX_PC_VS_OUT_CNTL_PSIZE) |
884 CONDREG(layer_regid, A6XX_PC_VS_OUT_CNTL_LAYER) |
885 CONDREG(primitive_regid, A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID));
886
887 tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_primitive_cntl, 1);
888 tu_cs_emit(cs, A6XX_SP_VS_PRIMITIVE_CNTL_OUT(linkage.cnt) |
889 A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(flags_regid));
890
891 tu_cs_emit_pkt4(cs, cfg->reg_vpc_xs_layer_cntl, 1);
892 tu_cs_emit(cs, A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(layer_loc) | 0xff00);
893
894 tu_cs_emit_pkt4(cs, cfg->reg_gras_xs_layer_cntl, 1);
895 tu_cs_emit(cs, CONDREG(layer_regid, A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER));
896
897 tu_cs_emit_regs(cs, A6XX_PC_PRIMID_PASSTHRU(primid_passthru));
898
899 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
900 tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs ? fs->total_in : 0) |
901 COND(fs && fs->total_in, A6XX_VPC_CNTL_0_VARYING) |
902 A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |
903 A6XX_VPC_CNTL_0_UNKLOC(0xff));
904
905 if (hs) {
906 shader_info *hs_info = &hs->shader->nir->info;
907 tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_NUM_VERTEX, 1);
908 tu_cs_emit(cs, hs_info->tess.tcs_vertices_out);
909
910 /* Total attribute slots in HS incoming patch. */
911 tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9801, 1);
912 tu_cs_emit(cs,
913 hs_info->tess.tcs_vertices_out * vs->output_size / 4);
914
915 tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
916 tu_cs_emit(cs, vs->output_size);
917 /* In SPIR-V generated from GLSL, the tessellation primitive params
918 * are specified in the tess eval shader, but in SPIR-V generated from
919 * HLSL, they are specified in the tess control shader. */
920 shader_info *tess_info =
921 ds->shader->nir->info.tess.spacing == TESS_SPACING_UNSPECIFIED ?
922 &hs->shader->nir->info : &ds->shader->nir->info;
923 tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_CNTL, 1);
924 uint32_t output;
925 if (tess_info->tess.point_mode)
926 output = TESS_POINTS;
927 else if (tess_info->tess.primitive_mode == GL_ISOLINES)
928 output = TESS_LINES;
929 else if (tess_info->tess.ccw)
930 output = TESS_CCW_TRIS;
931 else
932 output = TESS_CW_TRIS;
933
934 enum a6xx_tess_spacing spacing;
935 switch (tess_info->tess.spacing) {
936 case TESS_SPACING_EQUAL:
937 spacing = TESS_EQUAL;
938 break;
939 case TESS_SPACING_FRACTIONAL_ODD:
940 spacing = TESS_FRACTIONAL_ODD;
941 break;
942 case TESS_SPACING_FRACTIONAL_EVEN:
943 spacing = TESS_FRACTIONAL_EVEN;
944 break;
945 case TESS_SPACING_UNSPECIFIED:
946 default:
947 unreachable("invalid tess spacing");
948 }
949 tu_cs_emit(cs, A6XX_PC_TESS_CNTL_SPACING(spacing) |
950 A6XX_PC_TESS_CNTL_OUTPUT(output));
951
952 tu6_emit_link_map(cs, vs, hs, SB6_HS_SHADER);
953 tu6_emit_link_map(cs, hs, ds, SB6_DS_SHADER);
954 }
955
956
957 if (gs) {
958 uint32_t vertices_out, invocations, output, vec4_size;
959 /* this detects the tu_clear_blit path, which doesn't set ->nir */
960 if (gs->shader->nir) {
961 if (hs) {
962 tu6_emit_link_map(cs, ds, gs, SB6_GS_SHADER);
963 } else {
964 tu6_emit_link_map(cs, vs, gs, SB6_GS_SHADER);
965 }
966 vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
967 output = gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
968 invocations = gs->shader->nir->info.gs.invocations - 1;
969 /* Size of per-primitive allocation in ldlw memory in vec4s. */
970 vec4_size = gs->shader->nir->info.gs.vertices_in *
971 DIV_ROUND_UP(vs->output_size, 4);
972 } else {
973 vertices_out = 3;
974 output = TESS_CW_TRIS;
975 invocations = 0;
976 vec4_size = 0;
977 }
978
979 tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
980 tu_cs_emit(cs,
981 A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
982 A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
983 A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));
984
985 tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
986 tu_cs_emit(cs, 0);
987
988 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
989 tu_cs_emit(cs, 0xff);
990
991 tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
992 tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));
993
994 tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
995 tu_cs_emit(cs, 0);
996
997 tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
998 tu_cs_emit(cs, vs->output_size);
999 }
1000 }
1001
1002 static int
1003 tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
1004 uint32_t index,
1005 uint8_t *interp_mode,
1006 uint8_t *ps_repl_mode)
1007 {
1008 enum
1009 {
1010 INTERP_SMOOTH = 0,
1011 INTERP_FLAT = 1,
1012 INTERP_ZERO = 2,
1013 INTERP_ONE = 3,
1014 };
1015 enum
1016 {
1017 PS_REPL_NONE = 0,
1018 PS_REPL_S = 1,
1019 PS_REPL_T = 2,
1020 PS_REPL_ONE_MINUS_T = 3,
1021 };
1022
1023 const uint32_t compmask = fs->inputs[index].compmask;
1024
1025 /* NOTE: varyings are packed, so if compmask is 0xb then the first, second,
1026 * and fourth components occupy three consecutive varying slots
1027 */
1028 int shift = 0;
1029 *interp_mode = 0;
1030 *ps_repl_mode = 0;
1031 if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
1032 if (compmask & 0x1) {
1033 *ps_repl_mode |= PS_REPL_S << shift;
1034 shift += 2;
1035 }
1036 if (compmask & 0x2) {
1037 *ps_repl_mode |= PS_REPL_T << shift;
1038 shift += 2;
1039 }
1040 if (compmask & 0x4) {
1041 *interp_mode |= INTERP_ZERO << shift;
1042 shift += 2;
1043 }
1044 if (compmask & 0x8) {
1045 *interp_mode |= INTERP_ONE << 6;
1046 shift += 2;
1047 }
1048 } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
1049 fs->inputs[index].rasterflat) {
1050 for (int i = 0; i < 4; i++) {
1051 if (compmask & (1 << i)) {
1052 *interp_mode |= INTERP_FLAT << shift;
1053 shift += 2;
1054 }
1055 }
1056 }
1057
1058 return shift;
1059 }
1060
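/* Each FS input component gets a 2-bit interpolation mode and a 2-bit
 * point-coord replacement mode, packed by component location: e.g.
 * (illustrative) an input whose inloc is 20 starts at bit 40, i.e. bit 8 of
 * interp_modes[1] / ps_repl_modes[1].
 */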
1061 static void
1062 tu6_emit_vpc_varying_modes(struct tu_cs *cs,
1063 const struct ir3_shader_variant *fs)
1064 {
1065 uint32_t interp_modes[8] = { 0 };
1066 uint32_t ps_repl_modes[8] = { 0 };
1067
1068 if (fs) {
1069 for (int i = -1;
1070 (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {
1071
1072 /* get the mode for input i */
1073 uint8_t interp_mode;
1074 uint8_t ps_repl_mode;
1075 const int bits =
1076 tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);
1077
1078 /* OR the mode into the array */
1079 const uint32_t inloc = fs->inputs[i].inloc * 2;
1080 uint32_t n = inloc / 32;
1081 uint32_t shift = inloc % 32;
1082 interp_modes[n] |= interp_mode << shift;
1083 ps_repl_modes[n] |= ps_repl_mode << shift;
1084 if (shift + bits > 32) {
1085 n++;
1086 shift = 32 - shift;
1087
1088 interp_modes[n] |= interp_mode >> shift;
1089 ps_repl_modes[n] |= ps_repl_mode >> shift;
1090 }
1091 }
1092 }
1093
1094 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
1095 tu_cs_emit_array(cs, interp_modes, 8);
1096
1097 tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
1098 tu_cs_emit_array(cs, ps_repl_modes, 8);
1099 }
1100
1101 void
1102 tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
1103 {
1104 uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
1105 uint32_t ij_regid[IJ_COUNT];
1106 uint32_t smask_in_regid;
1107
1108 bool sample_shading = fs->per_samp | fs->key.sample_shading;
1109 bool enable_varyings = fs->total_in > 0;
1110
1111 samp_id_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
1112 smask_in_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
1113 face_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
1114 coord_regid = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
1115 zwcoord_regid = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
1116 for (unsigned i = 0; i < ARRAY_SIZE(ij_regid); i++)
1117 ij_regid[i] = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL + i);
1118
1119 if (VALIDREG(ij_regid[IJ_LINEAR_SAMPLE]))
1120 tu_finishme("linear sample varying");
1121
1122 if (VALIDREG(ij_regid[IJ_LINEAR_CENTROID]))
1123 tu_finishme("linear centroid varying");
1124
1125 if (fs->num_sampler_prefetch > 0) {
1126 assert(VALIDREG(ij_regid[IJ_PERSP_PIXEL]));
1127 /* also, it seems like ij_pix is *required* to be r0.x */
1128 assert(ij_regid[IJ_PERSP_PIXEL] == regid(0, 0));
1129 }
1130
1131 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
1132 tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
1133 A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
1134 0x7000); // XXX);
1135 for (int i = 0; i < fs->num_sampler_prefetch; i++) {
1136 const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
1137 tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
1138 A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
1139 A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
1140 A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
1141 A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
1142 COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
1143 A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
1144 }
1145
1146 if (fs->num_sampler_prefetch > 0) {
1147 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);
1148 for (int i = 0; i < fs->num_sampler_prefetch; i++) {
1149 const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
1150 tu_cs_emit(cs,
1151 A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |
1152 A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));
1153 }
1154 }
1155
1156 tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
1157 tu_cs_emit(cs, 0x7);
1158 tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
1159 A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
1160 A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
1161 A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_regid[IJ_PERSP_SIZE]));
1162 tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(ij_regid[IJ_PERSP_PIXEL]) |
1163 A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(ij_regid[IJ_LINEAR_PIXEL]) |
1164 A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(ij_regid[IJ_PERSP_CENTROID]) |
1165 A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(ij_regid[IJ_LINEAR_CENTROID]));
1166 tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
1167 A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
1168 A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(ij_regid[IJ_PERSP_SAMPLE]) |
1169 A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(ij_regid[IJ_LINEAR_SAMPLE]));
1170 tu_cs_emit(cs, 0xfc);
1171
1172 tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
1173 tu_cs_emit(cs, enable_varyings ? 3 : 1);
1174
1175 bool need_size = fs->frag_face || fs->fragcoord_compmask != 0;
1176 bool need_size_persamp = false;
1177 if (VALIDREG(ij_regid[IJ_PERSP_SIZE])) {
1178 if (sample_shading)
1179 need_size_persamp = true;
1180 else
1181 need_size = true;
1182 }
1183 if (VALIDREG(ij_regid[IJ_LINEAR_PIXEL]))
1184 need_size = true;
1185
1186 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
1187 tu_cs_emit(cs,
1188 CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_GRAS_CNTL_IJ_PERSP_PIXEL) |
1189 CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_GRAS_CNTL_IJ_PERSP_CENTROID) |
1190 CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE) |
1191 COND(need_size, A6XX_GRAS_CNTL_SIZE) |
1192 COND(need_size_persamp, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
1193 COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)));
1194
1195 tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
1196 tu_cs_emit(cs,
1197 CONDREG(ij_regid[IJ_PERSP_PIXEL], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL) |
1198 CONDREG(ij_regid[IJ_PERSP_CENTROID], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID) |
1199 CONDREG(ij_regid[IJ_PERSP_SAMPLE], A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE) |
1200 COND(need_size, A6XX_RB_RENDER_CONTROL0_SIZE) |
1201 COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
1202 COND(need_size_persamp, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
1203 COND(fs->fragcoord_compmask != 0,
1204 A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)));
1205 tu_cs_emit(cs,
1206 /* these two bits (UNK4/UNK5) relate to fragcoord
1207 * without them, fragcoord is the same for all samples
1208 */
1209 COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK4) |
1210 COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK5) |
1211 CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
1212 CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
1213 CONDREG(ij_regid[IJ_PERSP_SIZE], A6XX_RB_RENDER_CONTROL1_SIZE) |
1214 COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));
1215
1216 tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
1217 tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));
1218
1219 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
1220 tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX
1221
1222 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
1223 tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
1224 }
1225
1226 static void
1227 tu6_emit_fs_outputs(struct tu_cs *cs,
1228 const struct ir3_shader_variant *fs,
1229 uint32_t mrt_count, bool dual_src_blend,
1230 uint32_t render_components,
1231 bool is_s8_uint)
1232 {
1233 uint32_t smask_regid, posz_regid, stencilref_regid;
1234
1235 posz_regid = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
1236 smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);
1237 stencilref_regid = ir3_find_output_regid(fs, FRAG_RESULT_STENCIL);
1238
1239 uint32_t fragdata_regid[8];
1240 if (fs->color0_mrt) {
1241 fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
1242 for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
1243 fragdata_regid[i] = fragdata_regid[0];
1244 } else {
1245 for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
1246 fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
1247 }
1248
1249 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
1250 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
1251 A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
1252 A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(stencilref_regid) |
1253 COND(dual_src_blend, A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
1254 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
1255
1256 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
1257 for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
1258 // TODO we could have a mix of half and full precision outputs,
1259 // we really need to figure out half-precision from IR3_REG_HALF
1260 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
1261 (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
1262 }
1263
1264 tu_cs_emit_regs(cs,
1265 A6XX_SP_FS_RENDER_COMPONENTS(.dword = render_components));
1266
1267 tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
1268 tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
1269 COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK) |
1270 COND(fs->writes_stencilref, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF) |
1271 COND(dual_src_blend, A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
1272 tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));
1273
1274 tu_cs_emit_regs(cs,
1275 A6XX_RB_RENDER_COMPONENTS(.dword = render_components));
1276
1277 enum a6xx_ztest_mode zmode;
1278
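   /* Use late-Z whenever the FS can change the depth/stencil outcome itself
    * (depth or stencilref writes, discard/kill, no_earlyz) or the attachment
    * is S8_UINT; otherwise early-Z is safe.
    */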
1279 if (fs->no_earlyz || fs->has_kill || fs->writes_pos || fs->writes_stencilref || is_s8_uint) {
1280 zmode = A6XX_LATE_Z;
1281 } else {
1282 zmode = A6XX_EARLY_Z;
1283 }
1284
1285 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
1286 tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(zmode));
1287
1288 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
1289 tu_cs_emit(cs, A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(zmode));
1290 }
1291
1292 static void
1293 tu6_emit_geom_tess_consts(struct tu_cs *cs,
1294 const struct ir3_shader_variant *vs,
1295 const struct ir3_shader_variant *hs,
1296 const struct ir3_shader_variant *ds,
1297 const struct ir3_shader_variant *gs,
1298 uint32_t cps_per_patch)
1299 {
1300 uint32_t num_vertices =
1301 hs ? cps_per_patch : gs->shader->nir->info.gs.vertices_in;
1302
1303 uint32_t vs_params[4] = {
1304 vs->output_size * num_vertices * 4, /* vs primitive stride */
1305 vs->output_size * 4, /* vs vertex stride */
1306 0,
1307 0,
1308 };
1309 uint32_t vs_base = ir3_const_state(vs)->offsets.primitive_param;
1310 tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
1311 ARRAY_SIZE(vs_params), vs_params);
1312
1313 if (hs) {
1314 assert(ds->type != MESA_SHADER_NONE);
1315 uint32_t hs_params[4] = {
1316 vs->output_size * num_vertices * 4, /* hs primitive stride */
1317 vs->output_size * 4, /* hs vertex stride */
1318 hs->output_size,
1319 cps_per_patch,
1320 };
1321
1322 uint32_t hs_base = hs->const_state->offsets.primitive_param;
1323 tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, hs_base, SB6_HS_SHADER, 0,
1324 ARRAY_SIZE(hs_params), hs_params);
1325 if (gs)
1326 num_vertices = gs->shader->nir->info.gs.vertices_in;
1327
1328 uint32_t ds_params[4] = {
1329 ds->output_size * num_vertices * 4, /* ds primitive stride */
1330 ds->output_size * 4, /* ds vertex stride */
1331 hs->output_size, /* hs vertex stride (dwords) */
1332 hs->shader->nir->info.tess.tcs_vertices_out
1333 };
1334
1335 uint32_t ds_base = ds->const_state->offsets.primitive_param;
1336 tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, ds_base, SB6_DS_SHADER, 0,
1337 ARRAY_SIZE(ds_params), ds_params);
1338 }
1339
1340 if (gs) {
1341 const struct ir3_shader_variant *prev = ds ? ds : vs;
1342 uint32_t gs_params[4] = {
1343 prev->output_size * num_vertices * 4, /* gs primitive stride */
1344 prev->output_size * 4, /* gs vertex stride */
1345 0,
1346 0,
1347 };
1348 uint32_t gs_base = gs->const_state->offsets.primitive_param;
1349 tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
1350 ARRAY_SIZE(gs_params), gs_params);
1351 }
1352 }
1353
1354 static void
1355 tu6_emit_program(struct tu_cs *cs,
1356 struct tu_pipeline_builder *builder,
1357 bool binning_pass)
1358 {
1359 const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
1360 const struct ir3_shader_variant *bs = builder->binning_variant;
1361 const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];
1362 const struct ir3_shader_variant *ds = builder->variants[MESA_SHADER_TESS_EVAL];
1363 const struct ir3_shader_variant *gs = builder->variants[MESA_SHADER_GEOMETRY];
1364 const struct ir3_shader_variant *fs = builder->variants[MESA_SHADER_FRAGMENT];
1365 gl_shader_stage stage = MESA_SHADER_VERTEX;
1366
1367 STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
1368
1369 tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
1370 .vs_state = true,
1371 .hs_state = true,
1372 .ds_state = true,
1373 .gs_state = true,
1374 .fs_state = true,
1375 .gfx_ibo = true));
1376
1377 /* Don't use the binning pass variant when GS is present because we don't
1378 * support compiling correct binning pass variants with GS.
1379 */
1380 if (binning_pass && !gs) {
1381 vs = bs;
1382 tu6_emit_xs_config(cs, stage, bs, builder->binning_vs_iova);
1383 stage++;
1384 }
1385
1386 for (; stage < ARRAY_SIZE(builder->shaders); stage++) {
1387 const struct ir3_shader_variant *xs = builder->variants[stage];
1388
1389 if (stage == MESA_SHADER_FRAGMENT && binning_pass)
1390 fs = xs = NULL;
1391
1392 tu6_emit_xs_config(cs, stage, xs, builder->shader_iova[stage]);
1393 }
1394
1395 tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
1396 tu_cs_emit(cs, 0);
1397
1398 tu6_emit_vpc(cs, vs, hs, ds, gs, fs);
1399 tu6_emit_vpc_varying_modes(cs, fs);
1400
1401 if (fs) {
1402 tu6_emit_fs_inputs(cs, fs);
1403 tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count,
1404 builder->use_dual_src_blend,
1405 builder->render_components,
1406 builder->depth_attachment_format == VK_FORMAT_S8_UINT);
1407 } else {
1408 /* TODO: check if these can be skipped if fs is disabled */
1409 struct ir3_shader_variant dummy_variant = {};
1410 tu6_emit_fs_inputs(cs, &dummy_variant);
1411 tu6_emit_fs_outputs(cs, &dummy_variant, builder->color_attachment_count,
1412 builder->use_dual_src_blend,
1413 builder->render_components,
1414 builder->depth_attachment_format == VK_FORMAT_S8_UINT);
1415 }
1416
1417 if (gs || hs) {
1418 uint32_t cps_per_patch = builder->create_info->pTessellationState ?
1419 builder->create_info->pTessellationState->patchControlPoints : 0;
1420 tu6_emit_geom_tess_consts(cs, vs, hs, ds, gs, cps_per_patch);
1421 }
1422 }
1423
1424 static void
1425 tu6_emit_vertex_input(struct tu_cs *cs,
1426 const struct ir3_shader_variant *vs,
1427 const VkPipelineVertexInputStateCreateInfo *info,
1428 uint32_t *bindings_used)
1429 {
1430 uint32_t vfd_decode_idx = 0;
1431 uint32_t binding_instanced = 0; /* bitmask of instanced bindings */
1432 uint32_t step_rate[MAX_VBS];
1433
1434 for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
1435 const VkVertexInputBindingDescription *binding =
1436 &info->pVertexBindingDescriptions[i];
1437
1438 tu_cs_emit_regs(cs,
1439 A6XX_VFD_FETCH_STRIDE(binding->binding, binding->stride));
1440
1441 if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
1442 binding_instanced |= 1 << binding->binding;
1443
1444 *bindings_used |= 1 << binding->binding;
1445 step_rate[binding->binding] = 1;
1446 }
1447
1448 const VkPipelineVertexInputDivisorStateCreateInfoEXT *div_state =
1449 vk_find_struct_const(info->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1450 if (div_state) {
1451 for (uint32_t i = 0; i < div_state->vertexBindingDivisorCount; i++) {
1452 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1453 &div_state->pVertexBindingDivisors[i];
1454 step_rate[desc->binding] = desc->divisor;
1455 }
1456 }
1457
1458 /* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */
1459
1460 for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
1461 const VkVertexInputAttributeDescription *attr =
1462 &info->pVertexAttributeDescriptions[i];
1463 uint32_t input_idx;
1464
1465 assert(*bindings_used & BIT(attr->binding));
1466
1467 for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
1468 if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
1469 break;
1470 }
1471
1472 /* attribute not used, skip it */
1473 if (input_idx == vs->inputs_count)
1474 continue;
1475
1476 const struct tu_native_format format = tu6_format_vtx(attr->format);
1477 tu_cs_emit_regs(cs,
1478 A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
1479 .idx = attr->binding,
1480 .offset = attr->offset,
1481 .instanced = binding_instanced & (1 << attr->binding),
1482 .format = format.fmt,
1483 .swap = format.swap,
1484 .unk30 = 1,
1485 ._float = !vk_format_is_int(attr->format)),
1486 A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, step_rate[attr->binding]));
1487
1488 tu_cs_emit_regs(cs,
1489 A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
1490 .writemask = vs->inputs[input_idx].compmask,
1491 .regid = vs->inputs[input_idx].regid));
1492
1493 vfd_decode_idx++;
1494 }
1495
1496 tu_cs_emit_regs(cs,
1497 A6XX_VFD_CONTROL_0(
1498 .fetch_cnt = vfd_decode_idx, /* decode_cnt for binning pass ? */
1499 .decode_cnt = vfd_decode_idx));
1500 }
1501
1502 void
1503 tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
1504 {
1505 float offsets[3];
1506 float scales[3];
1507 scales[0] = viewport->width / 2.0f;
1508 scales[1] = viewport->height / 2.0f;
1509 scales[2] = viewport->maxDepth - viewport->minDepth;
1510 offsets[0] = viewport->x + scales[0];
1511 offsets[1] = viewport->y + scales[1];
1512 offsets[2] = viewport->minDepth;
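   /* Illustrative example: a 1920x1080 viewport at (0, 0) with depth range
    * [0, 1] gives xscale = xoffset = 960, yscale = yoffset = 540,
    * zscale = 1 and zoffset = 0.
    */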
1513
1514 VkOffset2D min;
1515 VkOffset2D max;
1516 min.x = (int32_t) viewport->x;
1517 max.x = (int32_t) ceilf(viewport->x + viewport->width);
1518 if (viewport->height >= 0.0f) {
1519 min.y = (int32_t) viewport->y;
1520 max.y = (int32_t) ceilf(viewport->y + viewport->height);
1521 } else {
1522 min.y = (int32_t)(viewport->y + viewport->height);
1523 max.y = (int32_t) ceilf(viewport->y);
1524 }
1525 /* the spec allows viewport->height to be 0.0f */
1526 if (min.y == max.y)
1527 max.y++;
1528 assert(min.x >= 0 && min.x < max.x);
1529 assert(min.y >= 0 && min.y < max.y);
1530
1531 VkExtent2D guardband_adj;
1532 guardband_adj.width = fd_calc_guardband(offsets[0], scales[0], false);
1533 guardband_adj.height = fd_calc_guardband(offsets[1], scales[1], false);
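   /* fd_calc_guardband() is the common helper from freedreno_guardband.h
    * shared with the gallium driver; it computes how far the clip guardband
    * may extend beyond the viewport, presumably bounded by the rasterizer's
    * fixed-point coordinate range.
    */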
1534
1535 tu_cs_emit_regs(cs,
1536 A6XX_GRAS_CL_VPORT_XOFFSET(0, offsets[0]),
1537 A6XX_GRAS_CL_VPORT_XSCALE(0, scales[0]),
1538 A6XX_GRAS_CL_VPORT_YOFFSET(0, offsets[1]),
1539 A6XX_GRAS_CL_VPORT_YSCALE(0, scales[1]),
1540 A6XX_GRAS_CL_VPORT_ZOFFSET(0, offsets[2]),
1541 A6XX_GRAS_CL_VPORT_ZSCALE(0, scales[2]));
1542
1543 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(0), 2);
1544 tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(min.x) |
1545 A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(min.y));
1546 tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(max.x - 1) |
1547 A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(max.y - 1));
1548
1549 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
1550 tu_cs_emit(cs,
1551 A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
1552 A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));
1553
1554 float z_clamp_min = MIN2(viewport->minDepth, viewport->maxDepth);
1555 float z_clamp_max = MAX2(viewport->minDepth, viewport->maxDepth);
1556
1557 tu_cs_emit_regs(cs,
1558 A6XX_GRAS_CL_Z_CLAMP_MIN(0, z_clamp_min),
1559 A6XX_GRAS_CL_Z_CLAMP_MAX(0, z_clamp_max));
1560
1561 tu_cs_emit_regs(cs,
1562 A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
1563 A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
1564 }
1565
1566 void
1567 tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
1568 {
1569 VkOffset2D min = scissor->offset;
1570 VkOffset2D max = {
1571 scissor->offset.x + scissor->extent.width,
1572 scissor->offset.y + scissor->extent.height,
1573 };
1574
1575 /* special case for empty scissor with max == 0 to avoid overflow */
1576 if (max.x == 0)
1577 min.x = max.x = 1;
1578 if (max.y == 0)
1579 min.y = max.y = 1;
1580
1581 /* avoid overflow with large scissor
1582 * note the max will be limited to min - 1, so that empty scissor works
1583 */
1584 uint32_t scissor_max = BITFIELD_MASK(15);
1585 min.x = MIN2(scissor_max, min.x);
1586 min.y = MIN2(scissor_max, min.y);
1587 max.x = MIN2(scissor_max, max.x);
1588 max.y = MIN2(scissor_max, max.y);
1589
1590 tu_cs_emit_regs(cs,
1591 A6XX_GRAS_SC_SCREEN_SCISSOR_TL(0, .x = min.x, .y = min.y),
1592 A6XX_GRAS_SC_SCREEN_SCISSOR_BR(0, .x = max.x - 1, .y = max.y - 1));
1593 }
1594
1595 void
1596 tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)
1597 {
1598 if (!samp_loc) {
1599 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);
1600 tu_cs_emit(cs, 0);
1601
1602 tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);
1603 tu_cs_emit(cs, 0);
1604
1605 tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);
1606 tu_cs_emit(cs, 0);
1607 return;
1608 }
1609
1610 assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);
1611 assert(samp_loc->sampleLocationGridSize.width == 1);
1612 assert(samp_loc->sampleLocationGridSize.height == 1);
1613
1614 uint32_t sample_config =
1615 A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;
1616 uint32_t sample_locations = 0;
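/* Each sample gets one byte in the 32-bit register (hence the << i*8
 * below), holding 4-bit fixed-point X and Y offsets within the pixel, so
 * up to four programmable locations fit in a single SAMPLE_LOCATION_0
 * register.
 */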
1617 for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {
1618 sample_locations |=
1619 (A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |
1620 A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;
1621 }
1622
1623 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);
1624 tu_cs_emit(cs, sample_config);
1625 tu_cs_emit(cs, sample_locations);
1626
1627 tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);
1628 tu_cs_emit(cs, sample_config);
1629 tu_cs_emit(cs, sample_locations);
1630
1631 tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);
1632 tu_cs_emit(cs, sample_config);
1633 tu_cs_emit(cs, sample_locations);
1634 }
1635
1636 static uint32_t
1637 tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
1638 VkSampleCountFlagBits samples)
1639 {
1640 uint32_t gras_su_cntl = 0;
1641
1642 if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
1643 gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
1644 if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
1645 gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;
1646
1647 if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
1648 gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;
1649
1650 /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH here; it is filled in later from lineWidth (or from the dynamic line width at draw time) */
1651
1652 if (rast_info->depthBiasEnable)
1653 gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;
1654
1655 if (samples > VK_SAMPLE_COUNT_1_BIT)
1656 gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;
1657
1658 return gras_su_cntl;
1659 }
1660
1661 void
1662 tu6_emit_depth_bias(struct tu_cs *cs,
1663 float constant_factor,
1664 float clamp,
1665 float slope_factor)
1666 {
1667 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
1668 tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
1669 tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
1670 tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
1671 }
1672
1673 static void
1674 tu6_emit_depth_control(struct tu_cs *cs,
1675 const VkPipelineDepthStencilStateCreateInfo *ds_info,
1676 const VkPipelineRasterizationStateCreateInfo *rast_info)
1677 {
1678 uint32_t rb_depth_cntl = 0;
1679 if (ds_info->depthTestEnable) {
1680 rb_depth_cntl |=
1681 A6XX_RB_DEPTH_CNTL_Z_ENABLE |
1682 A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
1683 A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE; /* TODO: don't set for ALWAYS/NEVER */
1684
1685 if (rast_info->depthClampEnable)
1686 rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;
1687
1688 if (ds_info->depthWriteEnable)
1689 rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
1690 }
1691
1692 if (ds_info->depthBoundsTestEnable)
1693 rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE | A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
1694
1695 tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_CNTL, 1);
1696 tu_cs_emit(cs, rb_depth_cntl);
1697 }
1698
1699 static void
1700 tu6_emit_stencil_control(struct tu_cs *cs,
1701 const VkPipelineDepthStencilStateCreateInfo *ds_info)
1702 {
1703 uint32_t rb_stencil_control = 0;
1704 if (ds_info->stencilTestEnable) {
1705 const VkStencilOpState *front = &ds_info->front;
1706 const VkStencilOpState *back = &ds_info->back;
1707 rb_stencil_control |=
1708 A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
1709 A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
1710 A6XX_RB_STENCIL_CONTROL_STENCIL_READ |
1711 A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
1712 A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
1713 A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
1714 A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
1715 A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
1716 A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
1717 A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
1718 A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
1719 }
1720
1721 tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
1722 tu_cs_emit(cs, rb_stencil_control);
1723 }
1724
1725 static uint32_t
1726 tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
1727 bool has_alpha)
1728 {
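/* When the attachment format has no alpha channel, destination alpha
 * effectively reads as 1.0, so color blend factors referencing DST_ALPHA
 * are remapped by tu_blend_factor_no_dst_alpha() to equivalents that do
 * not read it.
 */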
1729 const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
1730 const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
1731 has_alpha ? att->srcColorBlendFactor
1732 : tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
1733 const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
1734 has_alpha ? att->dstColorBlendFactor
1735 : tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
1736 const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
1737 const enum adreno_rb_blend_factor src_alpha_factor =
1738 tu6_blend_factor(att->srcAlphaBlendFactor);
1739 const enum adreno_rb_blend_factor dst_alpha_factor =
1740 tu6_blend_factor(att->dstAlphaBlendFactor);
1741
1742 return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
1743 A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
1744 A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
1745 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
1746 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
1747 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
1748 }
1749
1750 static uint32_t
1751 tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
1752 uint32_t rb_mrt_control_rop,
1753 bool is_int,
1754 bool has_alpha)
1755 {
1756 uint32_t rb_mrt_control =
1757 A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);
1758
1759 /* ignore blending and logic op for integer attachments */
1760 if (is_int) {
1761 rb_mrt_control |= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
1762 return rb_mrt_control;
1763 }
1764
1765 rb_mrt_control |= rb_mrt_control_rop;
1766
1767 if (att->blendEnable) {
1768 rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;
1769
1770 if (has_alpha)
1771 rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
1772 }
1773
1774 return rb_mrt_control;
1775 }
1776
1777 static void
1778 tu6_emit_rb_mrt_controls(struct tu_cs *cs,
1779 const VkPipelineColorBlendStateCreateInfo *blend_info,
1780 const VkFormat attachment_formats[MAX_RTS],
1781 uint32_t *blend_enable_mask)
1782 {
1783 *blend_enable_mask = 0;
1784
1785 bool rop_reads_dst = false;
1786 uint32_t rb_mrt_control_rop = 0;
1787 if (blend_info->logicOpEnable) {
1788 rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
1789 rb_mrt_control_rop =
1790 A6XX_RB_MRT_CONTROL_ROP_ENABLE |
1791 A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
1792 }
1793
1794 for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
1795 const VkPipelineColorBlendAttachmentState *att =
1796 &blend_info->pAttachments[i];
1797 const VkFormat format = attachment_formats[i];
1798
1799 uint32_t rb_mrt_control = 0;
1800 uint32_t rb_mrt_blend_control = 0;
1801 if (format != VK_FORMAT_UNDEFINED) {
1802 const bool is_int = vk_format_is_int(format);
1803 const bool has_alpha = vk_format_has_alpha(format);
1804
1805 rb_mrt_control =
1806 tu6_rb_mrt_control(att, rb_mrt_control_rop, is_int, has_alpha);
1807 rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);
1808
1809 if (att->blendEnable || rop_reads_dst)
1810 *blend_enable_mask |= 1 << i;
1811 }
1812
1813 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
1814 tu_cs_emit(cs, rb_mrt_control);
1815 tu_cs_emit(cs, rb_mrt_blend_control);
1816 }
1817 }
1818
1819 static void
1820 tu6_emit_blend_control(struct tu_cs *cs,
1821 uint32_t blend_enable_mask,
1822 bool dual_src_blend,
1823 const VkPipelineMultisampleStateCreateInfo *msaa_info)
1824 {
1825 const uint32_t sample_mask =
1826 msaa_info->pSampleMask ? (*msaa_info->pSampleMask & 0xffff)
1827 : ((1 << msaa_info->rasterizationSamples) - 1);
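/* rasterizationSamples is a VkSampleCountFlagBits whose numeric value
 * equals the sample count (e.g. VK_SAMPLE_COUNT_4_BIT == 4), so
 * (1 << samples) - 1 yields a full mask when no pSampleMask is given.
 */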
1828
1829 tu_cs_emit_regs(cs,
1830 A6XX_SP_BLEND_CNTL(.enabled = blend_enable_mask,
1831 .dual_color_in_enable = dual_src_blend,
1832 .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
1833 .unk8 = true));
1834
1835 /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
1836 tu_cs_emit_regs(cs,
1837 A6XX_RB_BLEND_CNTL(.enable_blend = blend_enable_mask,
1838 .independent_blend = true,
1839 .sample_mask = sample_mask,
1840 .dual_color_in_enable = dual_src_blend,
1841 .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
1842 .alpha_to_one = msaa_info->alphaToOneEnable));
1843 }
1844
1845 static VkResult
1846 tu_pipeline_allocate_cs(struct tu_device *dev,
1847 struct tu_pipeline *pipeline,
1848 struct tu_pipeline_builder *builder,
1849 struct ir3_shader_variant *compute)
1850 {
1851 uint32_t size = 2048 + tu6_load_state_size(pipeline, compute);
1852
1853 /* graphics case: */
1854 if (builder) {
1855 for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
1856 if (builder->variants[i])
1857 size += builder->variants[i]->info.sizedwords;
1858 }
1859
1860 size += builder->binning_variant->info.sizedwords;
1861 } else {
1862 size += compute->info.sizedwords;
1863 }
1864
1865 tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, size);
1866
1867 /* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note
1868 * that LOAD_STATE can potentially take up a large amount of space so we
1869 * calculate its size explicitly.
1870 */
1871 return tu_cs_reserve_space(&pipeline->cs, size);
1872 }
1873
1874 static void
1875 tu_pipeline_shader_key_init(struct ir3_shader_key *key,
1876 const VkGraphicsPipelineCreateInfo *pipeline_info)
1877 {
1878 for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
1879 if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
1880 key->has_gs = true;
1881 break;
1882 }
1883 }
1884
1885 if (pipeline_info->pRasterizationState->rasterizerDiscardEnable)
1886 return;
1887
1888 const VkPipelineMultisampleStateCreateInfo *msaa_info = pipeline_info->pMultisampleState;
1889 const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
1890 vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
1891 if (msaa_info->rasterizationSamples > 1 ||
1892 /* also set the msaa key when the sample locations are not the default,
1893 * since this affects varying interpolation */
1894 (sample_locations && sample_locations->sampleLocationsEnable)) {
1895 key->msaa = true;
1896 }
1897
1898 /* note: not actually used by ir3, just checked in tu6_emit_fs_inputs */
1899 if (msaa_info->sampleShadingEnable)
1900 key->sample_shading = true;
1901
1902 /* We set this after we compile to NIR because we need the prim mode */
1903 key->tessellation = IR3_TESS_NONE;
1904 }
1905
1906 static uint32_t
1907 tu6_get_tessmode(struct tu_shader* shader)
1908 {
1909 uint32_t primitive_mode = shader->ir3_shader->nir->info.tess.primitive_mode;
1910 switch (primitive_mode) {
1911 case GL_ISOLINES:
1912 return IR3_TESS_ISOLINES;
1913 case GL_TRIANGLES:
1914 return IR3_TESS_TRIANGLES;
1915 case GL_QUADS:
1916 return IR3_TESS_QUADS;
1917 case GL_NONE:
1918 return IR3_TESS_NONE;
1919 default:
1920 unreachable("bad tessmode");
1921 }
1922 }
1923
1924 static uint64_t
1925 tu_upload_variant(struct tu_pipeline *pipeline,
1926 const struct ir3_shader_variant *variant)
1927 {
1928 struct tu_cs_memory memory;
1929
1930 if (!variant)
1931 return 0;
1932
1933 /* This relies on getting enough alignment because shaders are allocated
1934 * first and sizedwords is always aligned correctly.
1935 * Note: an assert in tu6_emit_xs_config validates the alignment.
1936 */
1937 tu_cs_alloc(&pipeline->cs, variant->info.sizedwords, 1, &memory);
1938
1939 memcpy(memory.map, variant->bin, sizeof(uint32_t) * variant->info.sizedwords);
1940 return memory.iova;
1941 }
1942
1943 static VkResult
1944 tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
1945 struct tu_pipeline *pipeline)
1946 {
1947 const struct ir3_compiler *compiler = builder->device->compiler;
1948 const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
1949 NULL
1950 };
1951 for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
1952 gl_shader_stage stage =
1953 vk_to_mesa_shader_stage(builder->create_info->pStages[i].stage);
1954 stage_infos[stage] = &builder->create_info->pStages[i];
1955 }
1956
1957 struct ir3_shader_key key = {};
1958 tu_pipeline_shader_key_init(&key, builder->create_info);
1959
1960 for (gl_shader_stage stage = MESA_SHADER_VERTEX;
1961 stage < MESA_SHADER_STAGES; stage++) {
1962 const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
1963 if (!stage_info && stage != MESA_SHADER_FRAGMENT)
1964 continue;
1965
1966 struct tu_shader *shader =
1967 tu_shader_create(builder->device, stage, stage_info, builder->layout,
1968 builder->alloc);
1969 if (!shader)
1970 return VK_ERROR_OUT_OF_HOST_MEMORY;
1971
1972 /* In SPIR-V generated from GLSL, the primitive mode is specified in the
1973 * tessellation evaluation shader, but in SPIR-V generated from HLSL,
1974 * the mode is specified in the tessellation control shader. */
1975 if ((stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_TESS_CTRL) &&
1976 key.tessellation == IR3_TESS_NONE) {
1977 key.tessellation = tu6_get_tessmode(shader);
1978 }
1979
1980 builder->shaders[stage] = shader;
1981 }
1982
1983 struct tu_shader *gs = builder->shaders[MESA_SHADER_GEOMETRY];
1984 key.layer_zero =
1985 !gs || !(gs->ir3_shader->nir->info.outputs_written & VARYING_BIT_LAYER);
1986
1987 pipeline->tess.patch_type = key.tessellation;
1988
1989 for (gl_shader_stage stage = MESA_SHADER_VERTEX;
1990 stage < MESA_SHADER_STAGES; stage++) {
1991 if (!builder->shaders[stage])
1992 continue;
1993
1994 bool created;
1995 builder->variants[stage] =
1996 ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,
1997 &key, false, &created);
1998 if (!builder->variants[stage])
1999 return VK_ERROR_OUT_OF_HOST_MEMORY;
2000 }
2001
2002 uint32_t safe_constlens = ir3_trim_constlen(builder->variants, compiler);
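/* ir3_trim_constlen() checks the combined constant usage of all stages
 * against the hardware limit and returns a bitmask of the stages that need
 * to be recompiled with key.safe_constlen set, which is done below.
 */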
2003
2004 key.safe_constlen = true;
2005
2006 for (gl_shader_stage stage = MESA_SHADER_VERTEX;
2007 stage < MESA_SHADER_STAGES; stage++) {
2008 if (!builder->shaders[stage])
2009 continue;
2010
2011 if (safe_constlens & (1 << stage)) {
2012 bool created;
2013 builder->variants[stage] =
2014 ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,
2015 &key, false, &created);
2016 if (!builder->variants[stage])
2017 return VK_ERROR_OUT_OF_HOST_MEMORY;
2018 }
2019 }
2020
2021 const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
2022 struct ir3_shader_variant *variant;
2023
2024 if (vs->ir3_shader->stream_output.num_outputs ||
2025 !ir3_has_binning_vs(&key)) {
2026 variant = builder->variants[MESA_SHADER_VERTEX];
2027 } else {
2028 bool created;
2029 key.safe_constlen = !!(safe_constlens & (1 << MESA_SHADER_VERTEX));
2030 variant = ir3_shader_get_variant(vs->ir3_shader, &key,
2031 true, &created);
2032 if (!variant)
2033 return VK_ERROR_OUT_OF_HOST_MEMORY;
2034 }
2035
2036 builder->binning_variant = variant;
2037
2038 return VK_SUCCESS;
2039 }
2040
2041 static void
2042 tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
2043 struct tu_pipeline *pipeline)
2044 {
2045 const VkPipelineDynamicStateCreateInfo *dynamic_info =
2046 builder->create_info->pDynamicState;
2047
2048 if (!dynamic_info)
2049 return;
2050
2051 for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
2052 VkDynamicState state = dynamic_info->pDynamicStates[i];
2053 switch (state) {
2054 case VK_DYNAMIC_STATE_VIEWPORT ... VK_DYNAMIC_STATE_STENCIL_REFERENCE:
2055 pipeline->dynamic_state_mask |= BIT(state);
2056 break;
2057 case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
2058 pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_SAMPLE_LOCATIONS);
2059 break;
2060 default:
2061 assert(!"unsupported dynamic state");
2062 break;
2063 }
2064 }
2065 }
2066
2067 static void
2068 tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
2069 struct tu_shader *shader,
2070 struct ir3_shader_variant *v)
2071 {
2072 link->const_state = *ir3_const_state(v);
2073 link->constlen = v->constlen;
2074 link->push_consts = shader->push_consts;
2075 }
2076
2077 static void
2078 tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
2079 struct tu_pipeline *pipeline)
2080 {
2081 struct tu_cs prog_cs;
2082 tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2083 tu6_emit_program(&prog_cs, builder, false);
2084 pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2085
2086 tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2087 tu6_emit_program(&prog_cs, builder, true);
2088 pipeline->program.binning_state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2089
2090 VkShaderStageFlags stages = 0;
2091 for (unsigned i = 0; i < builder->create_info->stageCount; i++) {
2092 stages |= builder->create_info->pStages[i].stage;
2093 }
2094 pipeline->active_stages = stages;
2095
2096 uint32_t desc_sets = 0;
2097 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
2098 if (!builder->shaders[i])
2099 continue;
2100
2101 tu_pipeline_set_linkage(&pipeline->program.link[i],
2102 builder->shaders[i],
2103 builder->variants[i]);
2104 desc_sets |= builder->shaders[i]->active_desc_sets;
2105 }
2106 pipeline->active_desc_sets = desc_sets;
2107 }
2108
2109 static void
2110 tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
2111 struct tu_pipeline *pipeline)
2112 {
2113 const VkPipelineVertexInputStateCreateInfo *vi_info =
2114 builder->create_info->pVertexInputState;
2115 const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
2116 const struct ir3_shader_variant *bs = builder->binning_variant;
2117
2118 struct tu_cs vi_cs;
2119 tu_cs_begin_sub_stream(&pipeline->cs,
2120 MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
2121 tu6_emit_vertex_input(&vi_cs, vs, vi_info,
2122 &pipeline->vi.bindings_used);
2123 pipeline->vi.state = tu_cs_end_draw_state(&pipeline->cs, &vi_cs);
2124
2125 if (bs) {
2126 tu_cs_begin_sub_stream(&pipeline->cs,
2127 MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
2128 tu6_emit_vertex_input(
2129 &vi_cs, bs, vi_info, &pipeline->vi.bindings_used);
2130 pipeline->vi.binning_state =
2131 tu_cs_end_draw_state(&pipeline->cs, &vi_cs);
2132 }
2133 }
2134
2135 static void
2136 tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
2137 struct tu_pipeline *pipeline)
2138 {
2139 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
2140 builder->create_info->pInputAssemblyState;
2141
2142 pipeline->ia.primtype = tu6_primtype(ia_info->topology);
2143 pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
2144 }
2145
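/* Allocate a static draw state of the given size for the given id and
 * return true, unless that state was declared dynamic, in which case
 * nothing is emitted here and the state is filled in at draw time.
 */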
2146 static bool
2147 tu_pipeline_static_state(struct tu_pipeline *pipeline, struct tu_cs *cs,
2148 uint32_t id, uint32_t size)
2149 {
2150 assert(id < ARRAY_SIZE(pipeline->dynamic_state));
2151
2152 if (pipeline->dynamic_state_mask & BIT(id))
2153 return false;
2154
2155 pipeline->dynamic_state[id] = tu_cs_draw_state(&pipeline->cs, cs, size);
2156 return true;
2157 }
2158
2159 static void
2160 tu_pipeline_builder_parse_tessellation(struct tu_pipeline_builder *builder,
2161 struct tu_pipeline *pipeline)
2162 {
2163 const VkPipelineTessellationStateCreateInfo *tess_info =
2164 builder->create_info->pTessellationState;
2165
2166 if (!tess_info)
2167 return;
2168
2169 assert(pipeline->ia.primtype == DI_PT_PATCHES0);
2170 assert(tess_info->patchControlPoints <= 32);
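/* The primtype values for patches are laid out so that
 * DI_PT_PATCHES0 + N selects a patch primitive with N control points,
 * hence the simple addition below.
 */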
2171 pipeline->ia.primtype += tess_info->patchControlPoints;
2172 const VkPipelineTessellationDomainOriginStateCreateInfo *domain_info =
2173 vk_find_struct_const(tess_info->pNext, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
2174 pipeline->tess.upper_left_domain_origin = !domain_info ||
2175 domain_info->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;
2176 const struct ir3_shader_variant *hs = builder->variants[MESA_SHADER_TESS_CTRL];
2177 const struct ir3_shader_variant *ds = builder->variants[MESA_SHADER_TESS_EVAL];
2178 pipeline->tess.param_stride = hs->output_size * 4;
2179 pipeline->tess.hs_bo_regid = hs->const_state->offsets.primitive_param + 1;
2180 pipeline->tess.ds_bo_regid = ds->const_state->offsets.primitive_param + 1;
2181 }
2182
2183 static void
2184 tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
2185 struct tu_pipeline *pipeline)
2186 {
2187 /* The spec says:
2188 *
2189 * pViewportState is a pointer to an instance of the
2190 * VkPipelineViewportStateCreateInfo structure, and is ignored if the
2191 * pipeline has rasterization disabled.
2192 *
2193 * We leave the relevant registers stale in that case.
2194 */
2195 if (builder->rasterizer_discard)
2196 return;
2197
2198 const VkPipelineViewportStateCreateInfo *vp_info =
2199 builder->create_info->pViewportState;
2200
2201 struct tu_cs cs;
2202
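/* The sizes passed below (18 dwords for the viewport, 3 for the scissor)
 * must match what tu6_emit_viewport() and tu6_emit_scissor() emit.
 */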
2203 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_VIEWPORT, 18))
2204 tu6_emit_viewport(&cs, vp_info->pViewports);
2205
2206 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_SCISSOR, 3))
2207 tu6_emit_scissor(&cs, vp_info->pScissors);
2208 }
2209
2210 static void
2211 tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
2212 struct tu_pipeline *pipeline)
2213 {
2214 const VkPipelineRasterizationStateCreateInfo *rast_info =
2215 builder->create_info->pRasterizationState;
2216
2217 enum a6xx_polygon_mode mode = tu6_polygon_mode(rast_info->polygonMode);
2218
2219 struct tu_cs cs;
2220 pipeline->rast_state = tu_cs_draw_state(&pipeline->cs, &cs, 9);
2221
2222 tu_cs_emit_regs(&cs,
2223 A6XX_GRAS_CL_CNTL(
2224 .znear_clip_disable = rast_info->depthClampEnable,
2225 .zfar_clip_disable = rast_info->depthClampEnable,
2226 .unk5 = rast_info->depthClampEnable,
2227 .zero_gb_scale_z = 1,
2228 .vp_clip_code_ignore = 1));
2229
2230 tu_cs_emit_regs(&cs,
2231 A6XX_VPC_POLYGON_MODE(mode));
2232
2233 tu_cs_emit_regs(&cs,
2234 A6XX_PC_POLYGON_MODE(mode));
2235
2236 /* move to hw ctx init? */
2237 tu_cs_emit_regs(&cs,
2238 A6XX_GRAS_SU_POINT_MINMAX(.min = 1.0f / 16.0f, .max = 4092.0f),
2239 A6XX_GRAS_SU_POINT_SIZE(1.0f));
2240
2241 pipeline->gras_su_cntl =
2242 tu6_gras_su_cntl(rast_info, builder->samples);
2243
2244 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_LINE_WIDTH, 2)) {
2245 pipeline->gras_su_cntl |=
2246 A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(rast_info->lineWidth / 2.0f);
2247 tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = pipeline->gras_su_cntl));
2248 }
2249
2250 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BIAS, 4)) {
2251 tu6_emit_depth_bias(&cs, rast_info->depthBiasConstantFactor,
2252 rast_info->depthBiasClamp,
2253 rast_info->depthBiasSlopeFactor);
2254 }
2255
2256 }
2257
2258 static void
2259 tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
2260 struct tu_pipeline *pipeline)
2261 {
2262 /* The spec says:
2263 *
2264 * pDepthStencilState is a pointer to an instance of the
2265 * VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
2266 * the pipeline has rasterization disabled or if the subpass of the
2267 * render pass the pipeline is created against does not use a
2268 * depth/stencil attachment.
2269 *
2270 * Disable both the depth and stencil tests if there is no ds attachment,
2271 * and disable only the depth test if the ds attachment is S8_UINT, since
2272 * S8_UINT has no depth aspect (only the separate stencil one).
2273 */
2274 static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
2275 const VkPipelineDepthStencilStateCreateInfo *ds_info =
2276 builder->depth_attachment_format != VK_FORMAT_UNDEFINED
2277 ? builder->create_info->pDepthStencilState
2278 : &dummy_ds_info;
2279 const VkPipelineDepthStencilStateCreateInfo *ds_info_depth =
2280 builder->depth_attachment_format != VK_FORMAT_S8_UINT
2281 ? ds_info : &dummy_ds_info;
2282
2283 struct tu_cs cs;
2284 pipeline->ds_state = tu_cs_draw_state(&pipeline->cs, &cs, 6);
2285
2286 /* move to hw ctx init? */
2287 tu_cs_emit_regs(&cs, A6XX_RB_ALPHA_CONTROL());
2288 tu6_emit_depth_control(&cs, ds_info_depth,
2289 builder->create_info->pRasterizationState);
2290 tu6_emit_stencil_control(&cs, ds_info);
2291
2292 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3)) {
2293 tu_cs_emit_regs(&cs,
2294 A6XX_RB_Z_BOUNDS_MIN(ds_info->minDepthBounds),
2295 A6XX_RB_Z_BOUNDS_MAX(ds_info->maxDepthBounds));
2296 }
2297
2298 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2)) {
2299 tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.mask = ds_info->front.compareMask & 0xff,
2300 .bfmask = ds_info->back.compareMask & 0xff));
2301 }
2302
2303 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2)) {
2304 tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.wrmask = ds_info->front.writeMask & 0xff,
2305 .bfwrmask = ds_info->back.writeMask & 0xff));
2306 }
2307
2308 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2)) {
2309 tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.ref = ds_info->front.reference & 0xff,
2310 .bfref = ds_info->back.reference & 0xff));
2311 }
2312 }
2313
2314 static void
2315 tu_pipeline_builder_parse_multisample_and_color_blend(
2316 struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
2317 {
2318 /* The spec says:
2319 *
2320 * pMultisampleState is a pointer to an instance of the
2321 * VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
2322 * has rasterization disabled.
2323 *
2324 * Also,
2325 *
2326 * pColorBlendState is a pointer to an instance of the
2327 * VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
2328 * pipeline has rasterization disabled or if the subpass of the render
2329 * pass the pipeline is created against does not use any color
2330 * attachments.
2331 *
2332 * We leave the relevant registers stale when rasterization is disabled.
2333 */
2334 if (builder->rasterizer_discard)
2335 return;
2336
2337 static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
2338 const VkPipelineMultisampleStateCreateInfo *msaa_info =
2339 builder->create_info->pMultisampleState;
2340 const VkPipelineColorBlendStateCreateInfo *blend_info =
2341 builder->use_color_attachments ? builder->create_info->pColorBlendState
2342 : &dummy_blend_info;
2343
2344 struct tu_cs cs;
2345 pipeline->blend_state =
2346 tu_cs_draw_state(&pipeline->cs, &cs, blend_info->attachmentCount * 3 + 4);
2347
2348 uint32_t blend_enable_mask;
2349 tu6_emit_rb_mrt_controls(&cs, blend_info,
2350 builder->color_attachment_formats,
2351 &blend_enable_mask);
2352
2353 tu6_emit_blend_control(&cs, blend_enable_mask,
2354 builder->use_dual_src_blend, msaa_info);
2355
2356 assert(cs.cur == cs.end); /* validate draw state size */
2357
2358 if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5)) {
2359 tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
2360 tu_cs_emit_array(&cs, (const uint32_t *) blend_info->blendConstants, 4);
2361 }
2362
2363 const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
2364 vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
2365 const VkSampleLocationsInfoEXT *samp_loc = NULL;
2366
2367 if (sample_locations && sample_locations->sampleLocationsEnable)
2368 samp_loc = &sample_locations->sampleLocationsInfo;
2369
2370 if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS,
2371 samp_loc ? 9 : 6)) {
2372 tu6_emit_sample_locations(&cs, samp_loc);
2373 }
2374 }
2375
2376 static void
2377 tu_pipeline_finish(struct tu_pipeline *pipeline,
2378 struct tu_device *dev,
2379 const VkAllocationCallbacks *alloc)
2380 {
2381 tu_cs_finish(&pipeline->cs);
2382 }
2383
2384 static VkResult
2385 tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
2386 struct tu_pipeline **pipeline)
2387 {
2388 VkResult result;
2389
2390 *pipeline = vk_object_zalloc(&builder->device->vk, builder->alloc,
2391 sizeof(**pipeline), VK_OBJECT_TYPE_PIPELINE);
2392 if (!*pipeline)
2393 return VK_ERROR_OUT_OF_HOST_MEMORY;
2394
2395 (*pipeline)->layout = builder->layout;
2396
2397 /* compile and upload shaders */
2398 result = tu_pipeline_builder_compile_shaders(builder, *pipeline);
2399 if (result != VK_SUCCESS) {
2400 vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
2401 return result;
2402 }
2403
2404 result = tu_pipeline_allocate_cs(builder->device, *pipeline, builder, NULL);
2405 if (result != VK_SUCCESS) {
2406 vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
2407 return result;
2408 }
2409
2410 for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
2411 builder->shader_iova[i] = tu_upload_variant(*pipeline, builder->variants[i]);
2412
2413 builder->binning_vs_iova =
2414 tu_upload_variant(*pipeline, builder->binning_variant);
2415
2416 tu_pipeline_builder_parse_dynamic(builder, *pipeline);
2417 tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
2418 tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
2419 tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
2420 tu_pipeline_builder_parse_tessellation(builder, *pipeline);
2421 tu_pipeline_builder_parse_viewport(builder, *pipeline);
2422 tu_pipeline_builder_parse_rasterization(builder, *pipeline);
2423 tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
2424 tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
2425 tu6_emit_load_state(*pipeline, false);
2426
2427 /* we should have reserved enough space upfront such that the CS never
2428 * grows
2429 */
2430 assert((*pipeline)->cs.bo_count == 1);
2431
2432 return VK_SUCCESS;
2433 }
2434
2435 static void
2436 tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
2437 {
2438 for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
2439 if (!builder->shaders[i])
2440 continue;
2441 tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
2442 }
2443 }
2444
2445 static void
2446 tu_pipeline_builder_init_graphics(
2447 struct tu_pipeline_builder *builder,
2448 struct tu_device *dev,
2449 struct tu_pipeline_cache *cache,
2450 const VkGraphicsPipelineCreateInfo *create_info,
2451 const VkAllocationCallbacks *alloc)
2452 {
2453 TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);
2454
2455 *builder = (struct tu_pipeline_builder) {
2456 .device = dev,
2457 .cache = cache,
2458 .create_info = create_info,
2459 .alloc = alloc,
2460 .layout = layout,
2461 };
2462
2463 builder->rasterizer_discard =
2464 create_info->pRasterizationState->rasterizerDiscardEnable;
2465
2466 if (builder->rasterizer_discard) {
2467 builder->samples = VK_SAMPLE_COUNT_1_BIT;
2468 } else {
2469 builder->samples = create_info->pMultisampleState->rasterizationSamples;
2470
2471 const struct tu_render_pass *pass =
2472 tu_render_pass_from_handle(create_info->renderPass);
2473 const struct tu_subpass *subpass =
2474 &pass->subpasses[create_info->subpass];
2475
2476 const uint32_t a = subpass->depth_stencil_attachment.attachment;
2477 builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
2478 pass->attachments[a].format : VK_FORMAT_UNDEFINED;
2479
2480 assert(subpass->color_count == 0 ||
2481 !create_info->pColorBlendState ||
2482 subpass->color_count == create_info->pColorBlendState->attachmentCount);
2483 builder->color_attachment_count = subpass->color_count;
2484 for (uint32_t i = 0; i < subpass->color_count; i++) {
2485 const uint32_t a = subpass->color_attachments[i].attachment;
2486 if (a == VK_ATTACHMENT_UNUSED)
2487 continue;
2488
2489 builder->color_attachment_formats[i] = pass->attachments[a].format;
2490 builder->use_color_attachments = true;
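/* render_components holds a 4-bit RGBA write mask per MRT slot, hence the
 * 0xf nibble shifted by 4 for each used color attachment.
 */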
2491 builder->render_components |= 0xf << (i * 4);
2492 }
2493
2494 if (tu_blend_state_is_dual_src(create_info->pColorBlendState)) {
2495 builder->color_attachment_count++;
2496 builder->use_dual_src_blend = true;
2497 /* dual source blending has an extra fs output in the 2nd slot */
2498 if (subpass->color_attachments[0].attachment != VK_ATTACHMENT_UNUSED)
2499 builder->render_components |= 0xf << 4;
2500 }
2501 }
2502 }
2503
2504 static VkResult
2505 tu_graphics_pipeline_create(VkDevice device,
2506 VkPipelineCache pipelineCache,
2507 const VkGraphicsPipelineCreateInfo *pCreateInfo,
2508 const VkAllocationCallbacks *pAllocator,
2509 VkPipeline *pPipeline)
2510 {
2511 TU_FROM_HANDLE(tu_device, dev, device);
2512 TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);
2513
2514 struct tu_pipeline_builder builder;
2515 tu_pipeline_builder_init_graphics(&builder, dev, cache,
2516 pCreateInfo, pAllocator);
2517
2518 struct tu_pipeline *pipeline = NULL;
2519 VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
2520 tu_pipeline_builder_finish(&builder);
2521
2522 if (result == VK_SUCCESS)
2523 *pPipeline = tu_pipeline_to_handle(pipeline);
2524 else
2525 *pPipeline = VK_NULL_HANDLE;
2526
2527 return result;
2528 }
2529
2530 VkResult
2531 tu_CreateGraphicsPipelines(VkDevice device,
2532 VkPipelineCache pipelineCache,
2533 uint32_t count,
2534 const VkGraphicsPipelineCreateInfo *pCreateInfos,
2535 const VkAllocationCallbacks *pAllocator,
2536 VkPipeline *pPipelines)
2537 {
2538 VkResult final_result = VK_SUCCESS;
2539
2540 for (uint32_t i = 0; i < count; i++) {
2541 VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
2542 &pCreateInfos[i], pAllocator,
2543 &pPipelines[i]);
2544
2545 if (result != VK_SUCCESS)
2546 final_result = result;
2547 }
2548
2549 return final_result;
2550 }
2551
2552 static VkResult
2553 tu_compute_pipeline_create(VkDevice device,
2554 VkPipelineCache _cache,
2555 const VkComputePipelineCreateInfo *pCreateInfo,
2556 const VkAllocationCallbacks *pAllocator,
2557 VkPipeline *pPipeline)
2558 {
2559 TU_FROM_HANDLE(tu_device, dev, device);
2560 TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
2561 const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
2562 VkResult result;
2563
2564 struct tu_pipeline *pipeline;
2565
2566 *pPipeline = VK_NULL_HANDLE;
2567
2568 pipeline = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pipeline),
2569 VK_OBJECT_TYPE_PIPELINE);
2570 if (!pipeline)
2571 return VK_ERROR_OUT_OF_HOST_MEMORY;
2572
2573 pipeline->layout = layout;
2574
2575 struct ir3_shader_key key = {};
2576
2577 struct tu_shader *shader =
2578 tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
2579 if (!shader) {
2580 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2581 goto fail;
2582 }
2583
2584 pipeline->active_desc_sets = shader->active_desc_sets;
2585
2586 bool created;
2587 struct ir3_shader_variant *v =
2588 ir3_shader_get_variant(shader->ir3_shader, &key, false, &created);
2589 if (!v) {
2590 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2591 goto fail;
2592 }
2593
2594 tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
2595 shader, v);
2596
2597 result = tu_pipeline_allocate_cs(dev, pipeline, NULL, v);
2598 if (result != VK_SUCCESS)
2599 goto fail;
2600
2601 uint64_t shader_iova = tu_upload_variant(pipeline, v);
2602
2603 for (int i = 0; i < 3; i++)
2604 pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];
2605
2606 struct tu_cs prog_cs;
2607 tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
2608 tu6_emit_cs_config(&prog_cs, shader, v, shader_iova);
2609 pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);
2610
2611 tu6_emit_load_state(pipeline, true);
2612
2613 *pPipeline = tu_pipeline_to_handle(pipeline);
2614 return VK_SUCCESS;
2615
2616 fail:
2617 if (shader)
2618 tu_shader_destroy(dev, shader, pAllocator);
2619
2620 vk_object_free(&dev->vk, pAllocator, pipeline);
2621
2622 return result;
2623 }
2624
2625 VkResult
2626 tu_CreateComputePipelines(VkDevice device,
2627 VkPipelineCache pipelineCache,
2628 uint32_t count,
2629 const VkComputePipelineCreateInfo *pCreateInfos,
2630 const VkAllocationCallbacks *pAllocator,
2631 VkPipeline *pPipelines)
2632 {
2633 VkResult final_result = VK_SUCCESS;
2634
2635 for (uint32_t i = 0; i < count; i++) {
2636 VkResult result = tu_compute_pipeline_create(device, pipelineCache,
2637 &pCreateInfos[i],
2638 pAllocator, &pPipelines[i]);
2639 if (result != VK_SUCCESS)
2640 final_result = result;
2641 }
2642
2643 return final_result;
2644 }
2645
2646 void
2647 tu_DestroyPipeline(VkDevice _device,
2648 VkPipeline _pipeline,
2649 const VkAllocationCallbacks *pAllocator)
2650 {
2651 TU_FROM_HANDLE(tu_device, dev, _device);
2652 TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2653
2654 if (!_pipeline)
2655 return;
2656
2657 tu_pipeline_finish(pipeline, dev, pAllocator);
2658 vk_object_free(&dev->vk, pAllocator, pipeline);
2659 }