radeonsi: replace TGSI_SEMANTIC with VARYING_SLOT and FRAG_RESULT
[mesa.git] / src / gallium / drivers / radeonsi / si_shader_llvm_tess.c
1 /*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_pipe.h"
26 #include "si_shader_internal.h"
27 #include "sid.h"
28
29 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
30 {
31 switch (ctx->stage) {
32 case MESA_SHADER_TESS_CTRL:
33 return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);
34
35 case MESA_SHADER_TESS_EVAL:
36 return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);
37
38 default:
39 assert(0);
40 return NULL;
41 }
42 }
43
44 /* Tessellation shaders pass outputs to the next shader using LDS.
45 *
46 * LS outputs = TCS inputs
47 * TCS outputs = TES inputs
48 *
49 * The LDS layout is:
50 * - TCS inputs for patch 0
51 * - TCS inputs for patch 1
52 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
53 * - ...
54 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
55 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
56 * - TCS outputs for patch 1
57 * - Per-patch TCS outputs for patch 1
58 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
59 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
60 * - ...
61 *
62 * All three shaders VS(LS), TCS, TES share the same LDS space.
63 */
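/* Rough address math used by the helpers below (all values in dwords):
 *
 *   tcs_in_current_patch_offset  = rel_patch_id * tcs_in_patch_stride
 *   tcs_out_current_patch_offset = tcs_out_patch0_offset
 *                                  + rel_patch_id * tcs_out_patch_stride
 *   tcs_out_current_patch_data_offset is derived the same way from
 *   tcs_out_patch0_patch_data_offset.
 *
 * The strides and patch-0 offsets are either compile-time constants or are
 * unpacked from SGPRs (vs_state_bits, tcs_out_lds_offsets, tcs_out_lds_layout).
 */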
64
65 static LLVMValueRef get_tcs_in_patch_stride(struct si_shader_context *ctx)
66 {
67 return si_unpack_param(ctx, ctx->vs_state_bits, 11, 13);
68 }
69
70 static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
71 {
72 assert(ctx->stage == MESA_SHADER_TESS_CTRL);
73
74 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
75 return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;
76
77 return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
78 }
79
80 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
81 {
82 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
83
84 return LLVMConstInt(ctx->ac.i32, stride, 0);
85 }
86
87 static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
88 {
89 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
90 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);
91
92 const struct si_shader_info *info = &ctx->shader->selector->info;
93 unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
94 unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
95 unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
96 unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride + num_patch_outputs * 4;
97 return LLVMConstInt(ctx->ac.i32, patch_dw_stride, 0);
98 }
99
100 static LLVMValueRef get_tcs_out_patch0_offset(struct si_shader_context *ctx)
101 {
102 return LLVMBuildMul(ctx->ac.builder, si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
103 LLVMConstInt(ctx->ac.i32, 4, 0), "");
104 }
105
106 static LLVMValueRef get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
107 {
108 return LLVMBuildMul(ctx->ac.builder, si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
109 LLVMConstInt(ctx->ac.i32, 4, 0), "");
110 }
111
112 static LLVMValueRef get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
113 {
114 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
115 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
116
117 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
118 }
119
120 static LLVMValueRef get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
121 {
122 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
123 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
124 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
125
126 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
127 }
128
129 static LLVMValueRef get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
130 {
131 LLVMValueRef patch0_patch_data_offset = get_tcs_out_patch0_patch_data_offset(ctx);
132 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
133 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
134
135 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
136 }
137
138 static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
139 {
140 unsigned tcs_out_vertices =
141 ctx->shader->selector ? ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT]
142 : 0;
143
144 /* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
145 if (ctx->stage == MESA_SHADER_TESS_CTRL && tcs_out_vertices)
146 return LLVMConstInt(ctx->ac.i32, tcs_out_vertices, 0);
147
148 return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
149 }
150
151 static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
152 {
153 unsigned stride;
154
155 switch (ctx->stage) {
156 case MESA_SHADER_VERTEX:
157 stride = ctx->shader->selector->lshs_vertex_stride / 4;
158 return LLVMConstInt(ctx->ac.i32, stride, 0);
159
160 case MESA_SHADER_TESS_CTRL:
161 if (ctx->screen->info.chip_class >= GFX9 && ctx->shader->is_monolithic) {
162 stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
163 return LLVMConstInt(ctx->ac.i32, stride, 0);
164 }
165 return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);
166
167 default:
168 assert(0);
169 return NULL;
170 }
171 }
172
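/* Compute an LDS dword address for a generic TCS input/output:
 *
 *   dw_addr = base_addr
 *           + vertex_index * vertex_dw_stride    (skipped for per-patch slots)
 *           + (unique_index(name) + param_index) * 4
 *
 * where the unique index packs the VARYING_SLOT_* semantic into a slot number
 * (per-patch slots use the separate patch index space).
 */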
173 static LLVMValueRef
174 get_dw_address_from_generic_indices(struct si_shader_context *ctx, LLVMValueRef vertex_dw_stride,
175 LLVMValueRef base_addr, LLVMValueRef vertex_index,
176 LLVMValueRef param_index, ubyte name)
177 {
178 if (vertex_dw_stride) {
179 base_addr = ac_build_imad(&ctx->ac, vertex_index, vertex_dw_stride, base_addr);
180 }
181
182 if (param_index) {
183 base_addr = ac_build_imad(&ctx->ac, param_index, LLVMConstInt(ctx->ac.i32, 4, 0), base_addr);
184 }
185
186 int param = name >= VARYING_SLOT_PATCH0 ||
187 name == VARYING_SLOT_TESS_LEVEL_INNER ||
188 name == VARYING_SLOT_TESS_LEVEL_OUTER
189 ? si_shader_io_get_unique_index_patch(name)
190 : si_shader_io_get_unique_index(name, false);
191
192 /* Add the base address of the element. */
193 return LLVMBuildAdd(ctx->ac.builder, base_addr, LLVMConstInt(ctx->ac.i32, param * 4, 0), "");
194 }
195
196 /* The offchip buffer layout for TCS->TES is
197 *
198 * - attribute 0 of patch 0 vertex 0
199 * - attribute 0 of patch 0 vertex 1
200 * - attribute 0 of patch 0 vertex 2
201 * ...
202 * - attribute 0 of patch 1 vertex 0
203 * - attribute 0 of patch 1 vertex 1
204 * ...
205 * - attribute 1 of patch 0 vertex 0
206 * - attribute 1 of patch 0 vertex 1
207 * ...
208 * - per patch attribute 0 of patch 0
209 * - per patch attribute 0 of patch 1
210 * ...
211 *
212 * Note that every attribute has 4 components.
213 */
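/* The resulting byte offsets (each attribute is a vec4, i.e. 16 bytes):
 *
 *   per-vertex: 16 * (rel_patch_id * vertices_per_patch + vertex_index
 *                     + param_index * total_vertices)
 *   per-patch:  16 * (rel_patch_id + param_index * num_patches)
 *               + the per-patch data offset from tcs_offchip_layout
 */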
214 static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
215 LLVMValueRef rel_patch_id, LLVMValueRef vertex_index,
216 LLVMValueRef param_index)
217 {
218 LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
219 LLVMValueRef param_stride, constant16;
220
221 vertices_per_patch = get_num_tcs_out_vertices(ctx);
222 num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
223 total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch, num_patches, "");
224
225 constant16 = LLVMConstInt(ctx->ac.i32, 16, 0);
226 if (vertex_index) {
227 base_addr = ac_build_imad(&ctx->ac, rel_patch_id, vertices_per_patch, vertex_index);
228 param_stride = total_vertices;
229 } else {
230 base_addr = rel_patch_id;
231 param_stride = num_patches;
232 }
233
234 base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
235 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
236
237 if (!vertex_index) {
238 LLVMValueRef patch_data_offset = si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);
239
240 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr, patch_data_offset, "");
241 }
242 return base_addr;
243 }
244
245 static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(struct si_shader_context *ctx,
246 LLVMValueRef vertex_index,
247 LLVMValueRef param_index,
248 ubyte name)
249 {
250 unsigned param_index_base;
251
252 param_index_base = name >= VARYING_SLOT_PATCH0 ||
253 name == VARYING_SLOT_TESS_LEVEL_INNER ||
254 name == VARYING_SLOT_TESS_LEVEL_OUTER
255 ? si_shader_io_get_unique_index_patch(name)
256 : si_shader_io_get_unique_index(name, false);
257
258 if (param_index) {
259 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
260 LLVMConstInt(ctx->ac.i32, param_index_base, 0), "");
261 } else {
262 param_index = LLVMConstInt(ctx->ac.i32, param_index_base, 0);
263 }
264
265 return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), vertex_index, param_index);
266 }
267
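/* Load from the given buffer descriptor: one 32-bit component (swizzle 0..3),
 * one 64-bit component (split into two dword loads), or a whole vec4 when
 * swizzle == ~0.
 */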
268 static LLVMValueRef buffer_load(struct si_shader_context *ctx, LLVMTypeRef type, unsigned swizzle,
269 LLVMValueRef buffer, LLVMValueRef offset, LLVMValueRef base,
270 bool can_speculate)
271 {
272 LLVMValueRef value, value2;
273 LLVMTypeRef vec_type = LLVMVectorType(type, 4);
274
275 if (swizzle == ~0) {
276 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset, 0, ac_glc,
277 can_speculate, false);
278
279 return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
280 }
281
282 if (ac_get_type_size(type) != 8) {
283 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset, 0, ac_glc,
284 can_speculate, false);
285
286 value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
287 return LLVMBuildExtractElement(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, swizzle, 0),
288 "");
289 }
290
291 value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset, swizzle * 4, ac_glc,
292 can_speculate, false);
293
294 value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset, swizzle * 4 + 4, ac_glc,
295 can_speculate, false);
296
297 return si_build_gather_64bit(ctx, type, value, value2);
298 }
299
300 /**
301 * Load from LSHS LDS storage.
302 *
303 * \param type output value type
304 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
305 * \param dw_addr address in dwords
306 */
307 static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx, LLVMTypeRef type, unsigned swizzle,
308 LLVMValueRef dw_addr)
309 {
310 LLVMValueRef value;
311
312 if (swizzle == ~0) {
313 LLVMValueRef values[4];
314
315 for (unsigned chan = 0; chan < 4; chan++)
316 values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);
317
318 return ac_build_gather_values(&ctx->ac, values, 4);
319 }
320
321 /* Split 64-bit loads. */
322 if (ac_get_type_size(type) == 8) {
323 LLVMValueRef lo, hi;
324
325 lo = lshs_lds_load(ctx, ctx->ac.i32, swizzle, dw_addr);
326 hi = lshs_lds_load(ctx, ctx->ac.i32, swizzle + 1, dw_addr);
327 return si_build_gather_64bit(ctx, type, lo, hi);
328 }
329
330 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, LLVMConstInt(ctx->ac.i32, swizzle, 0), "");
331
332 value = ac_lds_load(&ctx->ac, dw_addr);
333
334 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
335 }
336
337 /**
338 * Store to LSHS LDS storage.
339 *
340 * \param dw_offset_imm dword offset added to dw_addr (typically 0..3)
341 * \param dw_addr address in dwords
342 * \param value value to store
343 */
344 static void lshs_lds_store(struct si_shader_context *ctx, unsigned dw_offset_imm,
345 LLVMValueRef dw_addr, LLVMValueRef value)
346 {
347 dw_addr =
348 LLVMBuildAdd(ctx->ac.builder, dw_addr, LLVMConstInt(ctx->ac.i32, dw_offset_imm, 0), "");
349
350 ac_lds_store(&ctx->ac, dw_addr, value);
351 }
352
353 enum si_tess_ring
354 {
355 TCS_FACTOR_RING,
356 TESS_OFFCHIP_RING_TCS,
357 TESS_OFFCHIP_RING_TES,
358 };
359
360 static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx, enum si_tess_ring ring)
361 {
362 LLVMBuilderRef builder = ctx->ac.builder;
363 LLVMValueRef addr = ac_get_arg(
364 &ctx->ac, ring == TESS_OFFCHIP_RING_TES ? ctx->tes_offchip_addr : ctx->tcs_out_lds_layout);
365
366 /* TCS only receives high 13 bits of the address. */
367 if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
368 addr = LLVMBuildAnd(builder, addr, LLVMConstInt(ctx->ac.i32, 0xfff80000, 0), "");
369 }
370
371 if (ring == TCS_FACTOR_RING) {
372 unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
373 addr = LLVMBuildAdd(builder, addr, LLVMConstInt(ctx->ac.i32, tf_offset, 0), "");
374 }
375
376 uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
377 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
378
379 if (ctx->screen->info.chip_class >= GFX10)
380 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
381 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
382 else
383 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
384 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
385
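   /* Assemble the 4-dword buffer resource descriptor: word 0 = base address
    * (low 32 bits), word 1 = BASE_ADDRESS_HI with the stride left at 0,
    * word 2 = NUM_RECORDS (0xffffffff, effectively unbounded),
    * word 3 = the dst_sel/format bits built above.
    */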
386 LLVMValueRef desc[4];
387 desc[0] = addr;
388 desc[1] = LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
389 desc[2] = LLVMConstInt(ctx->ac.i32, 0xffffffff, 0);
390 desc[3] = LLVMConstInt(ctx->ac.i32, rsrc3, false);
391
392 return ac_build_gather_values(&ctx->ac, desc, 4);
393 }
394
395 void si_llvm_preload_tes_rings(struct si_shader_context *ctx)
396 {
397 ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
398 }
399
400 static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi, LLVMTypeRef type,
401 LLVMValueRef vertex_index, LLVMValueRef param_index,
402 unsigned const_index, unsigned location,
403 unsigned driver_location, unsigned component,
404 unsigned num_components, bool unused,
405 bool is_compact, bool load_input)
406 {
407 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
408 struct si_shader_info *info = &ctx->shader->selector->info;
409 LLVMValueRef dw_addr, stride;
410 ubyte semantic;
411
412 driver_location = driver_location / 4;
413
414 if (load_input) {
415 semantic = info->input_semantic[driver_location];
416 } else {
417 semantic = info->output_semantic[driver_location];
418 }
419
420 bool is_patch = vertex_index == NULL;
421 assert((semantic >= VARYING_SLOT_PATCH0 ||
422 semantic == VARYING_SLOT_TESS_LEVEL_INNER ||
423 semantic == VARYING_SLOT_TESS_LEVEL_OUTER) == is_patch);
424
425 if (load_input) {
426 stride = get_tcs_in_vertex_dw_stride(ctx);
427 dw_addr = get_tcs_in_current_patch_offset(ctx);
428 } else {
429 if (is_patch) {
430 stride = NULL;
431 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
432 } else {
433 stride = get_tcs_out_vertex_dw_stride(ctx);
434 dw_addr = get_tcs_out_current_patch_offset(ctx);
435 }
436 }
437
438 if (!param_index) {
439 param_index = LLVMConstInt(ctx->ac.i32, const_index, 0);
440 }
441
442 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr, vertex_index, param_index,
443 semantic);
444
445 LLVMValueRef value[4];
446 for (unsigned i = 0; i < num_components; i++) {
447 unsigned offset = i;
448 if (ac_get_type_size(type) == 8)
449 offset *= 2;
450
451 offset += component;
452 value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
453 }
454
455 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
456 }
457
458 static LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi, LLVMTypeRef type,
459 LLVMValueRef vertex_index, LLVMValueRef param_index,
460 unsigned const_index, unsigned location,
461 unsigned driver_location, unsigned component,
462 unsigned num_components, bool unused, bool is_compact,
463 bool load_input)
464 {
465 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
466 struct si_shader_info *info = &ctx->shader->selector->info;
467 LLVMValueRef base, addr;
468
469 driver_location = driver_location / 4;
470 ubyte semantic = info->input_semantic[driver_location];
471
472 assert((semantic >= VARYING_SLOT_PATCH0 ||
473 semantic == VARYING_SLOT_TESS_LEVEL_INNER ||
474 semantic == VARYING_SLOT_TESS_LEVEL_OUTER) == (vertex_index == NULL));
475
476 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
477
478 if (!param_index) {
479 param_index = LLVMConstInt(ctx->ac.i32, const_index, 0);
480 }
481
482 addr =
483 get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index, param_index, semantic);
484
485 /* TODO: This will generate rather ordinary LLVM code, although it
486 * should be easy for the optimiser to fix up. In future we might want
487 * to refactor buffer_load().
488 */
489 LLVMValueRef value[4];
490 for (unsigned i = 0; i < num_components; i++) {
491 unsigned offset = i;
492 if (ac_get_type_size(type) == 8) {
493 offset *= 2;
494 if (offset == 4) {
495 ubyte semantic = info->input_semantic[driver_location + 1];
496 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index, param_index,
497 semantic);
498 }
499
500 offset = offset % 4;
501 }
502
503 offset += component;
504 value[i + component] =
505 buffer_load(ctx, type, offset, ctx->tess_offchip_ring, base, addr, true);
506 }
507
508 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
509 }
510
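/* Store a TCS output. Non-tess-factor components always go to the off-chip
 * buffer for TES; components that are read back by the TCS (and tess factors
 * needed by the TCS epilog) are also written to LDS, and invocation-0 tess
 * factors may instead be kept in VGPRs for the epilog.
 */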
511 static void si_nir_store_output_tcs(struct ac_shader_abi *abi, const struct nir_variable *var,
512 LLVMValueRef vertex_index, LLVMValueRef param_index,
513 unsigned const_index, LLVMValueRef src, unsigned writemask,
514 unsigned component, unsigned driver_location)
515 {
516 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
517 struct si_shader_info *info = &ctx->shader->selector->info;
518 LLVMValueRef dw_addr, stride;
519 LLVMValueRef buffer, base, addr;
520 LLVMValueRef values[8];
521 bool is_tess_factor = false, is_tess_inner = false;
522
523 driver_location = driver_location / 4;
524 ubyte semantic = info->output_semantic[driver_location];
525
526 bool is_const = !param_index;
527 if (!param_index)
528 param_index = LLVMConstInt(ctx->ac.i32, const_index, 0);
529
530 const bool is_patch = vertex_index == NULL;
531
532 /* Invalid SPIR-V can cause this. */
533 if ((semantic >= VARYING_SLOT_PATCH0 || semantic == VARYING_SLOT_TESS_LEVEL_INNER ||
534 semantic == VARYING_SLOT_TESS_LEVEL_OUTER) != is_patch)
535 return;
536
537 if (!is_patch) {
538 stride = get_tcs_out_vertex_dw_stride(ctx);
539 dw_addr = get_tcs_out_current_patch_offset(ctx);
540 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr, vertex_index, param_index,
541 semantic);
542 } else {
543 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
544 dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr, vertex_index, param_index,
545 semantic);
546
547 if (is_const && const_index == 0) {
548 int semantic = info->output_semantic[driver_location];
549
550 /* Always write tess factors into LDS for the TCS epilog. */
551 if (semantic == VARYING_SLOT_TESS_LEVEL_INNER ||
552 semantic == VARYING_SLOT_TESS_LEVEL_OUTER) {
553 is_tess_factor = true;
554 is_tess_inner = semantic == VARYING_SLOT_TESS_LEVEL_INNER;
555 }
556 }
557 }
558
559 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
560
561 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
562
563 addr =
564 get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index, param_index, semantic);
565
566 for (unsigned chan = component; chan < 8; chan++) {
567 if (!(writemask & (1 << chan)))
568 continue;
569 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
570
571 unsigned buffer_store_offset = chan % 4;
572 if (chan == 4) {
573 ubyte semantic = info->output_semantic[driver_location + 1];
574 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index, param_index,
575 semantic);
576 }
577
578 /* Skip LDS stores if there is no LDS read of this output. */
579 if (info->output_readmask[driver_location + chan / 4] & (1 << (chan % 4)) ||
580 /* The epilog reads LDS if invocation 0 doesn't define tess factors. */
581 (is_tess_factor &&
582 !ctx->shader->selector->info.tessfactors_are_def_in_all_invocs))
583 lshs_lds_store(ctx, chan, dw_addr, value);
584
585 value = ac_to_integer(&ctx->ac, value);
586 values[chan] = value;
587
588 if (writemask != 0xF && !is_tess_factor) {
589 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1, addr, base,
590 4 * buffer_store_offset, ac_glc);
591 }
592
593 /* Write tess factors into VGPRs for the epilog. */
594 if (is_tess_factor && ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
595 if (!is_tess_inner) {
596 LLVMBuildStore(ctx->ac.builder, value, /* outer */
597 ctx->invoc0_tess_factors[chan]);
598 } else if (chan < 2) {
599 LLVMBuildStore(ctx->ac.builder, value, /* inner */
600 ctx->invoc0_tess_factors[4 + chan]);
601 }
602 }
603 }
604
605 if (writemask == 0xF && !is_tess_factor) {
606 LLVMValueRef value = ac_build_gather_values(&ctx->ac, values, 4);
607 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr, base, 0, ac_glc);
608 }
609 }
610
611 static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
612 {
613 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
614 LLVMValueRef coord[4] = {ac_get_arg(&ctx->ac, ctx->tes_u), ac_get_arg(&ctx->ac, ctx->tes_v),
615 ctx->ac.f32_0, ctx->ac.f32_0};
616
617 /* For triangles, the vector should be (u, v, 1-u-v). */
618 if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_TRIANGLES) {
619 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
620 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
621 }
622 return ac_build_gather_values(&ctx->ac, coord, 4);
623 }
624
625 static LLVMValueRef load_tess_level(struct si_shader_context *ctx, unsigned semantic)
626 {
627 LLVMValueRef base, addr;
628
629 int param = si_shader_io_get_unique_index_patch(semantic);
630
631 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
632 addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
633 LLVMConstInt(ctx->ac.i32, param, 0));
634
635 return buffer_load(ctx, ctx->ac.f32, ~0, ctx->tess_offchip_ring, base, addr, true);
636 }
637
638 static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx, unsigned sysval)
639 {
640 LLVMValueRef buf, slot, val[4];
641 int i, offset;
642
643 slot = LLVMConstInt(ctx->ac.i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
644 buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
645 buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
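   /* The defaults are packed as the 4 outer levels followed by the inner
    * levels, so the inner defaults start at dword 4 (byte offset 16). */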
646 offset = sysval == SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT ? 4 : 0;
647
648 for (i = 0; i < 4; i++)
649 val[i] = si_buffer_load_const(ctx, buf, LLVMConstInt(ctx->ac.i32, (offset + i) * 4, 0));
650 return ac_build_gather_values(&ctx->ac, val, 4);
651 }
652
653 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi, unsigned varying_id,
654 bool load_default_state)
655 {
656 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
657 unsigned semantic;
658
659 if (load_default_state) {
660 switch (varying_id) {
661 case VARYING_SLOT_TESS_LEVEL_INNER:
662 semantic = SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT;
663 break;
664 case VARYING_SLOT_TESS_LEVEL_OUTER:
665 semantic = SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT;
666 break;
667 default:
668 unreachable("unknown tess level");
669 }
670 return load_tess_level_default(ctx, semantic);
671 }
672
673 switch (varying_id) {
674 case VARYING_SLOT_TESS_LEVEL_INNER:
675 semantic = VARYING_SLOT_TESS_LEVEL_INNER;
676 break;
677 case VARYING_SLOT_TESS_LEVEL_OUTER:
678 semantic = VARYING_SLOT_TESS_LEVEL_OUTER;
679 break;
680 default:
681 unreachable("unknown tess level");
682 }
683
684 return load_tess_level(ctx, semantic);
685 }
686
687 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
688 {
689 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
690 if (ctx->stage == MESA_SHADER_TESS_CTRL)
691 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
692 else if (ctx->stage == MESA_SHADER_TESS_EVAL)
693 return get_num_tcs_out_vertices(ctx);
694 else
695 unreachable("invalid shader stage for VERTICESIN");
696 }
697
698 /**
699 * Forward all outputs from the vertex shader to the TES. This is only used
700 * for the fixed function TCS.
701 */
702 static void si_copy_tcs_inputs(struct si_shader_context *ctx)
703 {
704 LLVMValueRef invocation_id, buffer, buffer_offset;
705 LLVMValueRef lds_vertex_stride, lds_base;
706 uint64_t inputs;
707
708 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
709 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
710 buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
711
712 lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
713 lds_base = get_tcs_in_current_patch_offset(ctx);
714 lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride, lds_base);
715
716 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
717 while (inputs) {
718 unsigned i = u_bit_scan64(&inputs);
719
720 LLVMValueRef lds_ptr =
721 LLVMBuildAdd(ctx->ac.builder, lds_base, LLVMConstInt(ctx->ac.i32, 4 * i, 0), "");
722
723 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(
724 ctx, get_rel_patch_id(ctx), invocation_id, LLVMConstInt(ctx->ac.i32, i, 0));
725
726 LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);
727
728 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr, buffer_offset, 0,
729 ac_glc);
730 }
731 }
732
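/* Gather the tess factors of the current patch (from the invocation-0 VGPRs or
 * from LDS), reorder them for isolines, and store them to the tess factor
 * ring; if TES reads the tess factors, also copy them to the off-chip buffer.
 */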
733 static void si_write_tess_factors(struct si_shader_context *ctx, LLVMValueRef rel_patch_id,
734 LLVMValueRef invocation_id,
735 LLVMValueRef tcs_out_current_patch_data_offset,
736 LLVMValueRef invoc0_tf_outer[4], LLVMValueRef invoc0_tf_inner[2])
737 {
738 struct si_shader *shader = ctx->shader;
739 unsigned tess_inner_index, tess_outer_index;
740 LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
741 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
742 unsigned stride, outer_comps, inner_comps, i, offset;
743
744 /* Add a barrier before loading tess factors from LDS. */
745 if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
746 si_llvm_emit_barrier(ctx);
747
748 /* Do this only for invocation 0, because the tess levels are per-patch,
749 * not per-vertex.
750 *
751 * The branch can't be skipped entirely, because invocation 0 always takes it;
752 * it at least masks out the loads and stores for the other invocations.
753 */
754 ac_build_ifcc(&ctx->ac,
755 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, invocation_id, ctx->ac.i32_0, ""), 6503);
756
757 /* Determine the layout of one tess factor element in the buffer. */
758 switch (shader->key.part.tcs.epilog.prim_mode) {
759 case PIPE_PRIM_LINES:
760 stride = 2; /* 2 dwords, 1 vec2 store */
761 outer_comps = 2;
762 inner_comps = 0;
763 break;
764 case PIPE_PRIM_TRIANGLES:
765 stride = 4; /* 4 dwords, 1 vec4 store */
766 outer_comps = 3;
767 inner_comps = 1;
768 break;
769 case PIPE_PRIM_QUADS:
770 stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
771 outer_comps = 4;
772 inner_comps = 2;
773 break;
774 default:
775 assert(0);
776 return;
777 }
778
779 for (i = 0; i < 4; i++) {
780 inner[i] = LLVMGetUndef(ctx->ac.i32);
781 outer[i] = LLVMGetUndef(ctx->ac.i32);
782 }
783
784 if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
785 /* Tess factors are in VGPRs. */
786 for (i = 0; i < outer_comps; i++)
787 outer[i] = out[i] = invoc0_tf_outer[i];
788 for (i = 0; i < inner_comps; i++)
789 inner[i] = out[outer_comps + i] = invoc0_tf_inner[i];
790 } else {
791 /* Load tess_inner and tess_outer from LDS.
792 * Any invocation can write them, so we can't get them from a temporary.
793 */
794 tess_inner_index = si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_INNER);
795 tess_outer_index = si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_OUTER);
796
797 lds_base = tcs_out_current_patch_data_offset;
798 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
799 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, 0), "");
800 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
801 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, 0), "");
802
803 for (i = 0; i < outer_comps; i++) {
804 outer[i] = out[i] = lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
805 }
806 for (i = 0; i < inner_comps; i++) {
807 inner[i] = out[outer_comps + i] = lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
808 }
809 }
810
811 if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
812 /* For isolines, the hardware expects tess factors in the
813 * reverse order from what NIR specifies.
814 */
815 LLVMValueRef tmp = out[0];
816 out[0] = out[1];
817 out[1] = tmp;
818 }
819
820 /* Convert the outputs to vectors for stores. */
821 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
822 vec1 = NULL;
823
824 if (stride > 4)
825 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
826
827 /* Get the buffer. */
828 buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);
829
830 /* Get the offset. */
831 tf_base = ac_get_arg(&ctx->ac, ctx->tcs_factor_offset);
832 byteoffset =
833 LLVMBuildMul(ctx->ac.builder, rel_patch_id, LLVMConstInt(ctx->ac.i32, 4 * stride, 0), "");
834
835 ac_build_ifcc(&ctx->ac,
836 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, rel_patch_id, ctx->ac.i32_0, ""), 6504);
837
838 /* Store the dynamic HS control word. */
839 offset = 0;
840 if (ctx->screen->info.chip_class <= GFX8) {
841 ac_build_buffer_store_dword(&ctx->ac, buffer, LLVMConstInt(ctx->ac.i32, 0x80000000, 0), 1,
842 ctx->ac.i32_0, tf_base, offset, ac_glc);
843 offset += 4;
844 }
845
846 ac_build_endif(&ctx->ac, 6504);
847
848 /* Store the tessellation factors. */
849 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0, MIN2(stride, 4), byteoffset, tf_base, offset,
850 ac_glc);
851 offset += 16;
852 if (vec1)
853 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1, stride - 4, byteoffset, tf_base, offset,
854 ac_glc);
855
856 /* Store the tess factors into the offchip buffer if TES reads them. */
857 if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
858 LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
859 LLVMValueRef tf_inner_offset;
860 unsigned param_outer, param_inner;
861
862 buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
863 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
864
865 param_outer = si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_OUTER);
866 tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
867 LLVMConstInt(ctx->ac.i32, param_outer, 0));
868
869 unsigned outer_vec_size = ac_has_vec3_support(ctx->screen->info.chip_class, false)
870 ? outer_comps
871 : util_next_power_of_two(outer_comps);
872 outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);
873
874 ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec, outer_comps, tf_outer_offset, base, 0,
875 ac_glc);
876 if (inner_comps) {
877 param_inner = si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_INNER);
878 tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
879 LLVMConstInt(ctx->ac.i32, param_inner, 0));
880
881 inner_vec =
882 inner_comps == 1 ? inner[0] : ac_build_gather_values(&ctx->ac, inner, inner_comps);
883 ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec, inner_comps, tf_inner_offset, base,
884 0, ac_glc);
885 }
886 }
887
888 ac_build_endif(&ctx->ac, 6503);
889 }
890
891 /* This only writes the tessellation factor levels. */
892 static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi, unsigned max_outputs,
893 LLVMValueRef *addrs)
894 {
895 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
896 LLVMBuilderRef builder = ctx->ac.builder;
897 LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;
898
899 si_copy_tcs_inputs(ctx);
900
901 rel_patch_id = get_rel_patch_id(ctx);
902 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
903 tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
904
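   /* On GFX9+, LS and HS run as one merged shader and the HS part is wrapped
    * in an "if" over the HS threads. Close that wrap here and build phis so
    * the values computed inside it reach the epilog return; the undef/i32_1
    * incoming values are only seen by threads that skipped the HS part.
    */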
905 if (ctx->screen->info.chip_class >= GFX9) {
906 LLVMBasicBlockRef blocks[2] = {LLVMGetInsertBlock(builder), ctx->merged_wrap_if_entry_block};
907 LLVMValueRef values[2];
908
909 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
910
911 values[0] = rel_patch_id;
912 values[1] = LLVMGetUndef(ctx->ac.i32);
913 rel_patch_id = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, values, blocks);
914
915 values[0] = tf_lds_offset;
916 values[1] = LLVMGetUndef(ctx->ac.i32);
917 tf_lds_offset = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, values, blocks);
918
919 values[0] = invocation_id;
920 values[1] = ctx->ac.i32_1; /* cause the epilog to skip threads */
921 invocation_id = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, values, blocks);
922 }
923
924 /* Return epilog parameters from this function. */
925 LLVMValueRef ret = ctx->return_value;
926 unsigned vgpr;
927
928 if (ctx->screen->info.chip_class >= GFX9) {
929 ret =
930 si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout, 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
931 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout, 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
932 /* Tess offchip and tess factor offsets are at the beginning. */
933 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
934 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
935 vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
936 } else {
937 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout, GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
938 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout, GFX6_SGPR_TCS_OUT_LAYOUT);
939 /* Tess offchip and tess factor offsets are after user SGPRs. */
940 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, GFX6_TCS_NUM_USER_SGPR);
941 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, GFX6_TCS_NUM_USER_SGPR + 1);
942 vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
943 }
944
945 /* VGPRs */
946 rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
947 invocation_id = ac_to_float(&ctx->ac, invocation_id);
948 tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);
949
950 /* Leave a hole corresponding to the two input VGPRs. This ensures that
951 * the invocation_id output does not alias the tcs_rel_ids input,
952 * which saves a V_MOV on gfx9.
953 */
954 vgpr += 2;
955
956 ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
957 ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
958
959 if (ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
960 vgpr++; /* skip the tess factor LDS offset */
961 for (unsigned i = 0; i < 6; i++) {
962 LLVMValueRef value = LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
963 value = ac_to_float(&ctx->ac, value);
964 ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
965 }
966 } else {
967 ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
968 }
969 ctx->return_value = ret;
970 }
971
972 /* Pass TCS inputs from LS to TCS on GFX9. */
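/* On GFX9 the LS and HS stages are combined into one hardware shader, so the
 * LS part hands the SGPR/VGPR inputs expected by the TCS part through its
 * return value, keeping them in the registers the TCS part reads. */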
973 static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
974 {
975 LLVMValueRef ret = ctx->return_value;
976
977 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
978 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
979 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
980 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
981 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
982 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
983
984 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers, 8 + SI_SGPR_RW_BUFFERS);
985 ret = si_insert_input_ptr(ctx, ret, ctx->bindless_samplers_and_images,
986 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
987
988 ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);
989
990 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout, 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
991 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets, 8 + GFX9_SGPR_TCS_OUT_OFFSETS);
992 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout, 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
993
994 unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
995 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
996 ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
997 vgpr++, "");
998 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
999 ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
1000 vgpr++, "");
1001 ctx->return_value = ret;
1002 }
1003
1004 void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi, unsigned max_outputs, LLVMValueRef *addrs)
1005 {
1006 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1007 struct si_shader *shader = ctx->shader;
1008 struct si_shader_info *info = &shader->selector->info;
1009 unsigned i, chan;
1010 LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
1011 LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
1012 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id, vertex_dw_stride, "");
1013
1014 /* Write outputs to LDS. The next shader (TCS aka HS) will read
1015 * its inputs from it. */
1016 for (i = 0; i < info->num_outputs; i++) {
1017 unsigned semantic = info->output_semantic[i];
1018
1019 /* The ARB_shader_viewport_layer_array spec contains the
1020 * following issue:
1021 *
1022 * 2) What happens if gl_ViewportIndex or gl_Layer is
1023 * written in the vertex shader and a geometry shader is
1024 * present?
1025 *
1026 * RESOLVED: The value written by the last vertex processing
1027 * stage is used. If the last vertex processing stage
1028 * (vertex, tessellation evaluation or geometry) does not
1029 * statically assign to gl_ViewportIndex or gl_Layer, index
1030 * or layer zero is assumed.
1031 *
1032 * So writes to those outputs in VS-as-LS are simply ignored.
1033 */
1034 if (semantic == VARYING_SLOT_LAYER || semantic == VARYING_SLOT_VIEWPORT)
1035 continue;
1036
1037 int param = si_shader_io_get_unique_index(semantic, false);
1038 LLVMValueRef dw_addr =
1039 LLVMBuildAdd(ctx->ac.builder, base_dw_addr, LLVMConstInt(ctx->ac.i32, param * 4, 0), "");
1040
1041 for (chan = 0; chan < 4; chan++) {
1042 if (!(info->output_usagemask[i] & (1 << chan)))
1043 continue;
1044
1045 lshs_lds_store(ctx, chan, dw_addr,
1046 LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
1047 }
1048 }
1049
1050 if (ctx->screen->info.chip_class >= GFX9)
1051 si_set_ls_return_value_for_tcs(ctx);
1052 }
1053
1054 /**
1055 * Compile the TCS epilog function. This writes tessellation factors to memory
1056 * based on the output primitive type of the tessellator (determined by TES).
1057 */
1058 void si_llvm_build_tcs_epilog(struct si_shader_context *ctx, union si_shader_part_key *key)
1059 {
1060 memset(&ctx->args, 0, sizeof(ctx->args));
1061
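   /* Declare the epilog inputs. The unnamed SGPR arguments are placeholders
    * that keep the register layout identical to the TCS main part; only the
    * SGPRs the epilog actually uses (offsets and layouts) get names.
    */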
1062 if (ctx->screen->info.chip_class >= GFX9) {
1063 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1064 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1065 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1066 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* wave info */
1067 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1068 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1069 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1070 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1071 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1072 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1073 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1074 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1075 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1076 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1077 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1078 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1079 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1080 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1081 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1082 } else {
1083 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1084 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1085 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1086 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1087 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1088 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1089 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1090 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1091 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1092 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1093 }
1094
1095 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
1096 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
1097 struct ac_arg rel_patch_id; /* patch index within the wave (REL_PATCH_ID) */
1098 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &rel_patch_id);
1099 struct ac_arg invocation_id; /* invocation ID within the patch */
1100 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &invocation_id);
1101 struct ac_arg
1102 tcs_out_current_patch_data_offset; /* LDS offset where tess factors should be loaded from */
1103 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tcs_out_current_patch_data_offset);
1104
1105 struct ac_arg tess_factors[6];
1106 for (unsigned i = 0; i < 6; i++)
1107 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
1108
1109 /* Create the function. */
1110 si_llvm_create_func(ctx, "tcs_epilog", NULL, 0, ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
1111 ac_declare_lds_as_pointer(&ctx->ac);
1112
1113 LLVMValueRef invoc0_tess_factors[6];
1114 for (unsigned i = 0; i < 6; i++)
1115 invoc0_tess_factors[i] = ac_get_arg(&ctx->ac, tess_factors[i]);
1116
1117 si_write_tess_factors(ctx, ac_get_arg(&ctx->ac, rel_patch_id),
1118 ac_get_arg(&ctx->ac, invocation_id),
1119 ac_get_arg(&ctx->ac, tcs_out_current_patch_data_offset),
1120 invoc0_tess_factors, invoc0_tess_factors + 4);
1121
1122 LLVMBuildRetVoid(ctx->ac.builder);
1123 }
1124
1125 void si_llvm_init_tcs_callbacks(struct si_shader_context *ctx)
1126 {
1127 ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
1128 ctx->abi.load_tess_level = si_load_tess_level;
1129 ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
1130 ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
1131 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
1132 }
1133
1134 void si_llvm_init_tes_callbacks(struct si_shader_context *ctx, bool ngg_cull_shader)
1135 {
1136 ctx->abi.load_tess_varyings = si_nir_load_input_tes;
1137 ctx->abi.load_tess_coord = si_load_tess_coord;
1138 ctx->abi.load_tess_level = si_load_tess_level;
1139 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
1140
1141 if (ctx->shader->key.as_es)
1142 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
1143 else if (ngg_cull_shader)
1144 ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue;
1145 else if (ctx->shader->key.as_ngg)
1146 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
1147 else
1148 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
1149 }