ac: treat Mullins as Kabini, remove the enum
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "util/u_string.h"
27 #include "tgsi/tgsi_build.h"
28 #include "tgsi/tgsi_strings.h"
29 #include "tgsi/tgsi_util.h"
30 #include "tgsi/tgsi_dump.h"
31
32 #include "ac_exp_param.h"
33 #include "ac_shader_util.h"
34 #include "ac_llvm_util.h"
35 #include "si_shader_internal.h"
36 #include "si_pipe.h"
37 #include "sid.h"
38
39 #include "compiler/nir/nir.h"
40
/* Symbol names that are relocated at shader upload time to hold the first
 * two dwords of the scratch buffer resource descriptor.
 */
static const char *scratch_rsrc_dword0_symbol = "SCRATCH_RSRC_DWORD0";
static const char *scratch_rsrc_dword1_symbol = "SCRATCH_RSRC_DWORD1";
46
47 struct si_shader_output_values
48 {
49 LLVMValueRef values[4];
50 unsigned semantic_name;
51 unsigned semantic_index;
52 ubyte vertex_stream[4];
53 };
54
55 static void si_init_shader_ctx(struct si_shader_context *ctx,
56 struct si_screen *sscreen,
57 struct ac_llvm_compiler *compiler);
58
59 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
60 struct lp_build_tgsi_context *bld_base,
61 struct lp_build_emit_data *emit_data);
62
63 static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
64 FILE *f);
65
66 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
67 union si_shader_part_key *key);
68 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
69 union si_shader_part_key *key);
70 static void si_build_ps_prolog_function(struct si_shader_context *ctx,
71 union si_shader_part_key *key);
72 static void si_build_ps_epilog_function(struct si_shader_context *ctx,
73 union si_shader_part_key *key);
74 static void si_fix_resource_usage(struct si_screen *sscreen,
75 struct si_shader *shader);
76
77 /* Ideally pass the sample mask input to the PS epilog as v14, which
78 * is its usual location, so that the shader doesn't have to add v_mov.
79 */
80 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 14
81
82 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
83 LLVMTypeRef type)
84 {
85 if (type == ctx->ac.i64 || type == ctx->ac.f64)
86 return true;
87
88 return false;
89 }
90
91 static bool is_merged_shader(struct si_shader_context *ctx)
92 {
93 if (ctx->screen->info.chip_class <= GFX8)
94 return false;
95
96 return ctx->shader->key.as_ls ||
97 ctx->shader->key.as_es ||
98 ctx->type == PIPE_SHADER_TESS_CTRL ||
99 ctx->type == PIPE_SHADER_GEOMETRY;
100 }
101
102 void si_init_function_info(struct si_function_info *fninfo)
103 {
104 fninfo->num_params = 0;
105 fninfo->num_sgpr_params = 0;
106 }
107
108 unsigned add_arg_assign(struct si_function_info *fninfo,
109 enum si_arg_regfile regfile, LLVMTypeRef type,
110 LLVMValueRef *assign)
111 {
112 assert(regfile != ARG_SGPR || fninfo->num_sgpr_params == fninfo->num_params);
113
114 unsigned idx = fninfo->num_params++;
115 assert(idx < ARRAY_SIZE(fninfo->types));
116
117 if (regfile == ARG_SGPR)
118 fninfo->num_sgpr_params = fninfo->num_params;
119
120 fninfo->types[idx] = type;
121 fninfo->assign[idx] = assign;
122 return idx;
123 }
124
125 static unsigned add_arg(struct si_function_info *fninfo,
126 enum si_arg_regfile regfile, LLVMTypeRef type)
127 {
128 return add_arg_assign(fninfo, regfile, type, NULL);
129 }
130
131 static void add_arg_assign_checked(struct si_function_info *fninfo,
132 enum si_arg_regfile regfile, LLVMTypeRef type,
133 LLVMValueRef *assign, unsigned idx)
134 {
135 MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
136 assert(actual == idx);
137 }
138
139 static void add_arg_checked(struct si_function_info *fninfo,
140 enum si_arg_regfile regfile, LLVMTypeRef type,
141 unsigned idx)
142 {
143 add_arg_assign_checked(fninfo, regfile, type, NULL, idx);
144 }
145
146 /**
147 * Returns a unique index for a per-patch semantic name and index. The index
148 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
149 * can be calculated.
150 */
151 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
152 {
153 switch (semantic_name) {
154 case TGSI_SEMANTIC_TESSOUTER:
155 return 0;
156 case TGSI_SEMANTIC_TESSINNER:
157 return 1;
158 case TGSI_SEMANTIC_PATCH:
159 assert(index < 30);
160 return 2 + index;
161
162 default:
163 assert(!"invalid semantic name");
164 return 0;
165 }
166 }
167
168 /**
169 * Returns a unique index for a semantic name and index. The index must be
170 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
171 * calculated.
172 */
173 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
174 unsigned is_varying)
175 {
176 switch (semantic_name) {
177 case TGSI_SEMANTIC_POSITION:
178 return 0;
179 case TGSI_SEMANTIC_GENERIC:
180 /* Since some shader stages use the the highest used IO index
181 * to determine the size to allocate for inputs/outputs
182 * (in LDS, tess and GS rings). GENERIC should be placed right
183 * after POSITION to make that size as small as possible.
184 */
185 if (index < SI_MAX_IO_GENERIC)
186 return 1 + index;
187
188 assert(!"invalid generic index");
189 return 0;
190 case TGSI_SEMANTIC_PSIZE:
191 return SI_MAX_IO_GENERIC + 1;
192 case TGSI_SEMANTIC_CLIPDIST:
193 assert(index <= 1);
194 return SI_MAX_IO_GENERIC + 2 + index;
195 case TGSI_SEMANTIC_FOG:
196 return SI_MAX_IO_GENERIC + 4;
197 case TGSI_SEMANTIC_LAYER:
198 return SI_MAX_IO_GENERIC + 5;
199 case TGSI_SEMANTIC_VIEWPORT_INDEX:
200 return SI_MAX_IO_GENERIC + 6;
201 case TGSI_SEMANTIC_PRIMID:
202 return SI_MAX_IO_GENERIC + 7;
203 case TGSI_SEMANTIC_COLOR:
204 assert(index < 2);
205 return SI_MAX_IO_GENERIC + 8 + index;
206 case TGSI_SEMANTIC_BCOLOR:
207 assert(index < 2);
208 /* If it's a varying, COLOR and BCOLOR alias. */
209 if (is_varying)
210 return SI_MAX_IO_GENERIC + 8 + index;
211 else
212 return SI_MAX_IO_GENERIC + 10 + index;
213 case TGSI_SEMANTIC_TEXCOORD:
214 assert(index < 8);
215 STATIC_ASSERT(SI_MAX_IO_GENERIC + 12 + 8 <= 63);
216 return SI_MAX_IO_GENERIC + 12 + index;
217 case TGSI_SEMANTIC_CLIPVERTEX:
218 return 63;
219 default:
220 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
221 assert(!"invalid semantic name");
222 return 0;
223 }
224 }
225
226 /**
227 * Get the value of a shader input parameter and extract a bitfield.
228 */
229 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
230 LLVMValueRef value, unsigned rshift,
231 unsigned bitwidth)
232 {
233 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
234 value = ac_to_integer(&ctx->ac, value);
235
236 if (rshift)
237 value = LLVMBuildLShr(ctx->ac.builder, value,
238 LLVMConstInt(ctx->i32, rshift, 0), "");
239
240 if (rshift + bitwidth < 32) {
241 unsigned mask = (1 << bitwidth) - 1;
242 value = LLVMBuildAnd(ctx->ac.builder, value,
243 LLVMConstInt(ctx->i32, mask, 0), "");
244 }
245
246 return value;
247 }
248
249 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
250 unsigned param, unsigned rshift,
251 unsigned bitwidth)
252 {
253 LLVMValueRef value = LLVMGetParam(ctx->main_fn, param);
254
255 return unpack_llvm_param(ctx, value, rshift, bitwidth);
256 }
257
258 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
259 {
260 switch (ctx->type) {
261 case PIPE_SHADER_TESS_CTRL:
262 return unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 0, 8);
263
264 case PIPE_SHADER_TESS_EVAL:
265 return LLVMGetParam(ctx->main_fn,
266 ctx->param_tes_rel_patch_id);
267
268 default:
269 assert(0);
270 return NULL;
271 }
272 }
273
274 /* Tessellation shaders pass outputs to the next shader using LDS.
275 *
276 * LS outputs = TCS inputs
277 * TCS outputs = TES inputs
278 *
279 * The LDS layout is:
280 * - TCS inputs for patch 0
281 * - TCS inputs for patch 1
282 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
283 * - ...
284 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
285 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
286 * - TCS outputs for patch 1
287 * - Per-patch TCS outputs for patch 1
288 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
289 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
290 * - ...
291 *
292 * All three shaders VS(LS), TCS, TES share the same LDS space.
293 */
294
295 static LLVMValueRef
296 get_tcs_in_patch_stride(struct si_shader_context *ctx)
297 {
298 return si_unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
299 }
300
301 static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
302 {
303 assert(ctx->type == PIPE_SHADER_TESS_CTRL);
304
305 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
306 return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;
307
308 return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
309 }
310
311 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
312 {
313 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
314
315 return LLVMConstInt(ctx->i32, stride, 0);
316 }
317
318 static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
319 {
320 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
321 return si_unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
322
323 const struct tgsi_shader_info *info = &ctx->shader->selector->info;
324 unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
325 unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
326 unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
327 unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
328 num_patch_outputs * 4;
329 return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
330 }
331
332 static LLVMValueRef
333 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
334 {
335 return LLVMBuildMul(ctx->ac.builder,
336 si_unpack_param(ctx,
337 ctx->param_tcs_out_lds_offsets,
338 0, 16),
339 LLVMConstInt(ctx->i32, 4, 0), "");
340 }
341
342 static LLVMValueRef
343 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
344 {
345 return LLVMBuildMul(ctx->ac.builder,
346 si_unpack_param(ctx,
347 ctx->param_tcs_out_lds_offsets,
348 16, 16),
349 LLVMConstInt(ctx->i32, 4, 0), "");
350 }
351
352 static LLVMValueRef
353 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
354 {
355 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
356 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
357
358 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
359 }
360
361 static LLVMValueRef
362 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
363 {
364 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
365 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
366 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
367
368 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
369 }
370
371 static LLVMValueRef
372 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
373 {
374 LLVMValueRef patch0_patch_data_offset =
375 get_tcs_out_patch0_patch_data_offset(ctx);
376 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
377 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
378
379 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
380 }
381
382 static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
383 {
384 unsigned tcs_out_vertices =
385 ctx->shader->selector ?
386 ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;
387
388 /* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
389 if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
390 return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);
391
392 return si_unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
393 }
394
395 static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
396 {
397 unsigned stride;
398
399 switch (ctx->type) {
400 case PIPE_SHADER_VERTEX:
401 stride = ctx->shader->selector->lshs_vertex_stride / 4;
402 return LLVMConstInt(ctx->i32, stride, 0);
403
404 case PIPE_SHADER_TESS_CTRL:
405 if (ctx->screen->info.chip_class >= GFX9 &&
406 ctx->shader->is_monolithic) {
407 stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
408 return LLVMConstInt(ctx->i32, stride, 0);
409 }
410 return si_unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
411
412 default:
413 assert(0);
414 return NULL;
415 }
416 }
417
418 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
419 LLVMValueRef i32, unsigned index)
420 {
421 assert(index <= 1);
422
423 if (index == 1)
424 return LLVMBuildAShr(ctx->ac.builder, i32,
425 LLVMConstInt(ctx->i32, 16, 0), "");
426
427 return LLVMBuildSExt(ctx->ac.builder,
428 LLVMBuildTrunc(ctx->ac.builder, i32,
429 ctx->ac.i16, ""),
430 ctx->i32, "");
431 }
432
433 void si_llvm_load_input_vs(
434 struct si_shader_context *ctx,
435 unsigned input_index,
436 LLVMValueRef out[4])
437 {
438 const struct tgsi_shader_info *info = &ctx->shader->selector->info;
439 unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
440
441 if (vs_blit_property) {
442 LLVMValueRef vertex_id = ctx->abi.vertex_id;
443 LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
444 LLVMIntULE, vertex_id,
445 ctx->i32_1, "");
446 /* Use LLVMIntNE, because we have 3 vertices and only
447 * the middle one should use y2.
448 */
449 LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
450 LLVMIntNE, vertex_id,
451 ctx->i32_1, "");
452
453 if (input_index == 0) {
454 /* Position: */
455 LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
456 ctx->param_vs_blit_inputs);
457 LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
458 ctx->param_vs_blit_inputs + 1);
459
460 LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
461 LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
462 LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
463 LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);
464
465 LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
466 x1, x2, "");
467 LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
468 y1, y2, "");
469
470 out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
471 out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
472 out[2] = LLVMGetParam(ctx->main_fn,
473 ctx->param_vs_blit_inputs + 2);
474 out[3] = ctx->ac.f32_1;
475 return;
476 }
477
478 /* Color or texture coordinates: */
479 assert(input_index == 1);
480
481 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
482 for (int i = 0; i < 4; i++) {
483 out[i] = LLVMGetParam(ctx->main_fn,
484 ctx->param_vs_blit_inputs + 3 + i);
485 }
486 } else {
487 assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
488 LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
489 ctx->param_vs_blit_inputs + 3);
490 LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
491 ctx->param_vs_blit_inputs + 4);
492 LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
493 ctx->param_vs_blit_inputs + 5);
494 LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
495 ctx->param_vs_blit_inputs + 6);
496
497 out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
498 x1, x2, "");
499 out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
500 y1, y2, "");
501 out[2] = LLVMGetParam(ctx->main_fn,
502 ctx->param_vs_blit_inputs + 7);
503 out[3] = LLVMGetParam(ctx->main_fn,
504 ctx->param_vs_blit_inputs + 8);
505 }
506 return;
507 }
508
509 union si_vs_fix_fetch fix_fetch;
510 LLVMValueRef t_list_ptr;
511 LLVMValueRef t_offset;
512 LLVMValueRef t_list;
513 LLVMValueRef vertex_index;
514 LLVMValueRef tmp;
515
516 /* Load the T list */
517 t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);
518
519 t_offset = LLVMConstInt(ctx->i32, input_index, 0);
520
521 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
522
523 vertex_index = LLVMGetParam(ctx->main_fn,
524 ctx->param_vertex_index0 +
525 input_index);
526
527 /* Use the open-coded implementation for all loads of doubles and
528 * of dword-sized data that needs fixups. We need to insert conversion
529 * code anyway, and the amd/common code does it for us.
530 *
531 * Note: On LLVM <= 8, we can only open-code formats with
532 * channel size >= 4 bytes.
533 */
534 bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
535 fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
536 if (opencode ||
537 (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
538 (fix_fetch.u.log_size == 2)) {
539 tmp = ac_build_opencoded_load_format(
540 &ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
541 fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
542 t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0,
543 false, false, true);
544 for (unsigned i = 0; i < 4; ++i)
545 out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
546 return;
547 }
548
549 /* Do multiple loads for special formats. */
550 unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
551 LLVMValueRef fetches[4];
552 unsigned num_fetches;
553 unsigned fetch_stride;
554 unsigned channels_per_fetch;
555
556 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
557 num_fetches = MIN2(required_channels, 3);
558 fetch_stride = 1 << fix_fetch.u.log_size;
559 channels_per_fetch = 1;
560 } else {
561 num_fetches = 1;
562 fetch_stride = 0;
563 channels_per_fetch = required_channels;
564 }
565
566 for (unsigned i = 0; i < num_fetches; ++i) {
567 LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
568 fetches[i] = ac_build_buffer_load_format(&ctx->ac, t_list, vertex_index, voffset,
569 channels_per_fetch, false, true);
570 }
571
572 if (num_fetches == 1 && channels_per_fetch > 1) {
573 LLVMValueRef fetch = fetches[0];
574 for (unsigned i = 0; i < channels_per_fetch; ++i) {
575 tmp = LLVMConstInt(ctx->i32, i, false);
576 fetches[i] = LLVMBuildExtractElement(
577 ctx->ac.builder, fetch, tmp, "");
578 }
579 num_fetches = channels_per_fetch;
580 channels_per_fetch = 1;
581 }
582
583 for (unsigned i = num_fetches; i < 4; ++i)
584 fetches[i] = LLVMGetUndef(ctx->f32);
585
586 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
587 required_channels == 4) {
588 if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
589 fetches[3] = ctx->ac.i32_1;
590 else
591 fetches[3] = ctx->ac.f32_1;
592 } else if (fix_fetch.u.log_size == 3 &&
593 (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
594 fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
595 fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
596 required_channels == 4) {
597 /* For 2_10_10_10, the hardware returns an unsigned value;
598 * convert it to a signed one.
599 */
600 LLVMValueRef tmp = fetches[3];
601 LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);
602
603 /* First, recover the sign-extended signed integer value. */
604 if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
605 tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
606 else
607 tmp = ac_to_integer(&ctx->ac, tmp);
608
609 /* For the integer-like cases, do a natural sign extension.
610 *
611 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
612 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
613 * exponent.
614 */
615 tmp = LLVMBuildShl(ctx->ac.builder, tmp,
616 fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
617 LLVMConstInt(ctx->i32, 7, 0) : c30, "");
618 tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");
619
620 /* Convert back to the right type. */
621 if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
622 LLVMValueRef clamp;
623 LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
624 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
625 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
626 tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
627 } else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
628 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
629 }
630
631 fetches[3] = tmp;
632 }
633
634 for (unsigned i = 0; i < 4; ++i)
635 out[i] = ac_to_float(&ctx->ac, fetches[i]);
636 }
637
638 static void declare_input_vs(
639 struct si_shader_context *ctx,
640 unsigned input_index,
641 const struct tgsi_full_declaration *decl,
642 LLVMValueRef out[4])
643 {
644 si_llvm_load_input_vs(ctx, input_index, out);
645 }
646
647 static LLVMValueRef get_primitive_id(struct si_shader_context *ctx,
648 unsigned swizzle)
649 {
650 if (swizzle > 0)
651 return ctx->i32_0;
652
653 switch (ctx->type) {
654 case PIPE_SHADER_VERTEX:
655 return LLVMGetParam(ctx->main_fn,
656 ctx->param_vs_prim_id);
657 case PIPE_SHADER_TESS_CTRL:
658 return ctx->abi.tcs_patch_id;
659 case PIPE_SHADER_TESS_EVAL:
660 return ctx->abi.tes_patch_id;
661 case PIPE_SHADER_GEOMETRY:
662 return ctx->abi.gs_prim_id;
663 default:
664 assert(0);
665 return ctx->i32_0;
666 }
667 }
668
669 /**
670 * Return the value of tgsi_ind_register for indexing.
671 * This is the indirect index with the constant offset added to it.
672 */
673 LLVMValueRef si_get_indirect_index(struct si_shader_context *ctx,
674 const struct tgsi_ind_register *ind,
675 unsigned addr_mul,
676 int rel_index)
677 {
678 LLVMValueRef result;
679
680 if (ind->File == TGSI_FILE_ADDRESS) {
681 result = ctx->addrs[ind->Index][ind->Swizzle];
682 result = LLVMBuildLoad(ctx->ac.builder, result, "");
683 } else {
684 struct tgsi_full_src_register src = {};
685
686 src.Register.File = ind->File;
687 src.Register.Index = ind->Index;
688
689 /* Set the second index to 0 for constants. */
690 if (ind->File == TGSI_FILE_CONSTANT)
691 src.Register.Dimension = 1;
692
693 result = ctx->bld_base.emit_fetch_funcs[ind->File](&ctx->bld_base, &src,
694 TGSI_TYPE_SIGNED,
695 ind->Swizzle);
696 result = ac_to_integer(&ctx->ac, result);
697 }
698
699 return ac_build_imad(&ctx->ac, result, LLVMConstInt(ctx->i32, addr_mul, 0),
700 LLVMConstInt(ctx->i32, rel_index, 0));
701 }
702
703 /**
704 * Like si_get_indirect_index, but restricts the return value to a (possibly
705 * undefined) value inside [0..num).
706 */
707 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
708 const struct tgsi_ind_register *ind,
709 int rel_index, unsigned num)
710 {
711 LLVMValueRef result = si_get_indirect_index(ctx, ind, 1, rel_index);
712
713 return si_llvm_bound_index(ctx, result, num);
714 }
715
716 static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
717 LLVMValueRef vertex_dw_stride,
718 LLVMValueRef base_addr,
719 LLVMValueRef vertex_index,
720 LLVMValueRef param_index,
721 unsigned input_index,
722 ubyte *name,
723 ubyte *index,
724 bool is_patch)
725 {
726 if (vertex_dw_stride) {
727 base_addr = ac_build_imad(&ctx->ac, vertex_index,
728 vertex_dw_stride, base_addr);
729 }
730
731 if (param_index) {
732 base_addr = ac_build_imad(&ctx->ac, param_index,
733 LLVMConstInt(ctx->i32, 4, 0), base_addr);
734 }
735
736 int param = is_patch ?
737 si_shader_io_get_unique_index_patch(name[input_index],
738 index[input_index]) :
739 si_shader_io_get_unique_index(name[input_index],
740 index[input_index], false);
741
742 /* Add the base address of the element. */
743 return LLVMBuildAdd(ctx->ac.builder, base_addr,
744 LLVMConstInt(ctx->i32, param * 4, 0), "");
745 }
746
747 /**
748 * Calculate a dword address given an input or output register and a stride.
749 */
750 static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
751 const struct tgsi_full_dst_register *dst,
752 const struct tgsi_full_src_register *src,
753 LLVMValueRef vertex_dw_stride,
754 LLVMValueRef base_addr)
755 {
756 struct tgsi_shader_info *info = &ctx->shader->selector->info;
757 ubyte *name, *index, *array_first;
758 int input_index;
759 struct tgsi_full_dst_register reg;
760 LLVMValueRef vertex_index = NULL;
761 LLVMValueRef ind_index = NULL;
762
763 /* Set the register description. The address computation is the same
764 * for sources and destinations. */
765 if (src) {
766 reg.Register.File = src->Register.File;
767 reg.Register.Index = src->Register.Index;
768 reg.Register.Indirect = src->Register.Indirect;
769 reg.Register.Dimension = src->Register.Dimension;
770 reg.Indirect = src->Indirect;
771 reg.Dimension = src->Dimension;
772 reg.DimIndirect = src->DimIndirect;
773 } else
774 reg = *dst;
775
776 /* If the register is 2-dimensional (e.g. an array of vertices
777 * in a primitive), calculate the base address of the vertex. */
778 if (reg.Register.Dimension) {
779 if (reg.Dimension.Indirect)
780 vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
781 1, reg.Dimension.Index);
782 else
783 vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
784 }
785
786 /* Get information about the register. */
787 if (reg.Register.File == TGSI_FILE_INPUT) {
788 name = info->input_semantic_name;
789 index = info->input_semantic_index;
790 array_first = info->input_array_first;
791 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
792 name = info->output_semantic_name;
793 index = info->output_semantic_index;
794 array_first = info->output_array_first;
795 } else {
796 assert(0);
797 return NULL;
798 }
799
800 if (reg.Register.Indirect) {
801 /* Add the relative address of the element. */
802 if (reg.Indirect.ArrayID)
803 input_index = array_first[reg.Indirect.ArrayID];
804 else
805 input_index = reg.Register.Index;
806
807 ind_index = si_get_indirect_index(ctx, &reg.Indirect,
808 1, reg.Register.Index - input_index);
809 } else {
810 input_index = reg.Register.Index;
811 }
812
813 return get_dw_address_from_generic_indices(ctx, vertex_dw_stride,
814 base_addr, vertex_index,
815 ind_index, input_index,
816 name, index,
817 !reg.Register.Dimension);
818 }
819
820 /* The offchip buffer layout for TCS->TES is
821 *
822 * - attribute 0 of patch 0 vertex 0
823 * - attribute 0 of patch 0 vertex 1
824 * - attribute 0 of patch 0 vertex 2
825 * ...
826 * - attribute 0 of patch 1 vertex 0
827 * - attribute 0 of patch 1 vertex 1
828 * ...
829 * - attribute 1 of patch 0 vertex 0
830 * - attribute 1 of patch 0 vertex 1
831 * ...
832 * - per patch attribute 0 of patch 0
833 * - per patch attribute 0 of patch 1
834 * ...
835 *
836 * Note that every attribute has 4 components.
837 */
838 static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
839 LLVMValueRef rel_patch_id,
840 LLVMValueRef vertex_index,
841 LLVMValueRef param_index)
842 {
843 LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
844 LLVMValueRef param_stride, constant16;
845
846 vertices_per_patch = get_num_tcs_out_vertices(ctx);
847 num_patches = si_unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
848 total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
849 num_patches, "");
850
851 constant16 = LLVMConstInt(ctx->i32, 16, 0);
852 if (vertex_index) {
853 base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
854 vertices_per_patch, vertex_index);
855 param_stride = total_vertices;
856 } else {
857 base_addr = rel_patch_id;
858 param_stride = num_patches;
859 }
860
861 base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
862 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
863
864 if (!vertex_index) {
865 LLVMValueRef patch_data_offset =
866 si_unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);
867
868 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
869 patch_data_offset, "");
870 }
871 return base_addr;
872 }
873
874 /* This is a generic helper that can be shared by the NIR and TGSI backends */
875 static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
876 struct si_shader_context *ctx,
877 LLVMValueRef vertex_index,
878 LLVMValueRef param_index,
879 unsigned param_base,
880 ubyte *name,
881 ubyte *index,
882 bool is_patch)
883 {
884 unsigned param_index_base;
885
886 param_index_base = is_patch ?
887 si_shader_io_get_unique_index_patch(name[param_base], index[param_base]) :
888 si_shader_io_get_unique_index(name[param_base], index[param_base], false);
889
890 if (param_index) {
891 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
892 LLVMConstInt(ctx->i32, param_index_base, 0),
893 "");
894 } else {
895 param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
896 }
897
898 return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
899 vertex_index, param_index);
900 }
901
902 static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
903 struct si_shader_context *ctx,
904 const struct tgsi_full_dst_register *dst,
905 const struct tgsi_full_src_register *src)
906 {
907 struct tgsi_shader_info *info = &ctx->shader->selector->info;
908 ubyte *name, *index, *array_first;
909 struct tgsi_full_src_register reg;
910 LLVMValueRef vertex_index = NULL;
911 LLVMValueRef param_index = NULL;
912 unsigned param_base;
913
914 reg = src ? *src : tgsi_full_src_register_from_dst(dst);
915
916 if (reg.Register.Dimension) {
917
918 if (reg.Dimension.Indirect)
919 vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
920 1, reg.Dimension.Index);
921 else
922 vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
923 }
924
925 /* Get information about the register. */
926 if (reg.Register.File == TGSI_FILE_INPUT) {
927 name = info->input_semantic_name;
928 index = info->input_semantic_index;
929 array_first = info->input_array_first;
930 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
931 name = info->output_semantic_name;
932 index = info->output_semantic_index;
933 array_first = info->output_array_first;
934 } else {
935 assert(0);
936 return NULL;
937 }
938
939 if (reg.Register.Indirect) {
940 if (reg.Indirect.ArrayID)
941 param_base = array_first[reg.Indirect.ArrayID];
942 else
943 param_base = reg.Register.Index;
944
945 param_index = si_get_indirect_index(ctx, &reg.Indirect,
946 1, reg.Register.Index - param_base);
947
948 } else {
949 param_base = reg.Register.Index;
950 }
951
952 return get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
953 param_index, param_base,
954 name, index, !reg.Register.Dimension);
955 }
956
957 static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
958 LLVMTypeRef type, unsigned swizzle,
959 LLVMValueRef buffer, LLVMValueRef offset,
960 LLVMValueRef base, bool can_speculate)
961 {
962 struct si_shader_context *ctx = si_shader_context(bld_base);
963 LLVMValueRef value, value2;
964 LLVMTypeRef vec_type = LLVMVectorType(type, 4);
965
966 if (swizzle == ~0) {
967 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
968 0, 1, 0, can_speculate, false);
969
970 return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
971 }
972
973 if (!llvm_type_is_64bit(ctx, type)) {
974 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
975 0, 1, 0, can_speculate, false);
976
977 value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
978 return LLVMBuildExtractElement(ctx->ac.builder, value,
979 LLVMConstInt(ctx->i32, swizzle, 0), "");
980 }
981
982 value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
983 swizzle * 4, 1, 0, can_speculate, false);
984
985 value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
986 swizzle * 4 + 4, 1, 0, can_speculate, false);
987
988 return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
989 }
990
991 /**
992 * Load from LDS.
993 *
994 * \param type output value type
995 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
996 * \param dw_addr address in dwords
997 */
998 static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
999 LLVMTypeRef type, unsigned swizzle,
1000 LLVMValueRef dw_addr)
1001 {
1002 struct si_shader_context *ctx = si_shader_context(bld_base);
1003 LLVMValueRef value;
1004
1005 if (swizzle == ~0) {
1006 LLVMValueRef values[TGSI_NUM_CHANNELS];
1007
1008 for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
1009 values[chan] = lds_load(bld_base, type, chan, dw_addr);
1010
1011 return ac_build_gather_values(&ctx->ac, values,
1012 TGSI_NUM_CHANNELS);
1013 }
1014
1015 /* Split 64-bit loads. */
1016 if (llvm_type_is_64bit(ctx, type)) {
1017 LLVMValueRef lo, hi;
1018
1019 lo = lds_load(bld_base, ctx->i32, swizzle, dw_addr);
1020 hi = lds_load(bld_base, ctx->i32, swizzle + 1, dw_addr);
1021 return si_llvm_emit_fetch_64bit(bld_base, type, lo, hi);
1022 }
1023
1024 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1025 LLVMConstInt(ctx->i32, swizzle, 0), "");
1026
1027 value = ac_lds_load(&ctx->ac, dw_addr);
1028
1029 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
1030 }
1031
1032 /**
1033 * Store to LDS.
1034 *
1035 * \param swizzle offset (typically 0..3)
1036 * \param dw_addr address in dwords
1037 * \param value value to store
1038 */
1039 static void lds_store(struct si_shader_context *ctx,
1040 unsigned dw_offset_imm, LLVMValueRef dw_addr,
1041 LLVMValueRef value)
1042 {
1043 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1044 LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");
1045
1046 ac_lds_store(&ctx->ac, dw_addr, value);
1047 }
1048
/* Selects which tessellation ring get_tess_ring_descriptor() describes. */
enum si_tess_ring {
	TCS_FACTOR_RING,       /* tess factor ring; placed right after the off-chip ring */
	TESS_OFFCHIP_RING_TCS, /* off-chip ring as addressed by the TCS */
	TESS_OFFCHIP_RING_TES, /* off-chip ring as addressed by the TES */
};
1054
/* Build a buffer resource descriptor for one of the tessellation rings.
 *
 * The ring base address comes from a shader parameter; the TCS-facing
 * descriptors reconstruct it from the high 13 bits only. The tess factor
 * ring is located immediately after the off-chip ring.
 */
static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
					     enum si_tess_ring ring)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	/* TES gets the address in its own dedicated SGPR; the TCS packs it
	 * into the out_lds_layout parameter. */
	unsigned param = ring == TESS_OFFCHIP_RING_TES ? ctx->param_tes_offchip_addr :
			 ctx->param_tcs_out_lds_layout;
	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);

	/* TCS only receives high 13 bits of the address. */
	if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
		addr = LLVMBuildAnd(builder, addr,
				    LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
	}

	/* The tess factor ring starts after the off-chip ring. */
	if (ring == TCS_FACTOR_RING) {
		unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
		addr = LLVMBuildAdd(builder, addr,
				    LLVMConstInt(ctx->i32, tf_offset, 0), "");
	}

	/* Assemble the 4-dword V# (buffer resource descriptor). */
	LLVMValueRef desc[4];
	desc[0] = addr;
	desc[1] = LLVMConstInt(ctx->i32,
			       S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
	/* num_records = 0xffffffff (effectively unbounded). */
	desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
	desc[3] = LLVMConstInt(ctx->i32,
			       S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			       S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			       S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			       S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			       S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			       S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0);

	return ac_build_gather_values(&ctx->ac, desc, 4);
}
1090
1091 static LLVMValueRef fetch_input_tcs(
1092 struct lp_build_tgsi_context *bld_base,
1093 const struct tgsi_full_src_register *reg,
1094 enum tgsi_opcode_type type, unsigned swizzle_in)
1095 {
1096 struct si_shader_context *ctx = si_shader_context(bld_base);
1097 LLVMValueRef dw_addr, stride;
1098 unsigned swizzle = swizzle_in & 0xffff;
1099 stride = get_tcs_in_vertex_dw_stride(ctx);
1100 dw_addr = get_tcs_in_current_patch_offset(ctx);
1101 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1102
1103 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1104 }
1105
/* NIR ABI callback: load a TCS input or output varying from LDS.
 *
 * \p load_input selects inputs vs outputs; \p is_patch selects the
 * per-patch output area (no per-vertex stride) vs per-vertex outputs.
 */
static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
					     LLVMTypeRef type,
					     LLVMValueRef vertex_index,
					     LLVMValueRef param_index,
					     unsigned const_index,
					     unsigned location,
					     unsigned driver_location,
					     unsigned component,
					     unsigned num_components,
					     bool is_patch,
					     bool is_compact,
					     bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMValueRef dw_addr, stride;

	/* NIR driver locations are in vec4 slots (4 dwords each). */
	driver_location = driver_location / 4;

	if (load_input) {
		stride = get_tcs_in_vertex_dw_stride(ctx);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (is_patch) {
			/* Per-patch outputs: no per-vertex stride. */
			stride = NULL;
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		} else {
			stride = get_tcs_out_vertex_dw_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		}
	}

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	ubyte *names;
	ubyte *indices;
	if (load_input) {
		names = info->input_semantic_name;
		indices = info->input_semantic_index;
	} else {
		names = info->output_semantic_name;
		indices = info->output_semantic_index;
	}

	dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
						      vertex_index, param_index,
						      driver_location,
						      names, indices,
						      is_patch);

	/* Load each requested component; 64-bit components take two dword
	 * slots each. */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = lds_load(bld_base, type, offset, dw_addr);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
1175
1176 static LLVMValueRef fetch_output_tcs(
1177 struct lp_build_tgsi_context *bld_base,
1178 const struct tgsi_full_src_register *reg,
1179 enum tgsi_opcode_type type, unsigned swizzle_in)
1180 {
1181 struct si_shader_context *ctx = si_shader_context(bld_base);
1182 LLVMValueRef dw_addr, stride;
1183 unsigned swizzle = (swizzle_in & 0xffff);
1184
1185 if (reg->Register.Dimension) {
1186 stride = get_tcs_out_vertex_dw_stride(ctx);
1187 dw_addr = get_tcs_out_current_patch_offset(ctx);
1188 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1189 } else {
1190 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1191 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1192 }
1193
1194 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1195 }
1196
1197 static LLVMValueRef fetch_input_tes(
1198 struct lp_build_tgsi_context *bld_base,
1199 const struct tgsi_full_src_register *reg,
1200 enum tgsi_opcode_type type, unsigned swizzle_in)
1201 {
1202 struct si_shader_context *ctx = si_shader_context(bld_base);
1203 LLVMValueRef base, addr;
1204 unsigned swizzle = (swizzle_in & 0xffff);
1205
1206 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1207 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1208
1209 return buffer_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle,
1210 ctx->tess_offchip_ring, base, addr, true);
1211 }
1212
/* NIR ABI callback: load a TES input from the off-chip tess buffer. */
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMTypeRef type,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef base, addr;

	/* NIR driver locations are in vec4 slots (4 dwords each). */
	driver_location = driver_location / 4;

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->input_semantic_name,
							       info->input_semantic_index,
							       is_patch);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load(), but for now this maximises code sharing
	 * between the NIR and TGSI backends.
	 */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		/* 64-bit components occupy two dword slots each. */
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = buffer_load(&ctx->bld_base, type, offset,
						   ctx->tess_offchip_ring, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
1266
/* TGSI store callback: write a TCS output.
 *
 * Outputs are written to LDS (so later TCS reads / the epilog can see them)
 * and to the off-chip buffer (so the TES can read them). Tess factors are
 * special-cased: they only go to LDS and/or the epilog's VGPRs.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     unsigned index,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[index];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, index, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output. */
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !sh_info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);

	uint32_t writemask = reg->Register.WriteMask;
	while (writemask) {
		chan_index = u_bit_scan(&writemask);
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(ctx, chan_index, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan_index] = value;

		/* Partial writemask: store each written channel separately.
		 * Tess factors never go to the off-chip buffer here. */
		if (reg->Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan_index]);
			} else if (chan_index < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan_index]);
			}
		}
	}

	/* Full writemask: emit a single vec4 store instead of four dwords. */
	if (reg->Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1363
/* NIR ABI callback: store a TCS output to LDS and the off-chip buffer.
 * Mirrors store_output_tcs() for the NIR path; see there for the
 * LDS-vs-buffer-vs-VGPR rules for tess factors.
 */
static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    const struct nir_variable *var,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    LLVMValueRef src,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	unsigned driver_location = var->data.driver_location;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* NIR driver locations are in vec4 slots (4 dwords each). */
	driver_location = driver_location / 4;

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		/* NOTE: param_index intentionally stays NULL for a direct
		 * index of 0 — the tess-factor check below relies on it. */
		if (const_index != 0)
			param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	if (!is_patch) {
		/* Per-vertex output. */
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (!param_index) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->output_semantic_name,
							       info->output_semantic_index,
							       is_patch);

	for (unsigned chan = 0; chan < 4; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(ctx, chan, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		/* Partial writemask: store each written channel separately. */
		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * chan, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	/* Full writemask: emit a single vec4 store instead of four dwords. */
	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, 1, 0, true, false);
	}
}
1479
/* Load one GS input channel for a given vertex.
 *
 * On GFX9+ the ESGS ring lives in LDS; on older chips it is read from the
 * ESGS ring buffer in memory. \p swizzle may be ~0 to load a whole vec4.
 */
LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
				   unsigned input_index,
				   unsigned vtx_offset_param,
				   LLVMTypeRef type,
				   unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_shader *shader = ctx->shader;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		/* Two 16-bit vertex offsets are packed per SGPR. */
		switch (index / 2) {
		case 0:
			vtx_offset = si_unpack_param(ctx, ctx->param_gs_vtx01_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = si_unpack_param(ctx, ctx->param_gs_vtx23_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = si_unpack_param(ctx, ctx->param_gs_vtx45_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		/* Each IO slot is a vec4 (4 dwords) in LDS. */
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Whole vec4: load each channel recursively. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return ac_build_gather_values(&ctx->ac, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ctx->gs_vtx_offset[vtx_offset_param];

	vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
				  LLVMConstInt(ctx->i32, 4, 0), "");

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		/* 64-bit: fetch the second dword and merge both halves. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1558
1559 static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
1560 unsigned location,
1561 unsigned driver_location,
1562 unsigned component,
1563 unsigned num_components,
1564 unsigned vertex_index,
1565 unsigned const_index,
1566 LLVMTypeRef type)
1567 {
1568 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1569
1570 LLVMValueRef value[4];
1571 for (unsigned i = 0; i < num_components; i++) {
1572 unsigned offset = i;
1573 if (llvm_type_is_64bit(ctx, type))
1574 offset *= 2;
1575
1576 offset += component;
1577 value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4,
1578 vertex_index, type, offset);
1579 }
1580
1581 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1582 }
1583
1584 static LLVMValueRef fetch_input_gs(
1585 struct lp_build_tgsi_context *bld_base,
1586 const struct tgsi_full_src_register *reg,
1587 enum tgsi_opcode_type type,
1588 unsigned swizzle_in)
1589 {
1590 struct si_shader_context *ctx = si_shader_context(bld_base);
1591 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1592 unsigned swizzle = swizzle_in & 0xffff;
1593
1594 unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
1595 if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
1596 return get_primitive_id(ctx, swizzle);
1597
1598 if (!reg->Register.Dimension)
1599 return NULL;
1600
1601 return si_llvm_load_input_gs(&ctx->abi, reg->Register.Index,
1602 reg->Dimension.Index,
1603 tgsi2llvmtype(bld_base, type),
1604 swizzle);
1605 }
1606
1607 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1608 {
1609 switch (interpolate) {
1610 case TGSI_INTERPOLATE_CONSTANT:
1611 return 0;
1612
1613 case TGSI_INTERPOLATE_LINEAR:
1614 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1615 return SI_PARAM_LINEAR_SAMPLE;
1616 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1617 return SI_PARAM_LINEAR_CENTROID;
1618 else
1619 return SI_PARAM_LINEAR_CENTER;
1620 break;
1621 case TGSI_INTERPOLATE_COLOR:
1622 case TGSI_INTERPOLATE_PERSPECTIVE:
1623 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1624 return SI_PARAM_PERSP_SAMPLE;
1625 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1626 return SI_PARAM_PERSP_CENTROID;
1627 else
1628 return SI_PARAM_PERSP_CENTER;
1629 break;
1630 default:
1631 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1632 return -1;
1633 }
1634 }
1635
1636 static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx,
1637 unsigned attr_index, unsigned chan,
1638 LLVMValueRef prim_mask,
1639 LLVMValueRef i, LLVMValueRef j)
1640 {
1641 if (i || j) {
1642 return ac_build_fs_interp(&ctx->ac,
1643 LLVMConstInt(ctx->i32, chan, 0),
1644 LLVMConstInt(ctx->i32, attr_index, 0),
1645 prim_mask, i, j);
1646 }
1647 return ac_build_fs_interp_mov(&ctx->ac,
1648 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1649 LLVMConstInt(ctx->i32, chan, 0),
1650 LLVMConstInt(ctx->i32, attr_index, 0),
1651 prim_mask);
1652 }
1653
1654 /**
1655 * Interpolate a fragment shader input.
1656 *
1657 * @param ctx context
1658 * @param input_index index of the input in hardware
1659 * @param semantic_name TGSI_SEMANTIC_*
1660 * @param semantic_index semantic index
1661 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1662 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1663 * @param interp_param interpolation weights (i,j)
1664 * @param prim_mask SI_PARAM_PRIM_MASK
1665 * @param face SI_PARAM_FRONT_FACE
1666 * @param result the return value (4 components)
1667 */
1668 static void interp_fs_input(struct si_shader_context *ctx,
1669 unsigned input_index,
1670 unsigned semantic_name,
1671 unsigned semantic_index,
1672 unsigned num_interp_inputs,
1673 unsigned colors_read_mask,
1674 LLVMValueRef interp_param,
1675 LLVMValueRef prim_mask,
1676 LLVMValueRef face,
1677 LLVMValueRef result[4])
1678 {
1679 LLVMValueRef i = NULL, j = NULL;
1680 unsigned chan;
1681
1682 /* fs.constant returns the param from the middle vertex, so it's not
1683 * really useful for flat shading. It's meant to be used for custom
1684 * interpolation (but the intrinsic can't fetch from the other two
1685 * vertices).
1686 *
1687 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1688 * to do the right thing. The only reason we use fs.constant is that
1689 * fs.interp cannot be used on integers, because they can be equal
1690 * to NaN.
1691 *
1692 * When interp is false we will use fs.constant or for newer llvm,
1693 * amdgcn.interp.mov.
1694 */
1695 bool interp = interp_param != NULL;
1696
1697 if (interp) {
1698 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
1699 LLVMVectorType(ctx->f32, 2), "");
1700
1701 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1702 ctx->i32_0, "");
1703 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1704 ctx->i32_1, "");
1705 }
1706
1707 if (semantic_name == TGSI_SEMANTIC_COLOR &&
1708 ctx->shader->key.part.ps.prolog.color_two_side) {
1709 LLVMValueRef is_face_positive;
1710
1711 /* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
1712 * otherwise it's at offset "num_inputs".
1713 */
1714 unsigned back_attr_offset = num_interp_inputs;
1715 if (semantic_index == 1 && colors_read_mask & 0xf)
1716 back_attr_offset += 1;
1717
1718 is_face_positive = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
1719 face, ctx->i32_0, "");
1720
1721 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1722 LLVMValueRef front, back;
1723
1724 front = si_build_fs_interp(ctx,
1725 input_index, chan,
1726 prim_mask, i, j);
1727 back = si_build_fs_interp(ctx,
1728 back_attr_offset, chan,
1729 prim_mask, i, j);
1730
1731 result[chan] = LLVMBuildSelect(ctx->ac.builder,
1732 is_face_positive,
1733 front,
1734 back,
1735 "");
1736 }
1737 } else if (semantic_name == TGSI_SEMANTIC_FOG) {
1738 result[0] = si_build_fs_interp(ctx, input_index,
1739 0, prim_mask, i, j);
1740 result[1] =
1741 result[2] = LLVMConstReal(ctx->f32, 0.0f);
1742 result[3] = LLVMConstReal(ctx->f32, 1.0f);
1743 } else {
1744 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1745 result[chan] = si_build_fs_interp(ctx,
1746 input_index, chan,
1747 prim_mask, i, j);
1748 }
1749 }
1750 }
1751
/* Load (interpolate) one PS input into out[0..3].
 *
 * COLOR inputs may arrive pre-interpolated in VGPRs set up by the PS prolog
 * (two-side color handling); everything else goes through interp_fs_input().
 */
void si_llvm_load_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;
	enum tgsi_semantic semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	enum tgsi_interpolate_mode interp_mode = info->input_interpolate[input_index];
	enum tgsi_interpolate_loc interp_loc = info->input_interpolate_loc[input_index];

	/* Get colors from input VGPRs (set by the prolog). */
	if (semantic_name == TGSI_SEMANTIC_COLOR) {
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (semantic_index * 4);
		/* Color VGPRs follow POS_FIXED_PT; color 1 comes after however
		 * many color-0 components were read. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (semantic_index ? util_bitcount(colors_read & 0xf) : 0);
		LLVMValueRef undef = LLVMGetUndef(ctx->f32);

		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
		return;
	}

	/* 0 means flat (no barycentrics), -1 means unknown mode. */
	interp_param_idx = lookup_interp_param_index(interp_mode, interp_loc);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, semantic_name,
			semantic_index, 0, /* this param is unused */
			shader->selector->info.colors_read, interp_param,
			ctx->abi.prim_mask,
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1796
/* TGSI declaration hook for PS inputs; all the work happens in
 * si_llvm_load_input_fs(). The declaration itself is unused here. */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_fs(ctx, input_index, out);
}
1805
/* Return gl_SampleID, unpacked from bits 8..11 of the ANCILLARY VGPR. */
LLVMValueRef si_get_sample_id(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1810
1811 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
1812 {
1813 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1814
1815 /* For non-indexed draws, the base vertex set by the driver
1816 * (for direct draws) or the CP (for indirect draws) is the
1817 * first vertex ID, but GLSL expects 0 to be returned.
1818 */
1819 LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn,
1820 ctx->param_vs_state_bits);
1821 LLVMValueRef indexed;
1822
1823 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
1824 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");
1825
1826 return LLVMBuildSelect(ctx->ac.builder, indexed, ctx->abi.base_vertex,
1827 ctx->i32_0, "");
1828 }
1829
1830 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
1831 {
1832 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1833
1834 LLVMValueRef values[3];
1835 LLVMValueRef result;
1836 unsigned i;
1837 unsigned *properties = ctx->shader->selector->info.properties;
1838
1839 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
1840 unsigned sizes[3] = {
1841 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
1842 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
1843 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
1844 };
1845
1846 for (i = 0; i < 3; ++i)
1847 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
1848
1849 result = ac_build_gather_values(&ctx->ac, values, 3);
1850 } else {
1851 result = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
1852 }
1853
1854 return result;
1855 }
1856
1857 /**
1858 * Load a dword from a constant buffer.
1859 */
1860 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1861 LLVMValueRef resource,
1862 LLVMValueRef offset)
1863 {
1864 return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
1865 0, 0, 0, true, true);
1866 }
1867
1868 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
1869 {
1870 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1871 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1872 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1873 LLVMValueRef resource = ac_build_load_to_sgpr(&ctx->ac, desc, buf_index);
1874
1875 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1876 LLVMValueRef offset0 = LLVMBuildMul(ctx->ac.builder, sample_id, LLVMConstInt(ctx->i32, 8, 0), "");
1877 LLVMValueRef offset1 = LLVMBuildAdd(ctx->ac.builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1878
1879 LLVMValueRef pos[4] = {
1880 buffer_load_const(ctx, resource, offset0),
1881 buffer_load_const(ctx, resource, offset1),
1882 LLVMConstReal(ctx->f32, 0),
1883 LLVMConstReal(ctx->f32, 0)
1884 };
1885
1886 return ac_build_gather_values(&ctx->ac, pos, 4);
1887 }
1888
/* ABI callback: return the sample coverage mask as an integer. */
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	return ac_to_integer(&ctx->ac, abi->sample_coverage);
}
1894
1895 static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
1896 {
1897 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1898 LLVMValueRef coord[4] = {
1899 LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
1900 LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
1901 ctx->ac.f32_0,
1902 ctx->ac.f32_0
1903 };
1904
1905 /* For triangles, the vector should be (u, v, 1-u-v). */
1906 if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
1907 PIPE_PRIM_TRIANGLES) {
1908 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1909 LLVMBuildFAdd(ctx->ac.builder,
1910 coord[0], coord[1], ""), "");
1911 }
1912 return ac_build_gather_values(&ctx->ac, coord, 4);
1913 }
1914
1915 static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
1916 unsigned semantic_name)
1917 {
1918 LLVMValueRef base, addr;
1919
1920 int param = si_shader_io_get_unique_index_patch(semantic_name, 0);
1921
1922 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1923 addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
1924 LLVMConstInt(ctx->i32, param, 0));
1925
1926 return buffer_load(&ctx->bld_base, ctx->f32,
1927 ~0, ctx->tess_offchip_ring, base, addr, true);
1928
1929 }
1930
1931 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
1932 unsigned varying_id)
1933 {
1934 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1935 unsigned semantic_name;
1936
1937 switch (varying_id) {
1938 case VARYING_SLOT_TESS_LEVEL_INNER:
1939 semantic_name = TGSI_SEMANTIC_TESSINNER;
1940 break;
1941 case VARYING_SLOT_TESS_LEVEL_OUTER:
1942 semantic_name = TGSI_SEMANTIC_TESSOUTER;
1943 break;
1944 default:
1945 unreachable("unknown tess level");
1946 }
1947
1948 return load_tess_level(ctx, semantic_name);
1949
1950 }
1951
1952 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
1953 {
1954 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1955 if (ctx->type == PIPE_SHADER_TESS_CTRL)
1956 return si_unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 6);
1957 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1958 return get_num_tcs_out_vertices(ctx);
1959 else
1960 unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
1961 }
1962
/**
 * Load the TGSI system value named by \p decl and cache the resulting LLVM
 * value in ctx->system_values[index] for later operand fetches.
 *
 * \param ctx    shader compilation context
 * \param index  system value register index (< RADEON_LLVM_MAX_SYSTEM_VALUES)
 * \param decl   TGSI declaration selecting which system value to build
 */
void si_load_system_value(struct si_shader_context *ctx,
			  unsigned index,
			  const struct tgsi_full_declaration *decl)
{
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = ctx->abi.instance_id;
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID = relative vertex id + base vertex. */
		value = LLVMBuildAdd(ctx->ac.builder,
				     ctx->abi.vertex_id,
				     ctx->abi.base_vertex, "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = get_base_vertex(&ctx->abi);
		break;

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = ctx->abi.start_instance;
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = ctx->abi.draw_id;
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS packs the invocation id in bits [12:8] of tcs_rel_ids;
		 * GS gets it as a dedicated ABI value. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = ctx->abi.gs_invocation_id;
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			/* NOTE(review): .w is the reciprocal of the input —
			 * presumably the hw supplies W and TGSI wants 1/W;
			 * confirm against the SPI setup. */
			ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
				      LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT)),
		};
		value = ac_build_gather_values(&ctx->ac, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = ctx->abi.front_face;
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = si_get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* The sub-pixel sample position is the fractional part of
		 * the pixel-center position inputs. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = ac_build_fract(&ctx->ac, pos[0], 32);
		pos[1] = ac_build_fract(&ctx->ac, pos[1], 32);
		value = ac_build_gather_values(&ctx->ac, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
		value = si_load_tess_coord(&ctx->abi);
		break;

	case TGSI_SEMANTIC_VERTICESIN:
		value = si_load_patch_vertices_in(&ctx->abi);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
		value = load_tess_level(ctx, decl->Semantic.Name);
		break;

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels live in a driver constant buffer:
		 * 4 outer floats at offset 0, 4 inner floats at offset 16. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = ac_build_gather_values(&ctx->ac, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(ctx, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = ctx->abi.num_work_groups;
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
		value = get_block_size(&ctx->abi);
		break;

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Unused dimensions read as 0. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->abi.workgroup_ids[i]) {
				values[i] = ctx->abi.workgroup_ids[i];
			}
		}
		value = ac_build_gather_values(&ctx->ac, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = ctx->abi.local_invocation_ids;
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		value = ac_build_load_helper_invocation(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* Wave size is 64 on these chips. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* eq_mask = 1 << thread_id, returned as v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* ge = ~0 << id, gt = ~1 << id, le = ~gt, lt = ~ge. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_CS_USER_DATA:
		value = LLVMGetParam(ctx->main_fn, ctx->param_cs_user_data);
		break;

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
2166
2167 void si_declare_compute_memory(struct si_shader_context *ctx)
2168 {
2169 struct si_shader_selector *sel = ctx->shader->selector;
2170 unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];
2171
2172 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
2173 LLVMValueRef var;
2174
2175 assert(!ctx->ac.lds);
2176
2177 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2178 LLVMArrayType(ctx->i8, lds_size),
2179 "compute_lds",
2180 AC_ADDR_SPACE_LDS);
2181 LLVMSetAlignment(var, 4);
2182
2183 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
2184 }
2185
2186 void si_tgsi_declare_compute_memory(struct si_shader_context *ctx,
2187 const struct tgsi_full_declaration *decl)
2188 {
2189 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
2190 assert(decl->Range.First == decl->Range.Last);
2191
2192 si_declare_compute_memory(ctx);
2193 }
2194
2195 static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
2196 {
2197 LLVMValueRef ptr =
2198 LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
2199 struct si_shader_selector *sel = ctx->shader->selector;
2200
2201 /* Do the bounds checking with a descriptor, because
2202 * doing computation and manual bounds checking of 64-bit
2203 * addresses generates horrible VALU code with very high
2204 * VGPR usage and very low SIMD occupancy.
2205 */
2206 ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");
2207
2208 LLVMValueRef desc0, desc1;
2209 desc0 = ptr;
2210 desc1 = LLVMConstInt(ctx->i32,
2211 S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
2212
2213 LLVMValueRef desc_elems[] = {
2214 desc0,
2215 desc1,
2216 LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
2217 LLVMConstInt(ctx->i32,
2218 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2219 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2220 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2221 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2222 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2223 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0)
2224 };
2225
2226 return ac_build_gather_values(&ctx->ac, desc_elems, 4);
2227 }
2228
2229 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
2230 {
2231 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
2232 ctx->param_const_and_shader_buffers);
2233
2234 return ac_build_load_to_sgpr(&ctx->ac, list_ptr,
2235 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
2236 }
2237
2238 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
2239 {
2240 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2241 struct si_shader_selector *sel = ctx->shader->selector;
2242
2243 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
2244
2245 if (sel->info.const_buffers_declared == 1 &&
2246 sel->info.shader_buffers_declared == 0) {
2247 return load_const_buffer_desc_fast_path(ctx);
2248 }
2249
2250 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
2251 index = LLVMBuildAdd(ctx->ac.builder, index,
2252 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
2253
2254 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
2255 }
2256
2257 static LLVMValueRef
2258 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
2259 {
2260 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2261 LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
2262 ctx->param_const_and_shader_buffers);
2263
2264 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
2265 index = LLVMBuildSub(ctx->ac.builder,
2266 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
2267 index, "");
2268
2269 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
2270 }
2271
/**
 * Fetch a TGSI constant-register operand.
 *
 * \param swizzle_in  low 16 bits: channel to fetch (or LP_CHAN_ALL for all
 *                    four); high 16 bits: the second channel for 64-bit
 *                    types, which are loaded as two 32-bit halves.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle_in)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;
	unsigned swizzle = swizzle_in & 0xffff;

	LLVMValueRef addr, bufp;

	if (swizzle_in == LP_CHAN_ALL) {
		/* Fetch each channel recursively and gather into a vec4. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Split 64-bit loads. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef lo, hi;

		lo = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle);
		hi = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, (swizzle_in >> 16));
		return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
						lo, hi);
	}

	/* Byte address of the selected dword: 16 bytes per vec4 register. */
	idx = reg->Register.Index * 4 + swizzle;
	if (reg->Register.Indirect) {
		addr = si_get_indirect_index(ctx, ireg, 16, idx * 4);
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	/* Fast path when user data SGPRs point to constant buffer 0 directly. */
	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		LLVMValueRef desc = load_const_buffer_desc_fast_path(ctx);
		LLVMValueRef result = buffer_load_const(ctx, desc, addr);
		return bitcast(bld_base, type, result);
	}

	assert(reg->Register.Dimension);
	buf = reg->Dimension.Index;

	if (reg->Dimension.Indirect) {
		/* Indirectly addressed constant buffer: clamp the index and
		 * translate it into the combined buffer list. */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		index = LLVMBuildAdd(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_load_to_sgpr(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	return bitcast(bld_base, type, buffer_load_const(ctx, bufp, addr));
}
2337
2338 /* Initialize arguments for the shader export intrinsic */
2339 static void si_llvm_init_export_args(struct si_shader_context *ctx,
2340 LLVMValueRef *values,
2341 unsigned target,
2342 struct ac_export_args *args)
2343 {
2344 LLVMValueRef f32undef = LLVMGetUndef(ctx->ac.f32);
2345 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
2346 unsigned chan;
2347 bool is_int8, is_int10;
2348
2349 /* Default is 0xf. Adjusted below depending on the format. */
2350 args->enabled_channels = 0xf; /* writemask */
2351
2352 /* Specify whether the EXEC mask represents the valid mask */
2353 args->valid_mask = 0;
2354
2355 /* Specify whether this is the last export */
2356 args->done = 0;
2357
2358 /* Specify the target we are exporting */
2359 args->target = target;
2360
2361 if (ctx->type == PIPE_SHADER_FRAGMENT) {
2362 const struct si_shader_key *key = &ctx->shader->key;
2363 unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
2364 int cbuf = target - V_008DFC_SQ_EXP_MRT;
2365
2366 assert(cbuf >= 0 && cbuf < 8);
2367 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
2368 is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
2369 is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
2370 }
2371
2372 args->compr = false;
2373 args->out[0] = f32undef;
2374 args->out[1] = f32undef;
2375 args->out[2] = f32undef;
2376 args->out[3] = f32undef;
2377
2378 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2379 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2380 unsigned bits, bool hi) = NULL;
2381
2382 switch (spi_shader_col_format) {
2383 case V_028714_SPI_SHADER_ZERO:
2384 args->enabled_channels = 0; /* writemask */
2385 args->target = V_008DFC_SQ_EXP_NULL;
2386 break;
2387
2388 case V_028714_SPI_SHADER_32_R:
2389 args->enabled_channels = 1; /* writemask */
2390 args->out[0] = values[0];
2391 break;
2392
2393 case V_028714_SPI_SHADER_32_GR:
2394 args->enabled_channels = 0x3; /* writemask */
2395 args->out[0] = values[0];
2396 args->out[1] = values[1];
2397 break;
2398
2399 case V_028714_SPI_SHADER_32_AR:
2400 args->enabled_channels = 0x9; /* writemask */
2401 args->out[0] = values[0];
2402 args->out[3] = values[3];
2403 break;
2404
2405 case V_028714_SPI_SHADER_FP16_ABGR:
2406 packf = ac_build_cvt_pkrtz_f16;
2407 break;
2408
2409 case V_028714_SPI_SHADER_UNORM16_ABGR:
2410 packf = ac_build_cvt_pknorm_u16;
2411 break;
2412
2413 case V_028714_SPI_SHADER_SNORM16_ABGR:
2414 packf = ac_build_cvt_pknorm_i16;
2415 break;
2416
2417 case V_028714_SPI_SHADER_UINT16_ABGR:
2418 packi = ac_build_cvt_pk_u16;
2419 break;
2420
2421 case V_028714_SPI_SHADER_SINT16_ABGR:
2422 packi = ac_build_cvt_pk_i16;
2423 break;
2424
2425 case V_028714_SPI_SHADER_32_ABGR:
2426 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2427 break;
2428 }
2429
2430 /* Pack f16 or norm_i16/u16. */
2431 if (packf) {
2432 for (chan = 0; chan < 2; chan++) {
2433 LLVMValueRef pack_args[2] = {
2434 values[2 * chan],
2435 values[2 * chan + 1]
2436 };
2437 LLVMValueRef packed;
2438
2439 packed = packf(&ctx->ac, pack_args);
2440 args->out[chan] = ac_to_float(&ctx->ac, packed);
2441 }
2442 args->compr = 1; /* COMPR flag */
2443 }
2444 /* Pack i16/u16. */
2445 if (packi) {
2446 for (chan = 0; chan < 2; chan++) {
2447 LLVMValueRef pack_args[2] = {
2448 ac_to_integer(&ctx->ac, values[2 * chan]),
2449 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2450 };
2451 LLVMValueRef packed;
2452
2453 packed = packi(&ctx->ac, pack_args,
2454 is_int8 ? 8 : is_int10 ? 10 : 16,
2455 chan == 1);
2456 args->out[chan] = ac_to_float(&ctx->ac, packed);
2457 }
2458 args->compr = 1; /* COMPR flag */
2459 }
2460 }
2461
2462 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2463 LLVMValueRef alpha)
2464 {
2465 struct si_shader_context *ctx = si_shader_context(bld_base);
2466
2467 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2468 static LLVMRealPredicate cond_map[PIPE_FUNC_ALWAYS + 1] = {
2469 [PIPE_FUNC_LESS] = LLVMRealOLT,
2470 [PIPE_FUNC_EQUAL] = LLVMRealOEQ,
2471 [PIPE_FUNC_LEQUAL] = LLVMRealOLE,
2472 [PIPE_FUNC_GREATER] = LLVMRealOGT,
2473 [PIPE_FUNC_NOTEQUAL] = LLVMRealONE,
2474 [PIPE_FUNC_GEQUAL] = LLVMRealOGE,
2475 };
2476 LLVMRealPredicate cond = cond_map[ctx->shader->key.part.ps.epilog.alpha_func];
2477 assert(cond);
2478
2479 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2480 SI_PARAM_ALPHA_REF);
2481 LLVMValueRef alpha_pass =
2482 LLVMBuildFCmp(ctx->ac.builder, cond, alpha, alpha_ref, "");
2483 ac_build_kill_if_false(&ctx->ac, alpha_pass);
2484 } else {
2485 ac_build_kill_if_false(&ctx->ac, ctx->i1false);
2486 }
2487 }
2488
2489 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2490 LLVMValueRef alpha,
2491 unsigned samplemask_param)
2492 {
2493 struct si_shader_context *ctx = si_shader_context(bld_base);
2494 LLVMValueRef coverage;
2495
2496 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2497 coverage = LLVMGetParam(ctx->main_fn,
2498 samplemask_param);
2499 coverage = ac_to_integer(&ctx->ac, coverage);
2500
2501 coverage = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32",
2502 ctx->i32,
2503 &coverage, 1, AC_FUNC_ATTR_READNONE);
2504
2505 coverage = LLVMBuildUIToFP(ctx->ac.builder, coverage,
2506 ctx->f32, "");
2507
2508 coverage = LLVMBuildFMul(ctx->ac.builder, coverage,
2509 LLVMConstReal(ctx->f32,
2510 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2511
2512 return LLVMBuildFMul(ctx->ac.builder, alpha, coverage, "");
2513 }
2514
/**
 * Compute clip distances from CLIPVERTEX against the 8 user clip planes and
 * fill the two clip-distance position exports (pos[2] and pos[3]).
 *
 * \param pos       position export args; entries 2 and 3 are written
 * \param out_elts  the four CLIPVERTEX components
 */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	/* Two exports of 4 clip distances each. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte address of plane[reg_index*4+chan][const_chan]
				 * in the clip-planes constant buffer. */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				/* dist[chan] += plane_elt * clipvertex[const_chan] */
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2555
2556 static void si_dump_streamout(struct pipe_stream_output_info *so)
2557 {
2558 unsigned i;
2559
2560 if (so->num_outputs)
2561 fprintf(stderr, "STREAMOUT\n");
2562
2563 for (i = 0; i < so->num_outputs; i++) {
2564 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2565 so->output[i].start_component;
2566 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2567 i, so->output[i].output_buffer,
2568 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2569 so->output[i].register_index,
2570 mask & 1 ? "x" : "",
2571 mask & 2 ? "y" : "",
2572 mask & 4 ? "z" : "",
2573 mask & 8 ? "w" : "");
2574 }
2575 }
2576
2577 static void emit_streamout_output(struct si_shader_context *ctx,
2578 LLVMValueRef const *so_buffers,
2579 LLVMValueRef const *so_write_offsets,
2580 struct pipe_stream_output *stream_out,
2581 struct si_shader_output_values *shader_out)
2582 {
2583 unsigned buf_idx = stream_out->output_buffer;
2584 unsigned start = stream_out->start_component;
2585 unsigned num_comps = stream_out->num_components;
2586 LLVMValueRef out[4];
2587
2588 assert(num_comps && num_comps <= 4);
2589 if (!num_comps || num_comps > 4)
2590 return;
2591
2592 /* Load the output as int. */
2593 for (int j = 0; j < num_comps; j++) {
2594 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
2595
2596 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
2597 }
2598
2599 /* Pack the output. */
2600 LLVMValueRef vdata = NULL;
2601
2602 switch (num_comps) {
2603 case 1: /* as i32 */
2604 vdata = out[0];
2605 break;
2606 case 2: /* as v2i32 */
2607 case 3: /* as v4i32 (aligned to 4) */
2608 out[3] = LLVMGetUndef(ctx->i32);
2609 /* fall through */
2610 case 4: /* as v4i32 */
2611 vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
2612 break;
2613 }
2614
2615 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
2616 vdata, num_comps,
2617 so_write_offsets[buf_idx],
2618 ctx->i32_0,
2619 stream_out->dst_offset * 4, 1, 1, true, false);
2620 }
2621
2622 /**
2623 * Write streamout data to buffers for vertex stream @p stream (different
2624 * vertex streams can occur for GS copy shaders).
2625 */
2626 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
2627 struct si_shader_output_values *outputs,
2628 unsigned noutput, unsigned stream)
2629 {
2630 struct si_shader_selector *sel = ctx->shader->selector;
2631 struct pipe_stream_output_info *so = &sel->so;
2632 LLVMBuilderRef builder = ctx->ac.builder;
2633 int i;
2634 struct lp_build_if_state if_ctx;
2635
2636 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
2637 LLVMValueRef so_vtx_count =
2638 si_unpack_param(ctx, ctx->param_streamout_config, 16, 7);
2639
2640 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
2641
2642 /* can_emit = tid < so_vtx_count; */
2643 LLVMValueRef can_emit =
2644 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
2645
2646 /* Emit the streamout code conditionally. This actually avoids
2647 * out-of-bounds buffer access. The hw tells us via the SGPR
2648 * (so_vtx_count) which threads are allowed to emit streamout data. */
2649 lp_build_if(&if_ctx, &ctx->gallivm, can_emit);
2650 {
2651 /* The buffer offset is computed as follows:
2652 * ByteOffset = streamout_offset[buffer_id]*4 +
2653 * (streamout_write_index + thread_id)*stride[buffer_id] +
2654 * attrib_offset
2655 */
2656
2657 LLVMValueRef so_write_index =
2658 LLVMGetParam(ctx->main_fn,
2659 ctx->param_streamout_write_index);
2660
2661 /* Compute (streamout_write_index + thread_id). */
2662 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
2663
2664 /* Load the descriptor and compute the write offset for each
2665 * enabled buffer. */
2666 LLVMValueRef so_write_offset[4] = {};
2667 LLVMValueRef so_buffers[4];
2668 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
2669 ctx->param_rw_buffers);
2670
2671 for (i = 0; i < 4; i++) {
2672 if (!so->stride[i])
2673 continue;
2674
2675 LLVMValueRef offset = LLVMConstInt(ctx->i32,
2676 SI_VS_STREAMOUT_BUF0 + i, 0);
2677
2678 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
2679
2680 LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
2681 ctx->param_streamout_offset[i]);
2682 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
2683
2684 so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
2685 LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
2686 so_offset);
2687 }
2688
2689 /* Write streamout data. */
2690 for (i = 0; i < so->num_outputs; i++) {
2691 unsigned reg = so->output[i].register_index;
2692
2693 if (reg >= noutput)
2694 continue;
2695
2696 if (stream != so->output[i].stream)
2697 continue;
2698
2699 emit_streamout_output(ctx, so_buffers, so_write_offset,
2700 &so->output[i], &outputs[reg]);
2701 }
2702 }
2703 lp_build_endif(&if_ctx);
2704 }
2705
2706 static void si_export_param(struct si_shader_context *ctx, unsigned index,
2707 LLVMValueRef *values)
2708 {
2709 struct ac_export_args args;
2710
2711 si_llvm_init_export_args(ctx, values,
2712 V_008DFC_SQ_EXP_PARAM + index, &args);
2713 ac_build_export(&ctx->ac, &args);
2714 }
2715
/**
 * Emit parameter exports for all exportable outputs and record each output's
 * export slot in shader->info.vs_output_param_offset. Also sets
 * shader->info.nr_param_exports.
 */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Skip outputs that have no channel in vertex stream 0. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics are exported as parameters. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs that the key marked as killed. Generics at or
		 * above SI_MAX_IO_GENERIC have no kill bit and are always
		 * exported. */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name,
							   semantic_index, true)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
2763
/**
 * Generate export instructions for the hardware VS shader stage: position
 * exports (including clip distances and the misc vector) followed by
 * parameter exports.
 */
static void si_llvm_export_vs(struct si_shader_context *ctx,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Build position exports. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_export_args(ctx, outputs[i].values,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* Clip distances occupy position exports 2 and 3. */
			if (!shader->key.opt.clip_disable) {
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_export_args(ctx, outputs[i].values,
							 V_008DFC_SQ_EXP_POS + index,
							 &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* OR the viewport index into z[19:16] on top
				 * of the layer bits. */
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the enabled position exports. */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* Emit the position exports with consecutive targets; the last one
	 * carries the "done" bit. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
2910
2911 /**
2912 * Forward all outputs from the vertex shader to the TES. This is only used
2913 * for the fixed function TCS.
2914 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_base;
	uint64_t inputs;

	/* Invocation ID is bits [12:8] of tcs_rel_ids. */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS base of this invocation's vertex inputs:
	 * current-patch offset + invocation_id * per-vertex dword stride. */
	lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
				 lds_base);

	/* Bitmask of VS output slots that the fixed-function TCS must
	 * forward to the offchip buffer for the TES to read. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		/* Each output slot occupies 4 dwords in LDS. */
		LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
		                       LLVMConstInt(ctx->i32, 4 * i, 0),
		                        "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					      get_rel_patch_id(ctx),
					      invocation_id,
					      LLVMConstInt(ctx->i32, i, 0));

		/* ~0 = load the whole vec4 at once. */
		LLVMValueRef value = lds_load(bld_base, ctx->ac.i32, ~0,
					      lds_ptr);

		/* Copy the vec4 to the offchip tess ring. */
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
2951
/* Write the tessellation factors for the current patch to the tess factor
 * ring (and optionally to the offchip buffer if the TES reads them).
 * Only invocation 0 performs the stores, since tess factors are per-patch.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset,
				  LLVMValueRef invoc0_tf_outer[4],
				  LLVMValueRef invoc0_tf_inner[2])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Add a barrier before loading tess factors from LDS. */
	if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
		si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Unused components stay undef; only the first outer_comps/inner_comps
	 * entries are filled below. */
	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
		/* Tess factors are in VGPRs. */
		for (i = 0; i < outer_comps; i++)
			outer[i] = out[i] = invoc0_tf_outer[i];
		for (i = 0; i < inner_comps; i++)
			inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
	} else {
		/* Load tess_inner and tess_outer from LDS.
		 * Any invocation can write them, so we can't get them from a temporary.
		 */
		tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
		tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

		lds_base = tcs_out_current_patch_data_offset;
		/* Each patch output slot is 4 dwords. */
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_inner_index * 4, 0), "");
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_outer_index * 4, 0), "");

		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_inner);
		}
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		LLVMValueRef tmp = out[0];
		out[0] = out[1];
		out[1] = tmp;
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);

	/* Get the buffer. */
	buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch of the threadgroup writes the control word. */
	lp_build_if(&inner_if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->info.chip_class <= GFX8) {
		/* NOTE(review): 0x80000000 is presumably the "all threads
		 * enabled" HS control word required by pre-GFX9 hardware —
		 * confirm against the ISA/PAL docs. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
3126
3127 static LLVMValueRef
3128 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
3129 unsigned param, unsigned return_index)
3130 {
3131 return LLVMBuildInsertValue(ctx->ac.builder, ret,
3132 LLVMGetParam(ctx->main_fn, param),
3133 return_index, "");
3134 }
3135
3136 static LLVMValueRef
3137 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
3138 unsigned param, unsigned return_index)
3139 {
3140 LLVMBuilderRef builder = ctx->ac.builder;
3141 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
3142
3143 return LLVMBuildInsertValue(builder, ret,
3144 ac_to_float(&ctx->ac, p),
3145 return_index, "");
3146 }
3147
3148 static LLVMValueRef
3149 si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
3150 unsigned param, unsigned return_index)
3151 {
3152 LLVMBuilderRef builder = ctx->ac.builder;
3153 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, param);
3154 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i32, "");
3155 return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
3156 }
3157
/* TCS epilogue entry point. This only writes the tessellation factor
 * levels; all other TCS outputs were already stored by the main body.
 * On GFX9 (merged LS+HS) it also closes the wrapping "if" and rebuilds
 * phi values so threads that skipped the TCS body still reach the epilog.
 */
static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	/* Invocation ID is bits [12:8] of tcs_rel_ids. */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (ctx->screen->info.chip_class >= GFX9) {
		LLVMBasicBlockRef blocks[2] = {
			LLVMGetInsertBlock(builder),
			ctx->merged_wrap_if_state.entry_block
		};
		LLVMValueRef values[2];

		lp_build_endif(&ctx->merged_wrap_if_state);

		/* Threads coming from the entry block (which skipped the TCS
		 * body) get undef for these values. */
		values[0] = rel_patch_id;
		values[1] = LLVMGetUndef(ctx->i32);
		rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = tf_lds_offset;
		values[1] = LLVMGetUndef(ctx->i32);
		tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = invocation_id;
		values[1] = ctx->i32_1; /* cause the epilog to skip threads */
		invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
	}

	/* Return epilog parameters from this function. */
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->info.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
					  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
					  GFX6_SGPR_TCS_OUT_LAYOUT);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
	invocation_id = ac_to_float(&ctx->ac, invocation_id);
	tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);

	/* Leave a hole corresponding to the two input VGPRs. This ensures that
	 * the invocation_id output does not alias the tcs_rel_ids input,
	 * which saves a V_MOV on gfx9.
	 */
	vgpr += 2;

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");

	if (ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
		vgpr++; /* skip the tess factor LDS offset */
		/* Pass the 6 tess factors (4 outer + 2 inner) in VGPRs. */
		for (unsigned i = 0; i < 6; i++) {
			LLVMValueRef value =
				LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
			value = ac_to_float(&ctx->ac, value);
			ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
		}
	} else {
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	}
	ctx->return_value = ret;
}
3249
/* Pass TCS inputs from LS to TCS on GFX9 (merged LS+HS shader).
 * All inputs the HS part needs are forwarded through the return value,
 * so the TCS function prolog can pick them up from the same registers.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* SGPRs 0-5: ring descriptors and merged-shader offsets. */
	ret = si_insert_input_ptr(ctx, ret, 0, 0);
	ret = si_insert_input_ptr(ctx, ret, 1, 1);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* User SGPRs start at offset 8. */
	ret = si_insert_input_ptr(ctx, ret, ctx->param_rw_buffers,
				  8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr(ctx, ret,
				  ctx->param_bindless_samplers_and_images,
				  8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);

	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);

	/* VGPRs follow the user SGPRs: patch ID and rel_ids for the HS part. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_patch_id),
				   vgpr++, "");
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_rel_ids),
				   vgpr++, "");
	ctx->return_value = ret;
}
3287
/* Pass GS inputs from ES to GS on GFX9 (merged ES+GS shader).
 * Mirrors si_set_ls_return_value_for_tcs for the geometry stage.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* SGPRs 0-5 (slot 4 is intentionally left unset for GS). */
	ret = si_insert_input_ptr(ctx, ret, 0, 0);
	ret = si_insert_input_ptr(ctx, ret, 1, 1);
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* User SGPRs start at offset 8. */
	ret = si_insert_input_ptr(ctx, ret, ctx->param_rw_buffers,
				  8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr(ctx, ret,
				  ctx->param_bindless_samplers_and_images,
				  8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	/* VGPR start depends on whether the ES part was a VS or a TES. */
	unsigned vgpr;
	if (ctx->type == PIPE_SHADER_VERTEX)
		vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
	else
		vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;

	/* Forward the 5 GS input VGPRs (vertex offsets, prim ID, etc.). */
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
3317
/* VS-as-LS epilogue: write all vertex outputs to LDS so the following
 * TCS (HS) stage can read them as inputs. On GFX9, also set up the
 * merged-shader return value for the TCS part.
 */
static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
	/* LDS dword address of this vertex's output area. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output slot occupies 4 dwords in LDS. */
		int param = si_shader_io_get_unique_index(name, index, false);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		/* Store only the components the shader actually writes. */
		for (chan = 0; chan < 4; chan++) {
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			lds_store(ctx, chan, dw_addr,
				  LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
3373
/* VS/TES-as-ES epilogue: write all outputs to the ESGS ring so the GS
 * can read them. On GFX9 the ESGS ring lives in LDS and the merged-shader
 * return value is set up for the GS part; on older chips the ring is a
 * memory buffer.
 */
static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		/* Global vertex index = wave_idx * 64 + thread ID. */
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		int param;

		/* Layer/viewport index written in ES is ignored when a GS
		 * follows (see ARB_shader_viewport_layer_array issue 2). */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i], false);

		for (chan = 0; chan < 4; chan++) {
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->info.chip_class >= GFX9) {
				lds_store(ctx, param * 4 + chan, lds_base, out_val);
				continue;
			}

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
3432
3433 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
3434 {
3435 if (ctx->screen->info.chip_class >= GFX9)
3436 return si_unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
3437 else
3438 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
3439 }
3440
3441 static void emit_gs_epilogue(struct si_shader_context *ctx)
3442 {
3443 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
3444 si_get_gs_wave_id(ctx));
3445
3446 if (ctx->screen->info.chip_class >= GFX9)
3447 lp_build_endif(&ctx->merged_wrap_if_state);
3448 }
3449
3450 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
3451 unsigned max_outputs,
3452 LLVMValueRef *addrs)
3453 {
3454 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3455 struct tgsi_shader_info UNUSED *info = &ctx->shader->selector->info;
3456
3457 assert(info->num_outputs <= max_outputs);
3458
3459 emit_gs_epilogue(ctx);
3460 }
3461
/* TGSI entry point for the GS epilogue; same work as the ABI path. */
static void si_tgsi_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	emit_gs_epilogue(si_shader_context(bld_base));
}
3467
3468 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
3469 unsigned max_outputs,
3470 LLVMValueRef *addrs)
3471 {
3472 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3473 struct tgsi_shader_info *info = &ctx->shader->selector->info;
3474 struct si_shader_output_values *outputs = NULL;
3475 int i,j;
3476
3477 assert(!ctx->shader->is_gs_copy_shader);
3478 assert(info->num_outputs <= max_outputs);
3479
3480 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
3481
3482 /* Vertex color clamping.
3483 *
3484 * This uses a state constant loaded in a user data SGPR and
3485 * an IF statement is added that clamps all colors if the constant
3486 * is true.
3487 */
3488 struct lp_build_if_state if_ctx;
3489 LLVMValueRef cond = NULL;
3490 LLVMValueRef addr, val;
3491
3492 for (i = 0; i < info->num_outputs; i++) {
3493 if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
3494 info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
3495 continue;
3496
3497 /* We've found a color. */
3498 if (!cond) {
3499 /* The state is in the first bit of the user SGPR. */
3500 cond = LLVMGetParam(ctx->main_fn,
3501 ctx->param_vs_state_bits);
3502 cond = LLVMBuildTrunc(ctx->ac.builder, cond,
3503 ctx->i1, "");
3504 lp_build_if(&if_ctx, &ctx->gallivm, cond);
3505 }
3506
3507 for (j = 0; j < 4; j++) {
3508 addr = addrs[4 * i + j];
3509 val = LLVMBuildLoad(ctx->ac.builder, addr, "");
3510 val = ac_build_clamp(&ctx->ac, val);
3511 LLVMBuildStore(ctx->ac.builder, val, addr);
3512 }
3513 }
3514
3515 if (cond)
3516 lp_build_endif(&if_ctx);
3517
3518 for (i = 0; i < info->num_outputs; i++) {
3519 outputs[i].semantic_name = info->output_semantic_name[i];
3520 outputs[i].semantic_index = info->output_semantic_index[i];
3521
3522 for (j = 0; j < 4; j++) {
3523 outputs[i].values[j] =
3524 LLVMBuildLoad(ctx->ac.builder,
3525 addrs[4 * i + j],
3526 "");
3527 outputs[i].vertex_stream[j] =
3528 (info->output_streams[i] >> (2 * j)) & 3;
3529 }
3530 }
3531
3532 if (ctx->shader->selector->so.num_outputs)
3533 si_llvm_emit_streamout(ctx, outputs, i, 0);
3534
3535 /* Export PrimitiveID. */
3536 if (ctx->shader->key.mono.u.vs_export_prim_id) {
3537 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
3538 outputs[i].semantic_index = 0;
3539 outputs[i].values[0] = ac_to_float(&ctx->ac, get_primitive_id(ctx, 0));
3540 for (j = 1; j < 4; j++)
3541 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
3542
3543 memset(outputs[i].vertex_stream, 0,
3544 sizeof(outputs[i].vertex_stream));
3545 i++;
3546 }
3547
3548 si_llvm_export_vs(ctx, outputs, i);
3549 FREE(outputs);
3550 }
3551
/* Epilogue for the primitive-discard compute shader: return only the
 * position output through the function's return value. */
static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
						  unsigned max_outputs,
						  LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef pos[4] = {};

	assert(info->num_outputs <= max_outputs);

	/* Find the POSITION output and load its 4 components. */
	for (unsigned i = 0; i < info->num_outputs; i++) {
		if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
			continue;

		for (unsigned chan = 0; chan < 4; chan++)
			pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
		break;
	}
	/* The shader must write gl_Position for this path to work. */
	assert(pos[0] != NULL);

	/* Return the position output. */
	LLVMValueRef ret = ctx->return_value;
	for (unsigned chan = 0; chan < 4; chan++)
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
	ctx->return_value = ret;
}
3578
3579 static void si_tgsi_emit_epilogue(struct lp_build_tgsi_context *bld_base)
3580 {
3581 struct si_shader_context *ctx = si_shader_context(bld_base);
3582
3583 ctx->abi.emit_outputs(&ctx->abi, RADEON_LLVM_MAX_OUTPUTS,
3584 &ctx->outputs[0][0]);
3585 }
3586
/* Pixel shader exports collected during codegen and emitted in one batch
 * by si_emit_ps_exports. */
struct si_ps_exports {
	unsigned num;	/* number of valid entries in args[] */
	struct ac_export_args args[10]; /* color MRTs + depth/stencil/samplemask */
};
3591
3592 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
3593 LLVMValueRef depth, LLVMValueRef stencil,
3594 LLVMValueRef samplemask, struct si_ps_exports *exp)
3595 {
3596 struct si_shader_context *ctx = si_shader_context(bld_base);
3597 struct ac_export_args args;
3598
3599 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
3600
3601 memcpy(&exp->args[exp->num++], &args, sizeof(args));
3602 }
3603
/* Apply the PS epilog color transforms (clamp, alpha-to-one, alpha test,
 * smoothing) to one color output and queue its MRT export(s) in 'exp'.
 * 'is_last' marks the final color export, which carries the DONE bit. */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = ctx->ac.f32_1;

	/* Alpha test (only applies to the first color output) */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(ctx, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(ctx, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3669
3670 static void si_emit_ps_exports(struct si_shader_context *ctx,
3671 struct si_ps_exports *exp)
3672 {
3673 for (unsigned i = 0; i < exp->num; i++)
3674 ac_build_export(&ctx->ac, &exp->args[i]);
3675 }
3676
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 *
 * The PS epilog part picks these values up from the return registers.
 */
static void si_llvm_return_fs_outputs(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Execute any KILLs that were deferred until the end of the shader. */
	if (ctx->postponed_kill)
		ac_build_kill_if_false(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = addrs[4 * i + j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component (depth) is relevant. */
			depth = LLVMBuildLoad(builder,
					      addrs[4 * i + 2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			/* Stencil ref is in the Y component. */
			stencil = LLVMBuildLoad(builder,
						addrs[4 * i + 1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   addrs[4 * i + 0], "");
			break;
		default:
			fprintf(stderr, "Warning: GFX6 unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   ac_to_integer(&ctx->ac,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	/* Colors are packed: only written color outputs occupy VGPRs. */
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3774
/* TGSI MEMBAR: emit an s_waitcnt matching the requested barrier scope.
 * The waitcnt counters use cleared bits to mean "wait", so masks are
 * combined with &=; NOOP_WAITCNT (all bits set) waits for nothing. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	/* The barrier flags are an immediate operand. */
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	/* Thread-group scope: wait for both memory (VM) and LDS (LGKM). */
	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	/* Buffer/image barriers only need memory (VM) waits. */
	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	/* Shared-memory barriers only need LDS (LGKM) waits. */
	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	if (waitcnt != NOOP_WAITCNT)
		ac_build_waitcnt(&ctx->ac, waitcnt);
}
3799
3800 static void clock_emit(
3801 const struct lp_build_tgsi_action *action,
3802 struct lp_build_tgsi_context *bld_base,
3803 struct lp_build_emit_data *emit_data)
3804 {
3805 struct si_shader_context *ctx = si_shader_context(bld_base);
3806 LLVMValueRef tmp = ac_build_shader_clock(&ctx->ac);
3807
3808 emit_data->output[0] =
3809 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_0, "");
3810 emit_data->output[1] =
3811 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_1, "");
3812 }
3813
3814 static void si_llvm_emit_ddxy(
3815 const struct lp_build_tgsi_action *action,
3816 struct lp_build_tgsi_context *bld_base,
3817 struct lp_build_emit_data *emit_data)
3818 {
3819 struct si_shader_context *ctx = si_shader_context(bld_base);
3820 unsigned opcode = emit_data->info->opcode;
3821 LLVMValueRef val;
3822 int idx;
3823 unsigned mask;
3824
3825 if (opcode == TGSI_OPCODE_DDX_FINE)
3826 mask = AC_TID_MASK_LEFT;
3827 else if (opcode == TGSI_OPCODE_DDY_FINE)
3828 mask = AC_TID_MASK_TOP;
3829 else
3830 mask = AC_TID_MASK_TOP_LEFT;
3831
3832 /* for DDX we want to next X pixel, DDY next Y pixel. */
3833 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3834
3835 val = ac_to_integer(&ctx->ac, emit_data->args[0]);
3836 val = ac_build_ddxy(&ctx->ac, mask, idx, val);
3837 emit_data->output[emit_data->chan] = val;
3838 }
3839
/* Emit TGSI INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET.
 *
 * Computes adjusted barycentric coordinates (i, j) when an offset or a
 * sample position is given, then re-interpolates the selected fragment
 * shader input for all four destination channels. Indirectly indexed
 * input arrays are handled by interpolating every possible element and
 * selecting the requested one.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	const struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *input = &inst->Src[0];
	int input_base, input_array_size;
	int chan;
	int i;
	LLVMValueRef prim_mask = ctx->abi.prim_mask;
	LLVMValueRef array_idx, offset_x = NULL, offset_y = NULL;
	int interp_param_idx;
	unsigned interp;
	unsigned location;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		offset_x = lp_build_emit_fetch(bld_base, emit_data->inst, 1,
					       TGSI_CHAN_X);
		offset_y = lp_build_emit_fetch(bld_base, emit_data->inst, 1,
					       TGSI_CHAN_Y);
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = ac_to_integer(&ctx->ac, sample_id);

		/* Section 8.13.2 (Interpolation Functions) of the OpenGL Shading
		 * Language 4.50 spec says about interpolateAtSample:
		 *
		 *    "Returns the value of the input interpolant variable at
		 *     the location of sample number sample. If multisample
		 *     buffers are not available, the input variable will be
		 *     evaluated at the center of the pixel. If sample sample
		 *     does not exist, the position used to interpolate the
		 *     input variable is undefined."
		 *
		 * This means that sample_id values outside of the valid are
		 * in fact valid input, and the usual mechanism for loading the
		 * sample position doesn't work.
		 */
		if (ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center) {
			/* Use the pixel center (0.5, 0.5) instead of reading
			 * the sample-position buffer. */
			LLVMValueRef center[4] = {
				LLVMConstReal(ctx->f32, 0.5),
				LLVMConstReal(ctx->f32, 0.5),
				ctx->ac.f32_0,
				ctx->ac.f32_0,
			};

			sample_position = ac_build_gather_values(&ctx->ac, center, 4);
		} else {
			sample_position = load_sample_position(&ctx->abi, sample_id);
		}

		/* Convert the sample position into an offset from the pixel
		 * center, which is the form the (i, j) adjustment below uses. */
		offset_x = LLVMBuildExtractElement(ctx->ac.builder, sample_position,
						   ctx->i32_0, "");

		offset_x = LLVMBuildFSub(ctx->ac.builder, offset_x, halfval, "");
		offset_y = LLVMBuildExtractElement(ctx->ac.builder, sample_position,
						   ctx->i32_1, "");
		offset_y = LLVMBuildFSub(ctx->ac.builder, offset_y, halfval, "");
	}

	assert(input->Register.File == TGSI_FILE_INPUT);

	/* Determine the range [input_base, input_base + input_array_size)
	 * of inputs this instruction may address. */
	if (input->Register.Indirect) {
		unsigned array_id = input->Indirect.ArrayID;

		if (array_id) {
			/* A declared input array: use its bounds. */
			input_base = info->input_array_first[array_id];
			input_array_size = info->input_array_last[array_id] - input_base + 1;
		} else {
			/* No array declaration: anything up to the last input. */
			input_base = inst->Src[0].Register.Index;
			input_array_size = info->num_inputs - input_base;
		}

		array_idx = si_get_indirect_index(ctx, &input->Indirect,
						  1, input->Register.Index - input_base);
	} else {
		input_base = inst->Src[0].Register.Index;
		input_array_size = 1;
		array_idx = ctx->i32_0;
	}

	interp = shader->selector->info.input_interpolate[input_base];

	/* OFFSET/SAMPLE start from the center barycentrics and adjust them
	 * below; CENTROID uses the centroid barycentrics directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		/* Index 0: no barycentric parameter is needed (presumably
		 * constant-interpolated inputs) — interp_param stays NULL. */
		interp_param = NULL;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp;

			interp_el = ac_to_float(&ctx->ac, interp_el);

			temp = ac_build_fmad(&ctx->ac, ddx_el, offset_x, interp_el);
			ij_out[i] = ac_build_fmad(&ctx->ac, ddy_el, offset_y, temp);
		}
		interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
	}

	if (interp_param)
		interp_param = ac_to_float(&ctx->ac, interp_param);

	/* Interpolate each destination channel. For an indirectly indexed
	 * array, interpolate every element into a vector and pick the
	 * requested one with an extractelement on the dynamic index. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef gather = LLVMGetUndef(LLVMVectorType(ctx->f32, input_array_size));
		unsigned schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);

		for (unsigned idx = 0; idx < input_array_size; ++idx) {
			LLVMValueRef v, i = NULL, j = NULL;

			if (interp_param) {
				i = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_0, "");
				j = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_1, "");
			}
			v = si_build_fs_interp(ctx, input_base + idx, schan,
					       prim_mask, i, j);

			gather = LLVMBuildInsertElement(ctx->ac.builder,
							gather, v, LLVMConstInt(ctx->i32, idx, false), "");
		}

		emit_data->output[chan] = LLVMBuildExtractElement(
			ctx->ac.builder, gather, array_idx, "");
	}
}
4009
4010 static void vote_all_emit(
4011 const struct lp_build_tgsi_action *action,
4012 struct lp_build_tgsi_context *bld_base,
4013 struct lp_build_emit_data *emit_data)
4014 {
4015 struct si_shader_context *ctx = si_shader_context(bld_base);
4016
4017 LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, emit_data->args[0]);
4018 emit_data->output[emit_data->chan] =
4019 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4020 }
4021
4022 static void vote_any_emit(
4023 const struct lp_build_tgsi_action *action,
4024 struct lp_build_tgsi_context *bld_base,
4025 struct lp_build_emit_data *emit_data)
4026 {
4027 struct si_shader_context *ctx = si_shader_context(bld_base);
4028
4029 LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, emit_data->args[0]);
4030 emit_data->output[emit_data->chan] =
4031 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4032 }
4033
4034 static void vote_eq_emit(
4035 const struct lp_build_tgsi_action *action,
4036 struct lp_build_tgsi_context *bld_base,
4037 struct lp_build_emit_data *emit_data)
4038 {
4039 struct si_shader_context *ctx = si_shader_context(bld_base);
4040
4041 LLVMValueRef tmp = ac_build_vote_eq(&ctx->ac, emit_data->args[0]);
4042 emit_data->output[emit_data->chan] =
4043 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4044 }
4045
4046 static void ballot_emit(
4047 const struct lp_build_tgsi_action *action,
4048 struct lp_build_tgsi_context *bld_base,
4049 struct lp_build_emit_data *emit_data)
4050 {
4051 struct si_shader_context *ctx = si_shader_context(bld_base);
4052 LLVMBuilderRef builder = ctx->ac.builder;
4053 LLVMValueRef tmp;
4054
4055 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4056 tmp = ac_build_ballot(&ctx->ac, tmp);
4057 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
4058
4059 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
4060 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
4061 }
4062
/* Emit a cross-lane read: broadcast a value from one invocation (lane) to
 * the whole wave. The actual intrinsic comes from action->intr_name
 * (set up by the action table — presumably readlane/readfirstlane; the
 * table is outside this chunk).
 */
static void read_lane_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_READ_INVOC) {
		/* READ_INVOC takes an explicit lane: the value comes from
		 * src0 and the lane index from src1. */
		emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
							 0, emit_data->src_chan);

		/* Always read the source invocation (= lane) from the X channel. */
		emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
							 1, TGSI_CHAN_X);
		emit_data->arg_count = 2;
	}

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	ac_build_optimization_barrier(&ctx->ac, &emit_data->args[0]);

	/* The intrinsic takes integer operands; bitcast float inputs. */
	for (unsigned i = 0; i < emit_data->arg_count; ++i)
		emit_data->args[i] = ac_to_integer(&ctx->ac, emit_data->args[i]);

	emit_data->output[emit_data->chan] =
		ac_build_intrinsic(&ctx->ac, action->intr_name,
				   ctx->i32, emit_data->args, emit_data->arg_count,
				   AC_FUNC_ATTR_READNONE |
				   AC_FUNC_ATTR_CONVERGENT);
}
4094
4095 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
4096 struct lp_build_emit_data *emit_data)
4097 {
4098 struct si_shader_context *ctx = si_shader_context(bld_base);
4099 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
4100 LLVMValueRef imm;
4101 unsigned stream;
4102
4103 assert(src0.File == TGSI_FILE_IMMEDIATE);
4104
4105 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
4106 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
4107 return stream;
4108 }
4109
/* Emit one vertex from the geometry shader.
 *
 * Stores every enabled output component belonging to the given stream to
 * the GSVS ring buffer, increments the per-thread emitted-vertex counter,
 * and signals the emission to the hardware with a sendmsg.
 */
static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
				unsigned stream,
				LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader *shader = ctx->shader;
	struct lp_build_if_state if_state;
	/* Ring offset SGPR passed in from the previous stage. */
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned chan, offset;
	int i;

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		ac_build_kill_if_false(&ctx->ac, can_emit);
	} else {
		lp_build_if(&if_state, &ctx->gallivm, can_emit);
	}

	/* The ring layout is component-major: each enabled component gets a
	 * contiguous region of gs_max_out_vertices dwords, indexed by the
	 * vertex counter. 'offset' numbers the enabled components. */
	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		for (chan = 0; chan < 4; chan++) {
			/* Skip disabled components and components routed to
			 * a different stream. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			/* Scale the dword index to a byte offset. */
			voffset = LLVMBuildMul(ctx->ac.builder, voffset,
					       LLVMConstInt(ctx->i32, 4, 0), "");

			out_val = ac_to_integer(&ctx->ac, out_val);

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Advance the per-stream emitted-vertex counter. */
	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission if vertex data was written. */
	if (offset) {
		ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
				 si_get_gs_wave_id(ctx));
	}

	if (!use_kill)
		lp_build_endif(&if_state);
}
4189
4190 /* Emit one vertex from the geometry shader */
4191 static void si_tgsi_emit_vertex(
4192 const struct lp_build_tgsi_action *action,
4193 struct lp_build_tgsi_context *bld_base,
4194 struct lp_build_emit_data *emit_data)
4195 {
4196 struct si_shader_context *ctx = si_shader_context(bld_base);
4197 unsigned stream = si_llvm_get_stream(bld_base, emit_data);
4198
4199 si_llvm_emit_vertex(&ctx->abi, stream, ctx->outputs[0]);
4200 }
4201
4202 /* Cut one primitive from the geometry shader */
4203 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
4204 unsigned stream)
4205 {
4206 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
4207
4208 /* Signal primitive cut */
4209 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
4210 si_get_gs_wave_id(ctx));
4211 }
4212
4213 /* Cut one primitive from the geometry shader */
4214 static void si_tgsi_emit_primitive(
4215 const struct lp_build_tgsi_action *action,
4216 struct lp_build_tgsi_context *bld_base,
4217 struct lp_build_emit_data *emit_data)
4218 {
4219 struct si_shader_context *ctx = si_shader_context(bld_base);
4220
4221 si_llvm_emit_primitive(&ctx->abi, si_llvm_get_stream(bld_base, emit_data));
4222 }
4223
4224 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
4225 struct lp_build_tgsi_context *bld_base,
4226 struct lp_build_emit_data *emit_data)
4227 {
4228 struct si_shader_context *ctx = si_shader_context(bld_base);
4229
4230 /* GFX6 only (thanks to a hw bug workaround):
4231 * The real barrier instruction isn’t needed, because an entire patch
4232 * always fits into a single wave.
4233 */
4234 if (ctx->screen->info.chip_class == GFX6 &&
4235 ctx->type == PIPE_SHADER_TESS_CTRL) {
4236 ac_build_waitcnt(&ctx->ac, LGKM_CNT & VM_CNT);
4237 return;
4238 }
4239
4240 ac_build_s_barrier(&ctx->ac);
4241 }
4242
4243 void si_create_function(struct si_shader_context *ctx,
4244 const char *name,
4245 LLVMTypeRef *returns, unsigned num_returns,
4246 struct si_function_info *fninfo,
4247 unsigned max_workgroup_size)
4248 {
4249 int i;
4250
4251 si_llvm_create_func(ctx, name, returns, num_returns,
4252 fninfo->types, fninfo->num_params);
4253 ctx->return_value = LLVMGetUndef(ctx->return_type);
4254
4255 for (i = 0; i < fninfo->num_sgpr_params; ++i) {
4256 LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);
4257
4258 /* The combination of:
4259 * - noalias
4260 * - dereferenceable
4261 * - invariant.load
4262 * allows the optimization passes to move loads and reduces
4263 * SGPR spilling significantly.
4264 */
4265 ac_add_function_attr(ctx->ac.context, ctx->main_fn, i + 1,
4266 AC_FUNC_ATTR_INREG);
4267
4268 if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
4269 ac_add_function_attr(ctx->ac.context, ctx->main_fn, i + 1,
4270 AC_FUNC_ATTR_NOALIAS);
4271 ac_add_attr_dereferenceable(P, UINT64_MAX);
4272 }
4273 }
4274
4275 for (i = 0; i < fninfo->num_params; ++i) {
4276 if (fninfo->assign[i])
4277 *fninfo->assign[i] = LLVMGetParam(ctx->main_fn, i);
4278 }
4279
4280 if (ctx->screen->info.address32_hi) {
4281 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
4282 "amdgpu-32bit-address-high-bits",
4283 ctx->screen->info.address32_hi);
4284 }
4285
4286 if (max_workgroup_size) {
4287 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
4288 "amdgpu-max-work-group-size",
4289 max_workgroup_size);
4290 }
4291 LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
4292 "no-signed-zeros-fp-math",
4293 "true");
4294
4295 if (ctx->screen->debug_flags & DBG(UNSAFE_MATH)) {
4296 /* These were copied from some LLVM test. */
4297 LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
4298 "less-precise-fpmad",
4299 "true");
4300 LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
4301 "no-infs-fp-math",
4302 "true");
4303 LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
4304 "no-nans-fp-math",
4305 "true");
4306 LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
4307 "unsafe-fp-math",
4308 "true");
4309 }
4310 }
4311
4312 static void declare_streamout_params(struct si_shader_context *ctx,
4313 struct pipe_stream_output_info *so,
4314 struct si_function_info *fninfo)
4315 {
4316 int i;
4317
4318 /* Streamout SGPRs. */
4319 if (so->num_outputs) {
4320 if (ctx->type != PIPE_SHADER_TESS_EVAL)
4321 ctx->param_streamout_config = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4322 else
4323 ctx->param_streamout_config = fninfo->num_params - 1;
4324
4325 ctx->param_streamout_write_index = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4326 }
4327 /* A streamout buffer offset is loaded if the stride is non-zero. */
4328 for (i = 0; i < 4; i++) {
4329 if (!so->stride[i])
4330 continue;
4331
4332 ctx->param_streamout_offset[i] = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4333 }
4334 }
4335
4336 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4337 {
4338 switch (shader->selector->type) {
4339 case PIPE_SHADER_TESS_CTRL:
4340 /* Return this so that LLVM doesn't remove s_barrier
4341 * instructions on chips where we use s_barrier. */
4342 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 64;
4343
4344 case PIPE_SHADER_GEOMETRY:
4345 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 64;
4346
4347 case PIPE_SHADER_COMPUTE:
4348 break; /* see below */
4349
4350 default:
4351 return 0;
4352 }
4353
4354 const unsigned *properties = shader->selector->info.properties;
4355 unsigned max_work_group_size =
4356 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4357 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4358 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4359
4360 if (!max_work_group_size) {
4361 /* This is a variable group size compute shader,
4362 * compile it for the maximum possible group size.
4363 */
4364 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4365 }
4366 return max_work_group_size;
4367 }
4368
4369 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
4370 struct si_function_info *fninfo,
4371 bool assign_params)
4372 {
4373 LLVMTypeRef const_shader_buf_type;
4374
4375 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
4376 ctx->shader->selector->info.shader_buffers_declared == 0)
4377 const_shader_buf_type = ctx->f32;
4378 else
4379 const_shader_buf_type = ctx->v4i32;
4380
4381 unsigned const_and_shader_buffers =
4382 add_arg(fninfo, ARG_SGPR,
4383 ac_array_in_const32_addr_space(const_shader_buf_type));
4384
4385 if (assign_params)
4386 ctx->param_const_and_shader_buffers = const_and_shader_buffers;
4387 }
4388
4389 static void declare_samplers_and_images(struct si_shader_context *ctx,
4390 struct si_function_info *fninfo,
4391 bool assign_params)
4392 {
4393 unsigned samplers_and_images =
4394 add_arg(fninfo, ARG_SGPR,
4395 ac_array_in_const32_addr_space(ctx->v8i32));
4396
4397 if (assign_params)
4398 ctx->param_samplers_and_images = samplers_and_images;
4399 }
4400
/* Declare the per-stage descriptor pointers: const/shader buffers first,
 * then samplers and images. The call order defines the SGPR parameter
 * layout and must not change. When assign_params is false, the SGPRs
 * are declared for layout purposes only and the ctx->param_* indices
 * are left untouched.
 */
static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
					    struct si_function_info *fninfo,
					    bool assign_params)
{
	declare_const_and_shader_buffers(ctx, fninfo, assign_params);
	declare_samplers_and_images(ctx, fninfo, assign_params);
}
4408
/* Declare the descriptor pointers shared by all stages: the RW buffer
 * descriptor array (v4i32 elements) and the bindless samplers/images
 * array (v8i32 elements), both in the 32-bit const address space.
 */
static void declare_global_desc_pointers(struct si_shader_context *ctx,
					 struct si_function_info *fninfo)
{
	ctx->param_rw_buffers = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const32_addr_space(ctx->v4i32));
	ctx->param_bindless_samplers_and_images = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const32_addr_space(ctx->v8i32));
}
4417
/* Declare the VS-specific input SGPRs: VS state bits, then base vertex,
 * start instance, and draw ID, the last three assigned straight into the
 * shader ABI. The declaration order defines the SGPR layout.
 */
static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
					    struct si_function_info *fninfo)
{
	ctx->param_vs_state_bits = add_arg(fninfo, ARG_SGPR, ctx->i32);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.base_vertex);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.start_instance);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.draw_id);
}
4426
/* Declare the VS input VGPRs. The order differs between the LS
 * configuration (vertex id, rel_auto_id, instance id) and all others
 * (vertex id, instance id, VS primitive id). For non-GS-copy shaders
 * one vertex load index per shader input is appended; those indices are
 * produced by the prolog, so they are counted in *num_prolog_vgprs.
 */
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
				   struct si_function_info *fninfo,
				   unsigned *num_prolog_vgprs)
{
	struct si_shader *shader = ctx->shader;

	add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.vertex_id);
	if (shader->key.as_ls) {
		ctx->param_rel_auto_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
		add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
	} else {
		add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
		ctx->param_vs_prim_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
	}
	add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */

	if (!shader->is_gs_copy_shader) {
		/* Vertex load indices. */
		ctx->param_vertex_index0 = fninfo->num_params;
		for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
			add_arg(fninfo, ARG_VGPR, ctx->i32);
		*num_prolog_vgprs += shader->selector->info.num_inputs;
	}
}
4451
/* Declare the SGPR inputs of the blit vertex shader: the destination
 * rectangle (packed 16-bit corner coordinates) and depth, followed by
 * either a constant color or texture coordinates depending on the
 * VS_BLIT_SGPRS property.
 */
static void declare_vs_blit_inputs(struct si_shader_context *ctx,
				   struct si_function_info *fninfo,
				   unsigned vs_blit_property)
{
	ctx->param_vs_blit_inputs = fninfo->num_params;
	add_arg(fninfo, ARG_SGPR, ctx->i32); /* i16 x1, y1 */
	add_arg(fninfo, ARG_SGPR, ctx->i32); /* i16 x2, y2 */
	add_arg(fninfo, ARG_SGPR, ctx->f32); /* depth */

	if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
		/* Color blit: one flat RGBA color. */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* color0 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* color1 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* color2 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* color3 */
	} else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
		/* Texture blit: source texcoord rectangle plus z/w. */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.x1 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.y1 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.x2 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.y2 */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.z */
		add_arg(fninfo, ARG_SGPR, ctx->f32); /* texcoord.w */
	}
}
4475
/* Declare the TES input VGPRs: the (u, v) tessellation coordinates, the
 * relative patch ID, and the patch ID (assigned into the shader ABI).
 */
static void declare_tes_input_vgprs(struct si_shader_context *ctx,
				    struct si_function_info *fninfo)
{
	ctx->param_tes_u = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_v = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_rel_patch_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
	add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tes_patch_id);
}
4484
enum {
	/* Convenient merged shader definitions.
	 *
	 * Pseudo shader types appended after PIPE_SHADER_TYPES for the
	 * merged GFX9 hardware stages (LS+HS and ES+GS); selected by
	 * create_function() when chip_class >= GFX9.
	 */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4490
4491 static void create_function(struct si_shader_context *ctx)
4492 {
4493 struct si_shader *shader = ctx->shader;
4494 struct si_function_info fninfo;
4495 LLVMTypeRef returns[16+32*4];
4496 unsigned i, num_return_sgprs;
4497 unsigned num_returns = 0;
4498 unsigned num_prolog_vgprs = 0;
4499 unsigned type = ctx->type;
4500 unsigned vs_blit_property =
4501 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
4502
4503 si_init_function_info(&fninfo);
4504
4505 /* Set MERGED shaders. */
4506 if (ctx->screen->info.chip_class >= GFX9) {
4507 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
4508 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
4509 else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
4510 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
4511 }
4512
4513 LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);
4514
4515 switch (type) {
4516 case PIPE_SHADER_VERTEX:
4517 declare_global_desc_pointers(ctx, &fninfo);
4518
4519 if (vs_blit_property) {
4520 declare_vs_blit_inputs(ctx, &fninfo, vs_blit_property);
4521
4522 /* VGPRs */
4523 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4524 break;
4525 }
4526
4527 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4528 declare_vs_specific_input_sgprs(ctx, &fninfo);
4529 ctx->param_vertex_buffers = add_arg(&fninfo, ARG_SGPR,
4530 ac_array_in_const32_addr_space(ctx->v4i32));
4531
4532 if (shader->key.as_es) {
4533 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4534 } else if (shader->key.as_ls) {
4535 /* no extra parameters */
4536 } else {
4537 if (shader->is_gs_copy_shader) {
4538 fninfo.num_params = ctx->param_vs_state_bits + 1;
4539 fninfo.num_sgpr_params = fninfo.num_params;
4540 }
4541
4542 /* The locations of the other parameters are assigned dynamically. */
4543 declare_streamout_params(ctx, &shader->selector->so,
4544 &fninfo);
4545 }
4546
4547 /* VGPRs */
4548 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4549
4550 /* Return values */
4551 if (shader->key.opt.vs_as_prim_discard_cs) {
4552 for (i = 0; i < 4; i++)
4553 returns[num_returns++] = ctx->f32; /* VGPRs */
4554 }
4555 break;
4556
4557 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
4558 declare_global_desc_pointers(ctx, &fninfo);
4559 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4560 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4561 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4562 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4563 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4564 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4565 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4566
4567 /* VGPRs */
4568 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4569 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4570
4571 /* param_tcs_offchip_offset and param_tcs_factor_offset are
4572 * placed after the user SGPRs.
4573 */
4574 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
4575 returns[num_returns++] = ctx->i32; /* SGPRs */
4576 for (i = 0; i < 11; i++)
4577 returns[num_returns++] = ctx->f32; /* VGPRs */
4578 break;
4579
4580 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
4581 /* Merged stages have 8 system SGPRs at the beginning. */
4582 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
4583 declare_per_stage_desc_pointers(ctx, &fninfo,
4584 ctx->type == PIPE_SHADER_TESS_CTRL);
4585 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4586 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4587 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4588 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4589 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4590 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4591
4592 declare_global_desc_pointers(ctx, &fninfo);
4593 declare_per_stage_desc_pointers(ctx, &fninfo,
4594 ctx->type == PIPE_SHADER_VERTEX);
4595 declare_vs_specific_input_sgprs(ctx, &fninfo);
4596
4597 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4598 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4599 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4600 ctx->param_vertex_buffers = add_arg(&fninfo, ARG_SGPR,
4601 ac_array_in_const32_addr_space(ctx->v4i32));
4602
4603 /* VGPRs (first TCS, then VS) */
4604 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4605 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4606
4607 if (ctx->type == PIPE_SHADER_VERTEX) {
4608 declare_vs_input_vgprs(ctx, &fninfo,
4609 &num_prolog_vgprs);
4610
4611 /* LS return values are inputs to the TCS main shader part. */
4612 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
4613 returns[num_returns++] = ctx->i32; /* SGPRs */
4614 for (i = 0; i < 2; i++)
4615 returns[num_returns++] = ctx->f32; /* VGPRs */
4616 } else {
4617 /* TCS return values are inputs to the TCS epilog.
4618 *
4619 * param_tcs_offchip_offset, param_tcs_factor_offset,
4620 * param_tcs_offchip_layout, and param_rw_buffers
4621 * should be passed to the epilog.
4622 */
4623 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
4624 returns[num_returns++] = ctx->i32; /* SGPRs */
4625 for (i = 0; i < 11; i++)
4626 returns[num_returns++] = ctx->f32; /* VGPRs */
4627 }
4628 break;
4629
4630 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
4631 /* Merged stages have 8 system SGPRs at the beginning. */
4632 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
4633 declare_per_stage_desc_pointers(ctx, &fninfo,
4634 ctx->type == PIPE_SHADER_GEOMETRY);
4635 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4636 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4637 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4638 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4639 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
4640 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
4641
4642 declare_global_desc_pointers(ctx, &fninfo);
4643 declare_per_stage_desc_pointers(ctx, &fninfo,
4644 (ctx->type == PIPE_SHADER_VERTEX ||
4645 ctx->type == PIPE_SHADER_TESS_EVAL));
4646 if (ctx->type == PIPE_SHADER_VERTEX) {
4647 declare_vs_specific_input_sgprs(ctx, &fninfo);
4648 } else {
4649 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4650 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4651 ctx->param_tes_offchip_addr = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4652 /* Declare as many input SGPRs as the VS has. */
4653 }
4654
4655 if (ctx->type == PIPE_SHADER_VERTEX) {
4656 ctx->param_vertex_buffers = add_arg(&fninfo, ARG_SGPR,
4657 ac_array_in_const32_addr_space(ctx->v4i32));
4658 }
4659
4660 /* VGPRs (first GS, then VS/TES) */
4661 ctx->param_gs_vtx01_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4662 ctx->param_gs_vtx23_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4663 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4664 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4665 ctx->param_gs_vtx45_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4666
4667 if (ctx->type == PIPE_SHADER_VERTEX) {
4668 declare_vs_input_vgprs(ctx, &fninfo,
4669 &num_prolog_vgprs);
4670 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
4671 declare_tes_input_vgprs(ctx, &fninfo);
4672 }
4673
4674 if (ctx->type == PIPE_SHADER_VERTEX ||
4675 ctx->type == PIPE_SHADER_TESS_EVAL) {
4676 unsigned num_user_sgprs;
4677
4678 if (ctx->type == PIPE_SHADER_VERTEX)
4679 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
4680 else
4681 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
4682
4683 /* ES return values are inputs to GS. */
4684 for (i = 0; i < 8 + num_user_sgprs; i++)
4685 returns[num_returns++] = ctx->i32; /* SGPRs */
4686 for (i = 0; i < 5; i++)
4687 returns[num_returns++] = ctx->f32; /* VGPRs */
4688 }
4689 break;
4690
4691 case PIPE_SHADER_TESS_EVAL:
4692 declare_global_desc_pointers(ctx, &fninfo);
4693 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4694 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4695 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4696 ctx->param_tes_offchip_addr = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4697
4698 if (shader->key.as_es) {
4699 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4700 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4701 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4702 } else {
4703 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4704 declare_streamout_params(ctx, &shader->selector->so,
4705 &fninfo);
4706 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4707 }
4708
4709 /* VGPRs */
4710 declare_tes_input_vgprs(ctx, &fninfo);
4711 break;
4712
4713 case PIPE_SHADER_GEOMETRY:
4714 declare_global_desc_pointers(ctx, &fninfo);
4715 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4716 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4717 ctx->param_gs_wave_id = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4718
4719 /* VGPRs */
4720 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[0]);
4721 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[1]);
4722 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4723 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[2]);
4724 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[3]);
4725 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[4]);
4726 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[5]);
4727 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4728 break;
4729
4730 case PIPE_SHADER_FRAGMENT:
4731 declare_global_desc_pointers(ctx, &fninfo);
4732 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4733 add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);
4734 add_arg_assign_checked(&fninfo, ARG_SGPR, ctx->i32,
4735 &ctx->abi.prim_mask, SI_PARAM_PRIM_MASK);
4736
4737 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_SAMPLE);
4738 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTER);
4739 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTROID);
4740 add_arg_checked(&fninfo, ARG_VGPR, v3i32, SI_PARAM_PERSP_PULL_MODEL);
4741 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_SAMPLE);
4742 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTER);
4743 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTROID);
4744 add_arg_checked(&fninfo, ARG_VGPR, ctx->f32, SI_PARAM_LINE_STIPPLE_TEX);
4745 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4746 &ctx->abi.frag_pos[0], SI_PARAM_POS_X_FLOAT);
4747 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4748 &ctx->abi.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
4749 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4750 &ctx->abi.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
4751 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4752 &ctx->abi.frag_pos[3], SI_PARAM_POS_W_FLOAT);
4753 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4754 &ctx->abi.front_face, SI_PARAM_FRONT_FACE);
4755 shader->info.face_vgpr_index = 20;
4756 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4757 &ctx->abi.ancillary, SI_PARAM_ANCILLARY);
4758 shader->info.ancillary_vgpr_index = 21;
4759 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4760 &ctx->abi.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
4761 add_arg_checked(&fninfo, ARG_VGPR, ctx->i32, SI_PARAM_POS_FIXED_PT);
4762
4763 /* Color inputs from the prolog. */
4764 if (shader->selector->info.colors_read) {
4765 unsigned num_color_elements =
4766 util_bitcount(shader->selector->info.colors_read);
4767
4768 assert(fninfo.num_params + num_color_elements <= ARRAY_SIZE(fninfo.types));
4769 for (i = 0; i < num_color_elements; i++)
4770 add_arg(&fninfo, ARG_VGPR, ctx->f32);
4771
4772 num_prolog_vgprs += num_color_elements;
4773 }
4774
4775 /* Outputs for the epilog. */
4776 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
4777 num_returns =
4778 num_return_sgprs +
4779 util_bitcount(shader->selector->info.colors_written) * 4 +
4780 shader->selector->info.writes_z +
4781 shader->selector->info.writes_stencil +
4782 shader->selector->info.writes_samplemask +
4783 1 /* SampleMaskIn */;
4784
4785 num_returns = MAX2(num_returns,
4786 num_return_sgprs +
4787 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
4788
4789 for (i = 0; i < num_return_sgprs; i++)
4790 returns[i] = ctx->i32;
4791 for (; i < num_returns; i++)
4792 returns[i] = ctx->f32;
4793 break;
4794
4795 case PIPE_SHADER_COMPUTE:
4796 declare_global_desc_pointers(ctx, &fninfo);
4797 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4798 if (shader->selector->info.uses_grid_size)
4799 add_arg_assign(&fninfo, ARG_SGPR, v3i32, &ctx->abi.num_work_groups);
4800 if (shader->selector->info.uses_block_size &&
4801 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
4802 ctx->param_block_size = add_arg(&fninfo, ARG_SGPR, v3i32);
4803
4804 unsigned cs_user_data_dwords =
4805 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];
4806 if (cs_user_data_dwords) {
4807 ctx->param_cs_user_data = add_arg(&fninfo, ARG_SGPR,
4808 LLVMVectorType(ctx->i32, cs_user_data_dwords));
4809 }
4810
4811 for (i = 0; i < 3; i++) {
4812 ctx->abi.workgroup_ids[i] = NULL;
4813 if (shader->selector->info.uses_block_id[i])
4814 add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &ctx->abi.workgroup_ids[i]);
4815 }
4816
4817 add_arg_assign(&fninfo, ARG_VGPR, v3i32, &ctx->abi.local_invocation_ids);
4818 break;
4819 default:
4820 assert(0 && "unimplemented shader");
4821 return;
4822 }
4823
4824 si_create_function(ctx, "main", returns, num_returns, &fninfo,
4825 si_get_max_workgroup_size(shader));
4826
4827 /* Reserve register locations for VGPR inputs the PS prolog may need. */
4828 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
4829 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
4830 "InitialPSInputAddr",
4831 S_0286D0_PERSP_SAMPLE_ENA(1) |
4832 S_0286D0_PERSP_CENTER_ENA(1) |
4833 S_0286D0_PERSP_CENTROID_ENA(1) |
4834 S_0286D0_LINEAR_SAMPLE_ENA(1) |
4835 S_0286D0_LINEAR_CENTER_ENA(1) |
4836 S_0286D0_LINEAR_CENTROID_ENA(1) |
4837 S_0286D0_FRONT_FACE_ENA(1) |
4838 S_0286D0_ANCILLARY_ENA(1) |
4839 S_0286D0_POS_FIXED_PT_ENA(1));
4840 }
4841
4842 shader->info.num_input_sgprs = 0;
4843 shader->info.num_input_vgprs = 0;
4844
4845 for (i = 0; i < fninfo.num_sgpr_params; ++i)
4846 shader->info.num_input_sgprs += ac_get_type_size(fninfo.types[i]) / 4;
4847
4848 for (; i < fninfo.num_params; ++i)
4849 shader->info.num_input_vgprs += ac_get_type_size(fninfo.types[i]) / 4;
4850
4851 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
4852 shader->info.num_input_vgprs -= num_prolog_vgprs;
4853
4854 if (shader->key.as_ls ||
4855 ctx->type == PIPE_SHADER_TESS_CTRL ||
4856 /* GFX9 has the ESGS ring buffer in LDS. */
4857 type == SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY)
4858 ac_declare_lds_as_pointer(&ctx->ac);
4859 }
4860
/**
 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
 * for later use.
 *
 * - ESGS ring: only loaded on <= GFX8, where the ES->GS ring lives in memory
 *   (on GFX9 the ESGS ring is in LDS instead).
 * - GSVS ring: loaded verbatim for the GS copy shader; for a real GS the
 *   base descriptor is rewritten per vertex stream to apply the swizzled
 *   memory layout (see the comment inside the loop).
 * - TES: loads the off-chip tessellation ring descriptor instead.
 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	if (ctx->screen->info.chip_class <= GFX8 &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		/* GS reads the ring through one buffer slot, ES writes it
		 * through another. */
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							     : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The copy shader reads the ring with the descriptor as-is. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Build one overridden descriptor per vertex stream that
		 * actually has stream outputs. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			/* 4 bytes per component, for all emitted vertices of
			 * this stream. */
			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* Dwords 0-1 (as one i64) hold the base address; add
			 * this stream's byte offset into the ring. */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			/* Dword 1 also carries the stride and the swizzle
			 * enable bit; OR them into the existing value. */
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			/* Dword 2: num_records. */
			ring = LLVMBuildInsertElement(builder, ring,
					LLVMConstInt(ctx->i32, num_records, 0),
					LLVMConstInt(ctx->i32, 2, 0), "");
			/* Dword 3: format and swizzled-indexing controls. */
			ring = LLVMBuildInsertElement(builder, ring,
				LLVMConstInt(ctx->i32,
					     S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
					     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
					     S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
					     S_008F0C_ADD_TID_ENABLE(1),
					     0),
				LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
		ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
	}
}
4961
4962 static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
4963 LLVMValueRef param_rw_buffers,
4964 unsigned param_pos_fixed_pt)
4965 {
4966 LLVMBuilderRef builder = ctx->ac.builder;
4967 LLVMValueRef slot, desc, offset, row, bit, address[2];
4968
4969 /* Use the fixed-point gl_FragCoord input.
4970 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
4971 * per coordinate to get the repeating effect.
4972 */
4973 address[0] = si_unpack_param(ctx, param_pos_fixed_pt, 0, 5);
4974 address[1] = si_unpack_param(ctx, param_pos_fixed_pt, 16, 5);
4975
4976 /* Load the buffer descriptor. */
4977 slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
4978 desc = ac_build_load_to_sgpr(&ctx->ac, param_rw_buffers, slot);
4979
4980 /* The stipple pattern is 32x32, each row has 32 bits. */
4981 offset = LLVMBuildMul(builder, address[1],
4982 LLVMConstInt(ctx->i32, 4, 0), "");
4983 row = buffer_load_const(ctx, desc, offset);
4984 row = ac_to_integer(&ctx->ac, row);
4985 bit = LLVMBuildLShr(builder, row, address[0], "");
4986 bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
4987 ac_build_kill_if_false(&ctx->ac, bit);
4988 }
4989
/**
 * Parse the config section that LLVM emits into the shader binary — a
 * sequence of (register offset, value) dword pairs — and fill in *conf.
 *
 * \param binary         compiled shader binary holding the config data
 * \param conf           output: SGPR/VGPR counts, LDS/scratch sizes, SPI PS
 *                       input masks, raw rsrc1/rsrc2 values
 * \param symbol_offset  offset of the symbol whose config section to read
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer: it is only needed
	 * when the scratch resource dword symbols are actually relocated.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* Each entry is 8 bytes: a little-endian register offset dword
	 * followed by its little-endian value dword. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The PS accessors (G_00B028_*) are used for every
			 * stage's RSRC1 — assumes the SGPRS/VGPRS/FLOAT_MODE
			 * field layout matches across stages. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode = G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		/* 0x4 and 0x8 are not hardware registers; presumably
		 * LLVM-defined pseudo entries for spill counts — TODO confirm
		 * against the LLVM AMDGPU backend. */
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			static bool printed;

			/* Warn only once per process about registers this
			 * parser doesn't know. */
			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
			break;
		}
	}

	/* Fall back to INPUT_ENA when the binary didn't set INPUT_ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5073
5074 void si_shader_apply_scratch_relocs(struct si_shader *shader,
5075 uint64_t scratch_va)
5076 {
5077 unsigned i;
5078 uint32_t scratch_rsrc_dword0 = scratch_va;
5079 uint32_t scratch_rsrc_dword1 =
5080 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5081
5082 /* Enable scratch coalescing. */
5083 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5084
5085 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5086 const struct ac_shader_reloc *reloc =
5087 &shader->binary.relocs[i];
5088 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5089 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5090 &scratch_rsrc_dword0, 4);
5091 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5092 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5093 &scratch_rsrc_dword1, 4);
5094 }
5095 }
5096 }
5097
5098 /* For the UMR disassembler. */
5099 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
5100 #define DEBUGGER_NUM_MARKERS 5
5101
5102 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
5103 {
5104 unsigned size = shader->binary.code_size;
5105
5106 if (shader->prolog)
5107 size += shader->prolog->binary.code_size;
5108 if (shader->previous_stage)
5109 size += shader->previous_stage->binary.code_size;
5110 if (shader->prolog2)
5111 size += shader->prolog2->binary.code_size;
5112 if (shader->epilog)
5113 size += shader->epilog->binary.code_size;
5114 return size + DEBUGGER_NUM_MARKERS * 4;
5115 }
5116
5117 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5118 {
5119 const struct ac_shader_binary *prolog =
5120 shader->prolog ? &shader->prolog->binary : NULL;
5121 const struct ac_shader_binary *previous_stage =
5122 shader->previous_stage ? &shader->previous_stage->binary : NULL;
5123 const struct ac_shader_binary *prolog2 =
5124 shader->prolog2 ? &shader->prolog2->binary : NULL;
5125 const struct ac_shader_binary *epilog =
5126 shader->epilog ? &shader->epilog->binary : NULL;
5127 const struct ac_shader_binary *mainb = &shader->binary;
5128 unsigned bo_size = si_get_shader_binary_size(shader) +
5129 (!epilog ? mainb->rodata_size : 0);
5130 unsigned char *ptr;
5131
5132 assert(!prolog || !prolog->rodata_size);
5133 assert(!previous_stage || !previous_stage->rodata_size);
5134 assert(!prolog2 || !prolog2->rodata_size);
5135 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
5136 !mainb->rodata_size);
5137 assert(!epilog || !epilog->rodata_size);
5138
5139 si_resource_reference(&shader->bo, NULL);
5140 shader->bo = si_aligned_buffer_create(&sscreen->b,
5141 sscreen->cpdma_prefetch_writes_memory ?
5142 0 : SI_RESOURCE_FLAG_READ_ONLY,
5143 PIPE_USAGE_IMMUTABLE,
5144 align(bo_size, SI_CPDMA_ALIGNMENT),
5145 256);
5146 if (!shader->bo)
5147 return -ENOMEM;
5148
5149 /* Upload. */
5150 ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
5151 PIPE_TRANSFER_READ_WRITE |
5152 PIPE_TRANSFER_UNSYNCHRONIZED |
5153 RADEON_TRANSFER_TEMPORARY);
5154
5155 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
5156 * endian-independent. */
5157 if (prolog) {
5158 memcpy(ptr, prolog->code, prolog->code_size);
5159 ptr += prolog->code_size;
5160 }
5161 if (previous_stage) {
5162 memcpy(ptr, previous_stage->code, previous_stage->code_size);
5163 ptr += previous_stage->code_size;
5164 }
5165 if (prolog2) {
5166 memcpy(ptr, prolog2->code, prolog2->code_size);
5167 ptr += prolog2->code_size;
5168 }
5169
5170 memcpy(ptr, mainb->code, mainb->code_size);
5171 ptr += mainb->code_size;
5172
5173 if (epilog) {
5174 memcpy(ptr, epilog->code, epilog->code_size);
5175 ptr += epilog->code_size;
5176 } else if (mainb->rodata_size > 0) {
5177 memcpy(ptr, mainb->rodata, mainb->rodata_size);
5178 ptr += mainb->rodata_size;
5179 }
5180
5181 /* Add end-of-code markers for the UMR disassembler. */
5182 uint32_t *ptr32 = (uint32_t*)ptr;
5183 for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
5184 ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
5185
5186 sscreen->ws->buffer_unmap(shader->bo->buf);
5187 return 0;
5188 }
5189
/**
 * Print the disassembly of one shader part to \p file and, when a debug
 * callback is installed, also stream it through pipe_debug_message.
 *
 * Falls back to a raw hex dump of the code dwords when no disassembly
 * string was produced.
 */
static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
				       struct pipe_debug_callback *debug,
				       const char *name, FILE *file)
{
	char *line, *p;
	unsigned i, count;

	if (binary->disasm_string) {
		fprintf(file, "Shader %s disassembly:\n", name);
		fprintf(file, "%s", binary->disasm_string);

		if (debug && debug->debug_message) {
			/* Very long debug messages are cut off, so send the
			 * disassembly one line at a time. This causes more
			 * overhead, but on the plus side it simplifies
			 * parsing of resulting logs.
			 */
			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly Begin");

			line = binary->disasm_string;
			while (*line) {
				p = util_strchrnul(line, '\n');
				count = p - line;

				/* Skip empty lines. */
				if (count) {
					pipe_debug_message(debug, SHADER_INFO,
							   "%.*s", count, line);
				}

				/* util_strchrnul points at the terminating
				 * NUL when there is no further newline. */
				if (!*p)
					break;
				line = p + 1;
			}

			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly End");
		}
	} else {
		fprintf(file, "Shader %s binary:\n", name);
		/* Dump one dword per line, most significant byte first. */
		for (i = 0; i < binary->code_size; i += 4) {
			fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
				binary->code[i + 3], binary->code[i + 2],
				binary->code[i + 1], binary->code[i]);
		}
	}
}
5237
5238 static void si_calculate_max_simd_waves(struct si_shader *shader)
5239 {
5240 struct si_screen *sscreen = shader->selector->screen;
5241 struct si_shader_config *conf = &shader->config;
5242 unsigned num_inputs = shader->selector->info.num_inputs;
5243 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
5244 unsigned lds_per_wave = 0;
5245 unsigned max_simd_waves;
5246
5247 max_simd_waves = ac_get_max_simd_waves(sscreen->info.family);
5248
5249 /* Compute LDS usage for PS. */
5250 switch (shader->selector->type) {
5251 case PIPE_SHADER_FRAGMENT:
5252 /* The minimum usage per wave is (num_inputs * 48). The maximum
5253 * usage is (num_inputs * 48 * 16).
5254 * We can get anything in between and it varies between waves.
5255 *
5256 * The 48 bytes per input for a single primitive is equal to
5257 * 4 bytes/component * 4 components/input * 3 points.
5258 *
5259 * Other stages don't know the size at compile time or don't
5260 * allocate LDS per wave, but instead they do it per thread group.
5261 */
5262 lds_per_wave = conf->lds_size * lds_increment +
5263 align(num_inputs * 48, lds_increment);
5264 break;
5265 case PIPE_SHADER_COMPUTE:
5266 if (shader->selector) {
5267 unsigned max_workgroup_size =
5268 si_get_max_workgroup_size(shader);
5269 lds_per_wave = (conf->lds_size * lds_increment) /
5270 DIV_ROUND_UP(max_workgroup_size, 64);
5271 }
5272 break;
5273 }
5274
5275 /* Compute the per-SIMD wave counts. */
5276 if (conf->num_sgprs) {
5277 max_simd_waves =
5278 MIN2(max_simd_waves,
5279 ac_get_num_physical_sgprs(sscreen->info.chip_class) / conf->num_sgprs);
5280 }
5281
5282 if (conf->num_vgprs)
5283 max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);
5284
5285 /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
5286 * 16KB makes some SIMDs unoccupied). */
5287 if (lds_per_wave)
5288 max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
5289
5290 conf->max_simd_waves = max_simd_waves;
5291 }
5292
5293 void si_shader_dump_stats_for_shader_db(const struct si_shader *shader,
5294 struct pipe_debug_callback *debug)
5295 {
5296 const struct si_shader_config *conf = &shader->config;
5297
5298 pipe_debug_message(debug, SHADER_INFO,
5299 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
5300 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
5301 "Spilled VGPRs: %d PrivMem VGPRs: %d",
5302 conf->num_sgprs, conf->num_vgprs,
5303 si_get_shader_binary_size(shader),
5304 conf->lds_size, conf->scratch_bytes_per_wave,
5305 conf->max_simd_waves, conf->spilled_sgprs,
5306 conf->spilled_vgprs, conf->private_mem_vgprs);
5307 }
5308
5309 static void si_shader_dump_stats(struct si_screen *sscreen,
5310 const struct si_shader *shader,
5311 unsigned processor,
5312 FILE *file,
5313 bool check_debug_option)
5314 {
5315 const struct si_shader_config *conf = &shader->config;
5316
5317 if (!check_debug_option ||
5318 si_can_dump_shader(sscreen, processor)) {
5319 if (processor == PIPE_SHADER_FRAGMENT) {
5320 fprintf(file, "*** SHADER CONFIG ***\n"
5321 "SPI_PS_INPUT_ADDR = 0x%04x\n"
5322 "SPI_PS_INPUT_ENA = 0x%04x\n",
5323 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
5324 }
5325
5326 fprintf(file, "*** SHADER STATS ***\n"
5327 "SGPRS: %d\n"
5328 "VGPRS: %d\n"
5329 "Spilled SGPRs: %d\n"
5330 "Spilled VGPRs: %d\n"
5331 "Private memory VGPRs: %d\n"
5332 "Code Size: %d bytes\n"
5333 "LDS: %d blocks\n"
5334 "Scratch: %d bytes per wave\n"
5335 "Max Waves: %d\n"
5336 "********************\n\n\n",
5337 conf->num_sgprs, conf->num_vgprs,
5338 conf->spilled_sgprs, conf->spilled_vgprs,
5339 conf->private_mem_vgprs,
5340 si_get_shader_binary_size(shader),
5341 conf->lds_size, conf->scratch_bytes_per_wave,
5342 conf->max_simd_waves);
5343 }
5344 }
5345
5346 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
5347 {
5348 switch (processor) {
5349 case PIPE_SHADER_VERTEX:
5350 if (shader->key.as_es)
5351 return "Vertex Shader as ES";
5352 else if (shader->key.as_ls)
5353 return "Vertex Shader as LS";
5354 else if (shader->key.opt.vs_as_prim_discard_cs)
5355 return "Vertex Shader as Primitive Discard CS";
5356 else
5357 return "Vertex Shader as VS";
5358 case PIPE_SHADER_TESS_CTRL:
5359 return "Tessellation Control Shader";
5360 case PIPE_SHADER_TESS_EVAL:
5361 if (shader->key.as_es)
5362 return "Tessellation Evaluation Shader as ES";
5363 else
5364 return "Tessellation Evaluation Shader as VS";
5365 case PIPE_SHADER_GEOMETRY:
5366 if (shader->is_gs_copy_shader)
5367 return "GS Copy Shader as VS";
5368 else
5369 return "Geometry Shader";
5370 case PIPE_SHADER_FRAGMENT:
5371 return "Pixel Shader";
5372 case PIPE_SHADER_COMPUTE:
5373 return "Compute Shader";
5374 default:
5375 return "Unknown Shader";
5376 }
5377 }
5378
/**
 * Dump everything about a shader: its key, the recorded LLVM IR (if any),
 * the disassembly of all parts, and its statistics.
 *
 * \param check_debug_option  when true, gate each section on the user's
 *                            per-stage dump option (and the noasm flag for
 *                            the disassembly section)
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor))
		si_dump_shader_key(processor, shader, file);

	/* The IR string is only present when it was recorded at compile time
	 * (see sscreen->record_llvm_ir in si_compile_llvm). */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		if (shader->previous_stage &&
		    shader->previous_stage->binary.llvm_ir_string) {
			fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
				si_get_shader_name(shader, processor));
			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
		}

		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (si_can_dump_shader(sscreen, processor) &&
	     !(sscreen->debug_flags & DBG(NO_ASM)))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the order they are concatenated at
		 * upload time (see si_shader_binary_upload). */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, processor, file,
			     check_debug_option);
}
5426
/**
 * Compile an LLVM module to machine code and parse the resulting shader
 * config into *conf.
 *
 * Also handles the debug paths: dumping the IR to stderr, recording the IR
 * string for later shader dumps, and binary replacement via
 * si_replace_shader.
 *
 * \param less_optimized  compile with the lower-optimization compiler
 * \return 0 on success, a negative error code on failure
 */
static int si_compile_llvm(struct si_screen *sscreen,
			   struct ac_shader_binary *binary,
			   struct si_shader_config *conf,
			   struct ac_llvm_compiler *compiler,
			   LLVMModuleRef mod,
			   struct pipe_debug_callback *debug,
			   unsigned processor,
			   const char *name,
			   bool less_optimized)
{
	int r = 0;
	/* Global compilation counter; identifies this shader in dumps and
	 * for shader replacement. */
	unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

	if (si_can_dump_shader(sscreen, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			ac_dump_module(mod);
			fprintf(stderr, "\n");
		}
	}

	/* Keep a copy of the IR so si_shader_dump can print it later. */
	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* If si_replace_shader filled in a replacement binary (presumably a
	 * debugging aid — see its definition), skip compilation. */
	if (!si_replace_shader(count, binary)) {
		r = si_llvm_compile(mod, binary, compiler, debug,
				    less_optimized);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The raw config data has been parsed into *conf; release it. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;

	/* Some shaders can't have rodata because their binaries can be
	 * concatenated (see si_shader_binary_upload).
	 */
	if (binary->rodata_size &&
	    (processor == PIPE_SHADER_VERTEX ||
	     processor == PIPE_SHADER_TESS_CTRL ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_FRAGMENT)) {
		fprintf(stderr, "radeonsi: The shader can't have rodata.");
		return -EINVAL;
	}

	return r;
}
5498
5499 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5500 {
5501 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5502 LLVMBuildRetVoid(ctx->ac.builder);
5503 else
5504 LLVMBuildRet(ctx->ac.builder, ret);
5505 }
5506
5507 /* Generate code for the hardware VS shader stage to go with a geometry shader */
5508 struct si_shader *
5509 si_generate_gs_copy_shader(struct si_screen *sscreen,
5510 struct ac_llvm_compiler *compiler,
5511 struct si_shader_selector *gs_selector,
5512 struct pipe_debug_callback *debug)
5513 {
5514 struct si_shader_context ctx;
5515 struct si_shader *shader;
5516 LLVMBuilderRef builder;
5517 struct si_shader_output_values outputs[SI_MAX_VS_OUTPUTS];
5518 struct tgsi_shader_info *gsinfo = &gs_selector->info;
5519 int i, r;
5520
5521
5522 shader = CALLOC_STRUCT(si_shader);
5523 if (!shader)
5524 return NULL;
5525
5526 /* We can leave the fence as permanently signaled because the GS copy
5527 * shader only becomes visible globally after it has been compiled. */
5528 util_queue_fence_init(&shader->ready);
5529
5530 shader->selector = gs_selector;
5531 shader->is_gs_copy_shader = true;
5532
5533 si_init_shader_ctx(&ctx, sscreen, compiler);
5534 ctx.shader = shader;
5535 ctx.type = PIPE_SHADER_VERTEX;
5536
5537 builder = ctx.ac.builder;
5538
5539 create_function(&ctx);
5540 preload_ring_buffers(&ctx);
5541
5542 LLVMValueRef voffset =
5543 LLVMBuildMul(ctx.ac.builder, ctx.abi.vertex_id,
5544 LLVMConstInt(ctx.i32, 4, 0), "");
5545
5546 /* Fetch the vertex stream ID.*/
5547 LLVMValueRef stream_id;
5548
5549 if (gs_selector->so.num_outputs)
5550 stream_id = si_unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
5551 else
5552 stream_id = ctx.i32_0;
5553
5554 /* Fill in output information. */
5555 for (i = 0; i < gsinfo->num_outputs; ++i) {
5556 outputs[i].semantic_name = gsinfo->output_semantic_name[i];
5557 outputs[i].semantic_index = gsinfo->output_semantic_index[i];
5558
5559 for (int chan = 0; chan < 4; chan++) {
5560 outputs[i].vertex_stream[chan] =
5561 (gsinfo->output_streams[i] >> (2 * chan)) & 3;
5562 }
5563 }
5564
5565 LLVMBasicBlockRef end_bb;
5566 LLVMValueRef switch_inst;
5567
5568 end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
5569 switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);
5570
5571 for (int stream = 0; stream < 4; stream++) {
5572 LLVMBasicBlockRef bb;
5573 unsigned offset;
5574
5575 if (!gsinfo->num_stream_output_components[stream])
5576 continue;
5577
5578 if (stream > 0 && !gs_selector->so.num_outputs)
5579 continue;
5580
5581 bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
5582 LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
5583 LLVMPositionBuilderAtEnd(builder, bb);
5584
5585 /* Fetch vertex data from GSVS ring */
5586 offset = 0;
5587 for (i = 0; i < gsinfo->num_outputs; ++i) {
5588 for (unsigned chan = 0; chan < 4; chan++) {
5589 if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
5590 outputs[i].vertex_stream[chan] != stream) {
5591 outputs[i].values[chan] = LLVMGetUndef(ctx.f32);
5592 continue;
5593 }
5594
5595 LLVMValueRef soffset = LLVMConstInt(ctx.i32,
5596 offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
5597 offset++;
5598
5599 outputs[i].values[chan] =
5600 ac_build_buffer_load(&ctx.ac,
5601 ctx.gsvs_ring[0], 1,
5602 ctx.i32_0, voffset,
5603 soffset, 0, 1, 1,
5604 true, false);
5605 }
5606 }
5607
5608 /* Streamout and exports. */
5609 if (gs_selector->so.num_outputs) {
5610 si_llvm_emit_streamout(&ctx, outputs,
5611 gsinfo->num_outputs,
5612 stream);
5613 }
5614
5615 if (stream == 0) {
5616 /* Vertex color clamping.
5617 *
5618 * This uses a state constant loaded in a user data SGPR and
5619 * an IF statement is added that clamps all colors if the constant
5620 * is true.
5621 */
5622 struct lp_build_if_state if_ctx;
5623 LLVMValueRef v[2], cond = NULL;
5624 LLVMBasicBlockRef blocks[2];
5625
5626 for (unsigned i = 0; i < gsinfo->num_outputs; i++) {
5627 if (gsinfo->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
5628 gsinfo->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
5629 continue;
5630
5631 /* We've found a color. */
5632 if (!cond) {
5633 /* The state is in the first bit of the user SGPR. */
5634 cond = LLVMGetParam(ctx.main_fn,
5635 ctx.param_vs_state_bits);
5636 cond = LLVMBuildTrunc(ctx.ac.builder, cond,
5637 ctx.i1, "");
5638 lp_build_if(&if_ctx, &ctx.gallivm, cond);
5639 /* Remember blocks for Phi. */
5640 blocks[0] = if_ctx.true_block;
5641 blocks[1] = if_ctx.entry_block;
5642 }
5643
5644 for (unsigned j = 0; j < 4; j++) {
5645 /* Insert clamp into the true block. */
5646 v[0] = ac_build_clamp(&ctx.ac, outputs[i].values[j]);
5647 v[1] = outputs[i].values[j];
5648
5649 /* Insert Phi into the endif block. */
5650 LLVMPositionBuilderAtEnd(ctx.ac.builder, if_ctx.merge_block);
5651 outputs[i].values[j] = ac_build_phi(&ctx.ac, ctx.f32, 2, v, blocks);
5652 LLVMPositionBuilderAtEnd(ctx.ac.builder, if_ctx.true_block);
5653 }
5654 }
5655 if (cond)
5656 lp_build_endif(&if_ctx);
5657
5658 si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);
5659 }
5660
5661 LLVMBuildBr(builder, end_bb);
5662 }
5663
5664 LLVMPositionBuilderAtEnd(builder, end_bb);
5665
5666 LLVMBuildRetVoid(ctx.ac.builder);
5667
5668 ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
5669 si_llvm_optimize_module(&ctx);
5670
5671 r = si_compile_llvm(sscreen, &ctx.shader->binary,
5672 &ctx.shader->config, ctx.compiler,
5673 ctx.ac.module,
5674 debug, PIPE_SHADER_GEOMETRY,
5675 "GS Copy Shader", false);
5676 if (!r) {
5677 if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
5678 fprintf(stderr, "GS Copy Shader:\n");
5679 si_shader_dump(sscreen, ctx.shader, debug,
5680 PIPE_SHADER_GEOMETRY, stderr, true);
5681 r = si_shader_binary_upload(sscreen, ctx.shader);
5682 }
5683
5684 si_llvm_dispose(&ctx);
5685
5686 if (r != 0) {
5687 FREE(shader);
5688 shader = NULL;
5689 } else {
5690 si_fix_resource_usage(sscreen, shader);
5691 }
5692 return shader;
5693 }
5694
5695 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5696 const struct si_vs_prolog_bits *prolog,
5697 const char *prefix, FILE *f)
5698 {
5699 fprintf(f, " %s.instance_divisor_is_one = %u\n",
5700 prefix, prolog->instance_divisor_is_one);
5701 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
5702 prefix, prolog->instance_divisor_is_fetched);
5703 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
5704 prefix, prolog->unpack_instance_id_from_vertex_id);
5705 fprintf(f, " %s.ls_vgpr_fix = %u\n",
5706 prefix, prolog->ls_vgpr_fix);
5707
5708 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
5709 fprintf(f, " mono.vs.fix_fetch = {");
5710 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
5711 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
5712 if (i)
5713 fprintf(f, ", ");
5714 if (!fix.bits)
5715 fprintf(f, "0");
5716 else
5717 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
5718 fix.u.num_channels_m1, fix.u.format);
5719 }
5720 fprintf(f, "}\n");
5721 }
5722
/* Dump the shader variant key of \p shader to \p f for debugging.
 * The set of fields printed depends on \p processor (a PIPE_SHADER_* value). */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		/* Fields for the primitive-discard compute-shader VS variant. */
		fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
			key->opt.vs_as_prim_discard_cs);
		fprintf(f, " opt.cs_prim_type = %s\n",
			tgsi_primitive_names[key->opt.cs_prim_type]);
		fprintf(f, " opt.cs_indexed = %u\n",
			key->opt.cs_indexed);
		fprintf(f, " opt.cs_instancing = %u\n",
			key->opt.cs_instancing);
		fprintf(f, " opt.cs_primitive_restart = %u\n",
			key->opt.cs_primitive_restart);
		fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
			key->opt.cs_provoking_vertex_first);
		fprintf(f, " opt.cs_need_correct_orientation = %u\n",
			key->opt.cs_need_correct_orientation);
		fprintf(f, " opt.cs_cull_front = %u\n",
			key->opt.cs_cull_front);
		fprintf(f, " opt.cs_cull_back = %u\n",
			key->opt.cs_cull_back);
		fprintf(f, " opt.cs_cull_z = %u\n",
			key->opt.cs_cull_z);
		fprintf(f, " opt.cs_halfz_clip_space = %u\n",
			key->opt.cs_halfz_clip_space);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the LS (VS) prolog key is carried inside the TCS
		 * key — see the merged-shader notes in si_compile_tgsi_main. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no meaningful key fields. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the ES (VS) prolog key is carried inside the GS key. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Output-elimination fields only apply to the stage that actually
	 * exports vertex outputs (i.e. not an LS/ES part). */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5824
/* Initialize the shader context for LLVM translation and register the
 * radeonsi-specific TGSI opcode / fetch callbacks on the TGSI->LLVM
 * translator (bld_base). */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct ac_llvm_compiler *compiler)
{
	struct lp_build_tgsi_context *bld_base;

	si_llvm_context_init(ctx, sscreen, compiler);

	bld_base = &ctx->bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Fragment shader interpolation opcodes. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID].emit = build_interp_intrinsic;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE].emit = build_interp_intrinsic;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET].emit = build_interp_intrinsic;

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;

	/* Derivative opcodes. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Subgroup (wave) opcodes; READ_FIRST/READ_INVOC map to the
	 * amdgcn readfirstlane/readlane intrinsics. */
	bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
	bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_tgsi_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_tgsi_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
5862
5863 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5864 {
5865 struct si_shader *shader = ctx->shader;
5866 struct tgsi_shader_info *info = &shader->selector->info;
5867
5868 if ((ctx->type != PIPE_SHADER_VERTEX &&
5869 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5870 shader->key.as_ls ||
5871 shader->key.as_es)
5872 return;
5873
5874 ac_optimize_vs_outputs(&ctx->ac,
5875 ctx->main_fn,
5876 shader->info.vs_output_param_offset,
5877 info->num_outputs,
5878 &shader->info.nr_param_exports);
5879 }
5880
5881 static void si_init_exec_from_input(struct si_shader_context *ctx,
5882 unsigned param, unsigned bitoffset)
5883 {
5884 LLVMValueRef args[] = {
5885 LLVMGetParam(ctx->main_fn, param),
5886 LLVMConstInt(ctx->i32, bitoffset, 0),
5887 };
5888 ac_build_intrinsic(&ctx->ac,
5889 "llvm.amdgcn.init.exec.from.input",
5890 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
5891 }
5892
5893 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
5894 const struct si_vs_prolog_bits *key)
5895 {
5896 /* VGPR initialization fixup for Vega10 and Raven is always done in the
5897 * VS prolog. */
5898 return sel->vs_needs_prolog || key->ls_vgpr_fix;
5899 }
5900
/* Translate the main shader part into LLVM IR.
 *
 * Sets up per-stage input/output callbacks on the TGSI translator and the
 * shader ABI, creates the LLVM function, preloads ring buffers, emits the
 * GFX9 merged-shader EXEC/barrier setup, and then runs the TGSI or NIR ->
 * LLVM translator.
 *
 * \return false if the TGSI/NIR translation failed.
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	// TODO clean all this up!
	/* Pick the input-fetch and epilogue callbacks for each stage.
	 * The epilogue choice depends on which merged/streamout part
	 * (LS/ES/prim-discard-CS) this shader is compiled as. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else if (shader->key.opt.vs_as_prim_discard_cs)
			ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		ctx->abi.load_base_vertex = get_base_vertex;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
		ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		ctx->abi.load_tess_varyings = si_nir_load_input_tes;
		ctx->abi.load_tess_coord = si_load_tess_coord;
		ctx->abi.load_tess_level = si_load_tess_level;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		ctx->abi.load_inputs = si_nir_load_input_gs;
		ctx->abi.emit_vertex = si_llvm_emit_vertex;
		ctx->abi.emit_primitive = si_llvm_emit_primitive;
		ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		ctx->abi.emit_outputs = si_llvm_return_fs_outputs;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		ctx->abi.lookup_interp_param = si_nir_lookup_interp_param;
		ctx->abi.load_sample_position = load_sample_position;
		ctx->abi.load_sample_mask_in = load_sample_mask_in;
		ctx->abi.emit_kill = si_llvm_emit_kill;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->abi.load_local_group_size = get_block_size;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	ctx->abi.load_ubo = load_ubo;
	ctx->abi.load_ssbo = load_ssbo;

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!shader->is_monolithic &&
		    sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			if (!shader->is_monolithic)
				ac_init_exec_full_mask(&ctx->ac);

			/* Wrap the main part so that only the threads of this
			 * half of the merged wave execute it. */
			LLVMValueRef num_threads = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
			LLVMValueRef ena =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), num_threads, "");
			lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);

			/* The barrier must execute for all shaders in a
			 * threadgroup.
			 *
			 * Execute the barrier inside the conditional block,
			 * so that empty waves can jump directly to s_endpgm,
			 * which will also signal the barrier.
			 *
			 * If the shader is TCS and the TCS epilog is present
			 * and contains a barrier, it will wait there and then
			 * reach s_endpgm.
			 */
			si_llvm_emit_barrier(NULL, bld_base, NULL);
		}
	}

	/* Allocate i32 slots for the 6 tessellation factors written by
	 * TCS invocation 0 (used when all invocations define them). */
	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->tcs_info.tessfactors_are_def_in_all_invocs) {
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		}
	}

	/* One emitted-vertex counter per GS stream. */
	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				ac_build_alloca(&ctx->ac, ctx->i32, "");
		}
	}

	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, ctx->i1true,
			       ctx->postponed_kill);
	}

	/* Run the translator: TGSI if tokens are present, NIR otherwise. */
	if (sel->tokens) {
		if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
			fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
			return false;
		}
	} else {
		if (!si_nir_build_llvm(ctx, sel->nir)) {
			fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
			return false;
		}
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6060
6061 /**
6062 * Compute the VS prolog key, which contains all the information needed to
6063 * build the VS prolog function, and set shader->info bits where needed.
6064 *
6065 * \param info Shader info of the vertex shader.
6066 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
6067 * \param prolog_key Key of the VS prolog
6068 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
6069 * \param key Output shader part key.
6070 */
6071 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
6072 unsigned num_input_sgprs,
6073 const struct si_vs_prolog_bits *prolog_key,
6074 struct si_shader *shader_out,
6075 union si_shader_part_key *key)
6076 {
6077 memset(key, 0, sizeof(*key));
6078 key->vs_prolog.states = *prolog_key;
6079 key->vs_prolog.num_input_sgprs = num_input_sgprs;
6080 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
6081 key->vs_prolog.as_ls = shader_out->key.as_ls;
6082 key->vs_prolog.as_es = shader_out->key.as_es;
6083
6084 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
6085 key->vs_prolog.as_ls = 1;
6086 key->vs_prolog.num_merged_next_stage_vgprs = 2;
6087 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
6088 key->vs_prolog.as_es = 1;
6089 key->vs_prolog.num_merged_next_stage_vgprs = 5;
6090 }
6091
6092 /* Enable loading the InstanceID VGPR. */
6093 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
6094
6095 if ((key->vs_prolog.states.instance_divisor_is_one |
6096 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
6097 shader_out->info.uses_instanceid = true;
6098 }
6099
6100 /**
6101 * Compute the PS prolog key, which contains all the information needed to
6102 * build the PS prolog function, and set related bits in shader->config.
6103 */
6104 static void si_get_ps_prolog_key(struct si_shader *shader,
6105 union si_shader_part_key *key,
6106 bool separate_prolog)
6107 {
6108 struct tgsi_shader_info *info = &shader->selector->info;
6109
6110 memset(key, 0, sizeof(*key));
6111 key->ps_prolog.states = shader->key.part.ps.prolog;
6112 key->ps_prolog.colors_read = info->colors_read;
6113 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
6114 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
6115 key->ps_prolog.wqm = info->uses_derivatives &&
6116 (key->ps_prolog.colors_read ||
6117 key->ps_prolog.states.force_persp_sample_interp ||
6118 key->ps_prolog.states.force_linear_sample_interp ||
6119 key->ps_prolog.states.force_persp_center_interp ||
6120 key->ps_prolog.states.force_linear_center_interp ||
6121 key->ps_prolog.states.bc_optimize_for_persp ||
6122 key->ps_prolog.states.bc_optimize_for_linear);
6123 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
6124
6125 if (info->colors_read) {
6126 unsigned *color = shader->selector->color_attr_index;
6127
6128 if (shader->key.part.ps.prolog.color_two_side) {
6129 /* BCOLORs are stored after the last input. */
6130 key->ps_prolog.num_interp_inputs = info->num_inputs;
6131 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
6132 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
6133 }
6134
6135 for (unsigned i = 0; i < 2; i++) {
6136 unsigned interp = info->input_interpolate[color[i]];
6137 unsigned location = info->input_interpolate_loc[color[i]];
6138
6139 if (!(info->colors_read & (0xf << i*4)))
6140 continue;
6141
6142 key->ps_prolog.color_attr_index[i] = color[i];
6143
6144 if (shader->key.part.ps.prolog.flatshade_colors &&
6145 interp == TGSI_INTERPOLATE_COLOR)
6146 interp = TGSI_INTERPOLATE_CONSTANT;
6147
6148 switch (interp) {
6149 case TGSI_INTERPOLATE_CONSTANT:
6150 key->ps_prolog.color_interp_vgpr_index[i] = -1;
6151 break;
6152 case TGSI_INTERPOLATE_PERSPECTIVE:
6153 case TGSI_INTERPOLATE_COLOR:
6154 /* Force the interpolation location for colors here. */
6155 if (shader->key.part.ps.prolog.force_persp_sample_interp)
6156 location = TGSI_INTERPOLATE_LOC_SAMPLE;
6157 if (shader->key.part.ps.prolog.force_persp_center_interp)
6158 location = TGSI_INTERPOLATE_LOC_CENTER;
6159
6160 switch (location) {
6161 case TGSI_INTERPOLATE_LOC_SAMPLE:
6162 key->ps_prolog.color_interp_vgpr_index[i] = 0;
6163 shader->config.spi_ps_input_ena |=
6164 S_0286CC_PERSP_SAMPLE_ENA(1);
6165 break;
6166 case TGSI_INTERPOLATE_LOC_CENTER:
6167 key->ps_prolog.color_interp_vgpr_index[i] = 2;
6168 shader->config.spi_ps_input_ena |=
6169 S_0286CC_PERSP_CENTER_ENA(1);
6170 break;
6171 case TGSI_INTERPOLATE_LOC_CENTROID:
6172 key->ps_prolog.color_interp_vgpr_index[i] = 4;
6173 shader->config.spi_ps_input_ena |=
6174 S_0286CC_PERSP_CENTROID_ENA(1);
6175 break;
6176 default:
6177 assert(0);
6178 }
6179 break;
6180 case TGSI_INTERPOLATE_LINEAR:
6181 /* Force the interpolation location for colors here. */
6182 if (shader->key.part.ps.prolog.force_linear_sample_interp)
6183 location = TGSI_INTERPOLATE_LOC_SAMPLE;
6184 if (shader->key.part.ps.prolog.force_linear_center_interp)
6185 location = TGSI_INTERPOLATE_LOC_CENTER;
6186
6187 /* The VGPR assignment for non-monolithic shaders
6188 * works because InitialPSInputAddr is set on the
6189 * main shader and PERSP_PULL_MODEL is never used.
6190 */
6191 switch (location) {
6192 case TGSI_INTERPOLATE_LOC_SAMPLE:
6193 key->ps_prolog.color_interp_vgpr_index[i] =
6194 separate_prolog ? 6 : 9;
6195 shader->config.spi_ps_input_ena |=
6196 S_0286CC_LINEAR_SAMPLE_ENA(1);
6197 break;
6198 case TGSI_INTERPOLATE_LOC_CENTER:
6199 key->ps_prolog.color_interp_vgpr_index[i] =
6200 separate_prolog ? 8 : 11;
6201 shader->config.spi_ps_input_ena |=
6202 S_0286CC_LINEAR_CENTER_ENA(1);
6203 break;
6204 case TGSI_INTERPOLATE_LOC_CENTROID:
6205 key->ps_prolog.color_interp_vgpr_index[i] =
6206 separate_prolog ? 10 : 13;
6207 shader->config.spi_ps_input_ena |=
6208 S_0286CC_LINEAR_CENTROID_ENA(1);
6209 break;
6210 default:
6211 assert(0);
6212 }
6213 break;
6214 default:
6215 assert(0);
6216 }
6217 }
6218 }
6219 }
6220
6221 /**
6222 * Check whether a PS prolog is required based on the key.
6223 */
6224 static bool si_need_ps_prolog(const union si_shader_part_key *key)
6225 {
6226 return key->ps_prolog.colors_read ||
6227 key->ps_prolog.states.force_persp_sample_interp ||
6228 key->ps_prolog.states.force_linear_sample_interp ||
6229 key->ps_prolog.states.force_persp_center_interp ||
6230 key->ps_prolog.states.force_linear_center_interp ||
6231 key->ps_prolog.states.bc_optimize_for_persp ||
6232 key->ps_prolog.states.bc_optimize_for_linear ||
6233 key->ps_prolog.states.poly_stipple ||
6234 key->ps_prolog.states.samplemask_log_ps_iter;
6235 }
6236
6237 /**
6238 * Compute the PS epilog key, which contains all the information needed to
6239 * build the PS epilog function.
6240 */
6241 static void si_get_ps_epilog_key(struct si_shader *shader,
6242 union si_shader_part_key *key)
6243 {
6244 struct tgsi_shader_info *info = &shader->selector->info;
6245 memset(key, 0, sizeof(*key));
6246 key->ps_epilog.colors_written = info->colors_written;
6247 key->ps_epilog.writes_z = info->writes_z;
6248 key->ps_epilog.writes_stencil = info->writes_stencil;
6249 key->ps_epilog.writes_samplemask = info->writes_samplemask;
6250 key->ps_epilog.states = shader->key.part.ps.epilog;
6251 }
6252
6253 /**
6254 * Build the GS prolog function. Rotate the input vertices for triangle strips
6255 * with adjacency.
6256 */
6257 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
6258 union si_shader_part_key *key)
6259 {
6260 unsigned num_sgprs, num_vgprs;
6261 struct si_function_info fninfo;
6262 LLVMBuilderRef builder = ctx->ac.builder;
6263 LLVMTypeRef returns[48];
6264 LLVMValueRef func, ret;
6265
6266 si_init_function_info(&fninfo);
6267
6268 if (ctx->screen->info.chip_class >= GFX9) {
6269 if (key->gs_prolog.states.gfx9_prev_is_vs)
6270 num_sgprs = 8 + GFX9_VSGS_NUM_USER_SGPR;
6271 else
6272 num_sgprs = 8 + GFX9_TESGS_NUM_USER_SGPR;
6273 num_vgprs = 5; /* ES inputs are not needed by GS */
6274 } else {
6275 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
6276 num_vgprs = 8;
6277 }
6278
6279 for (unsigned i = 0; i < num_sgprs; ++i) {
6280 add_arg(&fninfo, ARG_SGPR, ctx->i32);
6281 returns[i] = ctx->i32;
6282 }
6283
6284 for (unsigned i = 0; i < num_vgprs; ++i) {
6285 add_arg(&fninfo, ARG_VGPR, ctx->i32);
6286 returns[num_sgprs + i] = ctx->f32;
6287 }
6288
6289 /* Create the function. */
6290 si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
6291 &fninfo, 0);
6292 func = ctx->main_fn;
6293
6294 /* Set the full EXEC mask for the prolog, because we are only fiddling
6295 * with registers here. The main shader part will set the correct EXEC
6296 * mask.
6297 */
6298 if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
6299 ac_init_exec_full_mask(&ctx->ac);
6300
6301 /* Copy inputs to outputs. This should be no-op, as the registers match,
6302 * but it will prevent the compiler from overwriting them unintentionally.
6303 */
6304 ret = ctx->return_value;
6305 for (unsigned i = 0; i < num_sgprs; i++) {
6306 LLVMValueRef p = LLVMGetParam(func, i);
6307 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
6308 }
6309 for (unsigned i = 0; i < num_vgprs; i++) {
6310 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
6311 p = ac_to_float(&ctx->ac, p);
6312 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
6313 }
6314
6315 if (key->gs_prolog.states.tri_strip_adj_fix) {
6316 /* Remap the input vertices for every other primitive. */
6317 const unsigned gfx6_vtx_params[6] = {
6318 num_sgprs,
6319 num_sgprs + 1,
6320 num_sgprs + 3,
6321 num_sgprs + 4,
6322 num_sgprs + 5,
6323 num_sgprs + 6
6324 };
6325 const unsigned gfx9_vtx_params[3] = {
6326 num_sgprs,
6327 num_sgprs + 1,
6328 num_sgprs + 4,
6329 };
6330 LLVMValueRef vtx_in[6], vtx_out[6];
6331 LLVMValueRef prim_id, rotate;
6332
6333 if (ctx->screen->info.chip_class >= GFX9) {
6334 for (unsigned i = 0; i < 3; i++) {
6335 vtx_in[i*2] = si_unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
6336 vtx_in[i*2+1] = si_unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
6337 }
6338 } else {
6339 for (unsigned i = 0; i < 6; i++)
6340 vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
6341 }
6342
6343 prim_id = LLVMGetParam(func, num_sgprs + 2);
6344 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
6345
6346 for (unsigned i = 0; i < 6; ++i) {
6347 LLVMValueRef base, rotated;
6348 base = vtx_in[i];
6349 rotated = vtx_in[(i + 4) % 6];
6350 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
6351 }
6352
6353 if (ctx->screen->info.chip_class >= GFX9) {
6354 for (unsigned i = 0; i < 3; i++) {
6355 LLVMValueRef hi, out;
6356
6357 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
6358 LLVMConstInt(ctx->i32, 16, 0), "");
6359 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
6360 out = ac_to_float(&ctx->ac, out);
6361 ret = LLVMBuildInsertValue(builder, ret, out,
6362 gfx9_vtx_params[i], "");
6363 }
6364 } else {
6365 for (unsigned i = 0; i < 6; i++) {
6366 LLVMValueRef out;
6367
6368 out = ac_to_float(&ctx->ac, vtx_out[i]);
6369 ret = LLVMBuildInsertValue(builder, ret, out,
6370 gfx6_vtx_params[i], "");
6371 }
6372 }
6373 }
6374
6375 LLVMBuildRet(builder, ret);
6376 }
6377
6378 /**
6379 * Given a list of shader part functions, build a wrapper function that
6380 * runs them in sequence to form a monolithic shader.
6381 */
6382 static void si_build_wrapper_function(struct si_shader_context *ctx,
6383 LLVMValueRef *parts,
6384 unsigned num_parts,
6385 unsigned main_part,
6386 unsigned next_shader_first_part)
6387 {
6388 LLVMBuilderRef builder = ctx->ac.builder;
6389 /* PS epilog has one arg per color component; gfx9 merged shader
6390 * prologs need to forward 32 user SGPRs.
6391 */
6392 struct si_function_info fninfo;
6393 LLVMValueRef initial[64], out[64];
6394 LLVMTypeRef function_type;
6395 unsigned num_first_params;
6396 unsigned num_out, initial_num_out;
6397 MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
6398 MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
6399 unsigned num_sgprs, num_vgprs;
6400 unsigned gprs;
6401 struct lp_build_if_state if_state;
6402
6403 si_init_function_info(&fninfo);
6404
6405 for (unsigned i = 0; i < num_parts; ++i) {
6406 ac_add_function_attr(ctx->ac.context, parts[i], -1,
6407 AC_FUNC_ATTR_ALWAYSINLINE);
6408 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
6409 }
6410
6411 /* The parameters of the wrapper function correspond to those of the
6412 * first part in terms of SGPRs and VGPRs, but we use the types of the
6413 * main part to get the right types. This is relevant for the
6414 * dereferenceable attribute on descriptor table pointers.
6415 */
6416 num_sgprs = 0;
6417 num_vgprs = 0;
6418
6419 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
6420 num_first_params = LLVMCountParamTypes(function_type);
6421
6422 for (unsigned i = 0; i < num_first_params; ++i) {
6423 LLVMValueRef param = LLVMGetParam(parts[0], i);
6424
6425 if (ac_is_sgpr_param(param)) {
6426 assert(num_vgprs == 0);
6427 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
6428 } else {
6429 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
6430 }
6431 }
6432
6433 gprs = 0;
6434 while (gprs < num_sgprs + num_vgprs) {
6435 LLVMValueRef param = LLVMGetParam(parts[main_part], fninfo.num_params);
6436 LLVMTypeRef type = LLVMTypeOf(param);
6437 unsigned size = ac_get_type_size(type) / 4;
6438
6439 add_arg(&fninfo, gprs < num_sgprs ? ARG_SGPR : ARG_VGPR, type);
6440
6441 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
6442 assert(gprs + size <= num_sgprs + num_vgprs &&
6443 (gprs >= num_sgprs || gprs + size <= num_sgprs));
6444
6445 gprs += size;
6446 }
6447
6448 /* Prepare the return type. */
6449 unsigned num_returns = 0;
6450 LLVMTypeRef returns[32], last_func_type, return_type;
6451
6452 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
6453 return_type = LLVMGetReturnType(last_func_type);
6454
6455 switch (LLVMGetTypeKind(return_type)) {
6456 case LLVMStructTypeKind:
6457 num_returns = LLVMCountStructElementTypes(return_type);
6458 assert(num_returns <= ARRAY_SIZE(returns));
6459 LLVMGetStructElementTypes(return_type, returns);
6460 break;
6461 case LLVMVoidTypeKind:
6462 break;
6463 default:
6464 unreachable("unexpected type");
6465 }
6466
6467 si_create_function(ctx, "wrapper", returns, num_returns, &fninfo,
6468 si_get_max_workgroup_size(ctx->shader));
6469
6470 if (is_merged_shader(ctx))
6471 ac_init_exec_full_mask(&ctx->ac);
6472
6473 /* Record the arguments of the function as if they were an output of
6474 * a previous part.
6475 */
6476 num_out = 0;
6477 num_out_sgpr = 0;
6478
6479 for (unsigned i = 0; i < fninfo.num_params; ++i) {
6480 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
6481 LLVMTypeRef param_type = LLVMTypeOf(param);
6482 LLVMTypeRef out_type = i < fninfo.num_sgpr_params ? ctx->i32 : ctx->f32;
6483 unsigned size = ac_get_type_size(param_type) / 4;
6484
6485 if (size == 1) {
6486 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6487 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
6488 param_type = ctx->i32;
6489 }
6490
6491 if (param_type != out_type)
6492 param = LLVMBuildBitCast(builder, param, out_type, "");
6493 out[num_out++] = param;
6494 } else {
6495 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
6496
6497 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6498 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
6499 param_type = ctx->i64;
6500 }
6501
6502 if (param_type != vector_type)
6503 param = LLVMBuildBitCast(builder, param, vector_type, "");
6504
6505 for (unsigned j = 0; j < size; ++j)
6506 out[num_out++] = LLVMBuildExtractElement(
6507 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
6508 }
6509
6510 if (i < fninfo.num_sgpr_params)
6511 num_out_sgpr = num_out;
6512 }
6513
6514 memcpy(initial, out, sizeof(out));
6515 initial_num_out = num_out;
6516 initial_num_out_sgpr = num_out_sgpr;
6517
6518 /* Now chain the parts. */
6519 LLVMValueRef ret;
6520 for (unsigned part = 0; part < num_parts; ++part) {
6521 LLVMValueRef in[48];
6522 LLVMTypeRef ret_type;
6523 unsigned out_idx = 0;
6524 unsigned num_params = LLVMCountParams(parts[part]);
6525
6526 /* Merged shaders are executed conditionally depending
6527 * on the number of enabled threads passed in the input SGPRs. */
6528 if (is_merged_shader(ctx) && part == 0) {
6529 LLVMValueRef ena, count = initial[3];
6530
6531 count = LLVMBuildAnd(builder, count,
6532 LLVMConstInt(ctx->i32, 0x7f, 0), "");
6533 ena = LLVMBuildICmp(builder, LLVMIntULT,
6534 ac_get_thread_id(&ctx->ac), count, "");
6535 lp_build_if(&if_state, &ctx->gallivm, ena);
6536 }
6537
6538 /* Derive arguments for the next part from outputs of the
6539 * previous one.
6540 */
6541 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
6542 LLVMValueRef param;
6543 LLVMTypeRef param_type;
6544 bool is_sgpr;
6545 unsigned param_size;
6546 LLVMValueRef arg = NULL;
6547
6548 param = LLVMGetParam(parts[part], param_idx);
6549 param_type = LLVMTypeOf(param);
6550 param_size = ac_get_type_size(param_type) / 4;
6551 is_sgpr = ac_is_sgpr_param(param);
6552
6553 if (is_sgpr) {
6554 ac_add_function_attr(ctx->ac.context, parts[part],
6555 param_idx + 1, AC_FUNC_ATTR_INREG);
6556 } else if (out_idx < num_out_sgpr) {
6557 /* Skip returned SGPRs the current part doesn't
6558 * declare on the input. */
6559 out_idx = num_out_sgpr;
6560 }
6561
6562 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
6563
6564 if (param_size == 1)
6565 arg = out[out_idx];
6566 else
6567 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
6568
6569 if (LLVMTypeOf(arg) != param_type) {
6570 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6571 if (LLVMGetPointerAddressSpace(param_type) ==
6572 AC_ADDR_SPACE_CONST_32BIT) {
6573 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
6574 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6575 } else {
6576 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
6577 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6578 }
6579 } else {
6580 arg = LLVMBuildBitCast(builder, arg, param_type, "");
6581 }
6582 }
6583
6584 in[param_idx] = arg;
6585 out_idx += param_size;
6586 }
6587
6588 ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
6589
6590 if (is_merged_shader(ctx) &&
6591 part + 1 == next_shader_first_part) {
6592 lp_build_endif(&if_state);
6593
6594 /* The second half of the merged shader should use
6595 * the inputs from the toplevel (wrapper) function,
6596 * not the return value from the last call.
6597 *
6598 * That's because the last call was executed condi-
6599 * tionally, so we can't consume it in the main
6600 * block.
6601 */
6602 memcpy(out, initial, sizeof(initial));
6603 num_out = initial_num_out;
6604 num_out_sgpr = initial_num_out_sgpr;
6605 continue;
6606 }
6607
6608 /* Extract the returned GPRs. */
6609 ret_type = LLVMTypeOf(ret);
6610 num_out = 0;
6611 num_out_sgpr = 0;
6612
6613 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
6614 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
6615
6616 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
6617
6618 for (unsigned i = 0; i < ret_size; ++i) {
6619 LLVMValueRef val =
6620 LLVMBuildExtractValue(builder, ret, i, "");
6621
6622 assert(num_out < ARRAY_SIZE(out));
6623 out[num_out++] = val;
6624
6625 if (LLVMTypeOf(val) == ctx->i32) {
6626 assert(num_out_sgpr + 1 == num_out);
6627 num_out_sgpr = num_out;
6628 }
6629 }
6630 }
6631 }
6632
6633 /* Return the value from the last part. */
6634 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
6635 LLVMBuildRetVoid(builder);
6636 else
6637 LLVMBuildRet(builder, ret);
6638 }
6639
6640 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
6641 struct si_shader_selector *sel)
6642 {
6643 if (!compiler->low_opt_passes)
6644 return false;
6645
6646 /* Assume a slow CPU. */
6647 assert(!sel->screen->info.has_dedicated_vram &&
6648 sel->screen->info.chip_class <= GFX8);
6649
6650 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
6651 * buffer stores. */
6652 return sel->type == PIPE_SHADER_COMPUTE &&
6653 sel->info.num_memory_instructions > 1000;
6654 }
6655
/**
 * Compile a shader to its final binary.
 *
 * Translates the selector's IR (TGSI or NIR) to LLVM IR, builds monolithic
 * variants by joining prolog/main/epilog parts into one wrapper function
 * where needed, runs the LLVM optimizer, and compiles the module to ISA,
 * storing the result in \p shader->binary / \p shader->config.
 *
 * \param sscreen	screen
 * \param compiler	per-thread LLVM compiler state
 * \param shader	the shader to compile; also receives the results
 * \param debug		debug callback for messages
 * \return 0 on success, a negative value on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   struct ac_llvm_compiler *compiler,
			   struct si_shader *shader,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (si_can_dump_shader(sscreen, sel->info.processor) &&
	    !(sscreen->debug_flags & DBG(NO_TGSI))) {
		if (sel->tokens)
			tgsi_dump(sel->tokens, 0);
		else
			nir_print_shader(sel->nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, compiler);
	si_llvm_context_set_tgsi(&ctx, shader);

	/* Initialize all VS output param offsets to "undefined" before the
	 * main part fills them in. */
	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_compile_tgsi_main(&ctx)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	/* For monolithic shaders, build the required prolog/epilog parts
	 * and join them with the main part in a single wrapper function. */
	if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);

		if (ctx.shader->key.opt.vs_as_prim_discard_cs)
			si_build_prim_discard_compute_shader(&ctx);
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			/* GFX9 merged LS+HS: up to four parts — optional LS
			 * (VS) prolog, LS main, TCS main, TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			shader_ls.is_monolithic = true;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* LS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader_ls.info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, vs_needs_prolog,
						  vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9: TCS main + TCS epilog only. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			/* GFX9 merged ES+GS: optional ES (VS) prolog,
			 * ES main, GS prolog, GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			shader_es.is_monolithic = true;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader_es.info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9: GS prolog + GS main only. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* PS: optional prolog + main + epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type)) {
		ctx.shader->config.private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_fn);
	}

	/* Make sure the input is a pointer and not integer followed by inttoptr. */
	assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
	       LLVMPointerTypeKind);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
			    ctx.ac.module, debug, ctx.type,
			    si_get_shader_name(shader, ctx.type),
			    si_should_optimize_less(compiler, shader->selector));
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->info.chip_class >= GFX8 ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Scale the per-SIMD limits by the minimum number of waves
		 * this workgroup needs per SIMD. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(&ctx))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs.  Each enabled
	 * SPI_PS_INPUT_ADDR field accounts for the VGPRs the hardware
	 * preloads for it. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;
		shader->info.ancillary_vgpr_index = -1;

		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.ancillary_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	si_calculate_max_simd_waves(shader);
	si_shader_dump_stats_for_shader_db(shader, debug);
	return 0;
}
6983
6984 /**
6985 * Create, compile and return a shader part (prolog or epilog).
6986 *
6987 * \param sscreen screen
6988 * \param list list of shader parts of the same category
6989 * \param type shader type
6990 * \param key shader part key
6991 * \param prolog whether the part being requested is a prolog
6992 * \param tm LLVM target machine
6993 * \param debug debug callback
6994 * \param build the callback responsible for building the main function
6995 * \return non-NULL on success
6996 */
6997 static struct si_shader_part *
6998 si_get_shader_part(struct si_screen *sscreen,
6999 struct si_shader_part **list,
7000 enum pipe_shader_type type,
7001 bool prolog,
7002 union si_shader_part_key *key,
7003 struct ac_llvm_compiler *compiler,
7004 struct pipe_debug_callback *debug,
7005 void (*build)(struct si_shader_context *,
7006 union si_shader_part_key *),
7007 const char *name)
7008 {
7009 struct si_shader_part *result;
7010
7011 mtx_lock(&sscreen->shader_parts_mutex);
7012
7013 /* Find existing. */
7014 for (result = *list; result; result = result->next) {
7015 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
7016 mtx_unlock(&sscreen->shader_parts_mutex);
7017 return result;
7018 }
7019 }
7020
7021 /* Compile a new one. */
7022 result = CALLOC_STRUCT(si_shader_part);
7023 result->key = *key;
7024
7025 struct si_shader shader = {};
7026 struct si_shader_context ctx;
7027
7028 si_init_shader_ctx(&ctx, sscreen, compiler);
7029 ctx.shader = &shader;
7030 ctx.type = type;
7031
7032 switch (type) {
7033 case PIPE_SHADER_VERTEX:
7034 shader.key.as_ls = key->vs_prolog.as_ls;
7035 shader.key.as_es = key->vs_prolog.as_es;
7036 break;
7037 case PIPE_SHADER_TESS_CTRL:
7038 assert(!prolog);
7039 shader.key.part.tcs.epilog = key->tcs_epilog.states;
7040 break;
7041 case PIPE_SHADER_GEOMETRY:
7042 assert(prolog);
7043 break;
7044 case PIPE_SHADER_FRAGMENT:
7045 if (prolog)
7046 shader.key.part.ps.prolog = key->ps_prolog.states;
7047 else
7048 shader.key.part.ps.epilog = key->ps_epilog.states;
7049 break;
7050 default:
7051 unreachable("bad shader part");
7052 }
7053
7054 build(&ctx, key);
7055
7056 /* Compile. */
7057 si_llvm_optimize_module(&ctx);
7058
7059 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
7060 ctx.ac.module, debug, ctx.type, name, false)) {
7061 FREE(result);
7062 result = NULL;
7063 goto out;
7064 }
7065
7066 result->next = *list;
7067 *list = result;
7068
7069 out:
7070 si_llvm_dispose(&ctx);
7071 mtx_unlock(&sscreen->shader_parts_mutex);
7072 return result;
7073 }
7074
7075 static LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
7076 {
7077 LLVMValueRef ptr[2], list;
7078 bool merged_shader = is_merged_shader(ctx);
7079
7080 ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
7081 list = LLVMBuildIntToPtr(ctx->ac.builder, ptr[0],
7082 ac_array_in_const32_addr_space(ctx->v4i32), "");
7083 return list;
7084 }
7085
/**
 * Build the vertex shader prolog function.
 *
 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
 * All inputs are returned unmodified. The vertex load indices are
 * stored after them, which will be used by the API VS for fetching inputs.
 *
 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
 * input_v0,
 * input_v1,
 * input_v2,
 * input_v3,
 * (VertexID + BaseVertex),
 * (InstanceID + StartInstance),
 * (InstanceID / 2 + StartInstance)
 *
 * \param ctx	shader context (receives the built function in ctx->main_fn)
 * \param key	prolog key describing SGPR/VGPR counts and divisor state
 */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMTypeRef *returns;
	LLVMValueRef ret, func;
	int num_returns, i;
	/* For merged shaders, the VS system-value VGPRs come after the
	 * next stage's VGPRs. */
	unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
	unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
	LLVMValueRef input_vgprs[9];
	unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
				      num_input_vgprs;
	unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;

	si_init_function_info(&fninfo);

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	num_returns = 0;

	/* Declare input and output SGPRs. */
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[num_returns++] = ctx->i32;
	}

	/* Preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < num_input_vgprs; i++) {
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &input_vgprs[i]);
		returns[num_returns++] = ctx->f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "vs_prolog", returns, num_returns, &fninfo, 0);
	func = ctx->main_fn;

	if (key->vs_prolog.num_merged_next_stage_vgprs) {
		/* Non-monolithic merged shaders initialize EXEC from an
		 * input SGPR here. */
		if (!key->vs_prolog.is_monolithic)
			si_init_exec_from_input(ctx, 3, 0);

		if (key->vs_prolog.as_ls &&
		    ctx->screen->has_ls_vgpr_init_bug) {
			/* If there are no HS threads, SPI loads the LS VGPRs
			 * starting at VGPR 0. Shift them back to where they
			 * belong.
			 */
			LLVMValueRef has_hs_threads =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
					      si_unpack_param(ctx, 3, 8, 8),
					      ctx->i32_0, "");

			/* Shift the affected VGPRs up by 2 (highest index
			 * first so sources are read before being clobbered).
			 */
			for (i = 4; i > 0; --i) {
				input_vgprs[i + 1] =
					LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
							input_vgprs[i + 1],
							input_vgprs[i - 1], "");
			}
		}
	}

	/* Locate the VertexID/InstanceID system-value VGPRs; as_ls places
	 * InstanceID at offset 2 instead of 1. */
	unsigned vertex_id_vgpr = first_vs_vgpr;
	unsigned instance_id_vgpr = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);

	ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
	ctx->abi.instance_id = input_vgprs[instance_id_vgpr];

	/* InstanceID = VertexID >> 16;
	 * VertexID = VertexID & 0xffff;
	 */
	if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
		ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
						     LLVMConstInt(ctx->i32, 16, 0), "");
		ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
						  LLVMConstInt(ctx->i32, 0xffff, 0), "");
	}

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}
	for (i = 0; i < num_input_vgprs; i++) {
		LLVMValueRef p = input_vgprs[i];

		/* Substitute the (possibly unpacked/shifted) system values. */
		if (i == vertex_id_vgpr)
			p = ctx->abi.vertex_id;
		else if (i == instance_id_vgpr)
			p = ctx->abi.instance_id;

		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
					   key->vs_prolog.num_input_sgprs + i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	LLVMValueRef instance_divisor_constbuf = NULL;

	if (key->vs_prolog.states.instance_divisor_is_fetched) {
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
		LLVMValueRef buf_index =
			LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
		instance_divisor_constbuf =
			ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
	}

	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		bool divisor_is_one =
			key->vs_prolog.states.instance_divisor_is_one & (1u << i);
		bool divisor_is_fetched =
			key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
		LLVMValueRef index = NULL;

		if (divisor_is_one) {
			index = ctx->abi.instance_id;
		} else if (divisor_is_fetched) {
			/* Load the 4 udiv factors for this attribute from the
			 * instance-divisor constant buffer (16 bytes each). */
			LLVMValueRef udiv_factors[4];

			for (unsigned j = 0; j < 4; j++) {
				udiv_factors[j] =
					buffer_load_const(ctx, instance_divisor_constbuf,
							  LLVMConstInt(ctx->i32, i*16 + j*4, 0));
				udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
			}
			/* The faster NUW version doesn't work when InstanceID == UINT_MAX.
			 * Such InstanceID might not be achievable in a reasonable time though.
			 */
			index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
						       udiv_factors[0], udiv_factors[1],
						       udiv_factors[2], udiv_factors[3]);
		}

		if (divisor_is_one || divisor_is_fetched) {
			/* Add StartInstance. */
			index = LLVMBuildAdd(ctx->ac.builder, index,
					     LLVMGetParam(ctx->main_fn, user_sgpr_base +
							  SI_SGPR_START_INSTANCE), "");
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(ctx->ac.builder,
					     ctx->abi.vertex_id,
					     LLVMGetParam(func, user_sgpr_base +
							  SI_SGPR_BASE_VERTEX), "");
		}

		index = ac_to_float(&ctx->ac, index);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
					   fninfo.num_params + i, "");
	}

	si_llvm_build_ret(ctx, ret);
}
7261
7262 static bool si_get_vs_prolog(struct si_screen *sscreen,
7263 struct ac_llvm_compiler *compiler,
7264 struct si_shader *shader,
7265 struct pipe_debug_callback *debug,
7266 struct si_shader *main_part,
7267 const struct si_vs_prolog_bits *key)
7268 {
7269 struct si_shader_selector *vs = main_part->selector;
7270
7271 if (!si_vs_needs_prolog(vs, key))
7272 return true;
7273
7274 /* Get the prolog. */
7275 union si_shader_part_key prolog_key;
7276 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
7277 key, shader, &prolog_key);
7278
7279 shader->prolog =
7280 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7281 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
7282 debug, si_build_vs_prolog_function,
7283 "Vertex Shader Prolog");
7284 return shader->prolog != NULL;
7285 }
7286
7287 /**
7288 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7289 */
7290 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7291 struct ac_llvm_compiler *compiler,
7292 struct si_shader *shader,
7293 struct pipe_debug_callback *debug)
7294 {
7295 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
7296 &shader->key.part.vs.prolog);
7297 }
7298
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 *
 * \param ctx	shader context (receives the built function in ctx->main_fn)
 * \param key	epilog key (key->tcs_epilog.states)
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef func;

	si_init_function_info(&fninfo);

	/* Declare the SGPR layout.  NOTE(review): the unnamed add_arg()
	 * SGPRs appear to be placeholders so the named parameters land at
	 * the same indices as in the TCS main part — the order must not be
	 * changed independently of the main part's signature. */
	if (ctx->screen->info.chip_class >= GFX9) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* wave info */
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	} else {
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	}

	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	unsigned tess_factors_idx =
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* patch index within the wave (REL_PATCH_ID) */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* invocation ID within the patch */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* LDS offset where tess factors should be loaded from */

	/* 6 tess factor VGPRs from invocation 0. */
	for (unsigned i = 0; i < 6; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* tess factors */

	/* Create the function. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, &fninfo,
			   ctx->screen->info.chip_class >= GFX7 ? 128 : 64);
	ac_declare_lds_as_pointer(&ctx->ac);
	func = ctx->main_fn;

	LLVMValueRef invoc0_tess_factors[6];
	for (unsigned i = 0; i < 6; i++)
		invoc0_tess_factors[i] = LLVMGetParam(func, tess_factors_idx + 3 + i);

	/* Write the tess factors to memory (and LDS). */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, tess_factors_idx),
			      LLVMGetParam(func, tess_factors_idx + 1),
			      LLVMGetParam(func, tess_factors_idx + 2),
			      invoc0_tess_factors, invoc0_tess_factors + 4);

	LLVMBuildRetVoid(ctx->ac.builder);
}
7373
7374 /**
7375 * Select and compile (or reuse) TCS parts (epilog).
7376 */
7377 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7378 struct ac_llvm_compiler *compiler,
7379 struct si_shader *shader,
7380 struct pipe_debug_callback *debug)
7381 {
7382 if (sscreen->info.chip_class >= GFX9) {
7383 struct si_shader *ls_main_part =
7384 shader->key.part.tcs.ls->main_shader_part_ls;
7385
7386 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
7387 &shader->key.part.tcs.ls_prolog))
7388 return false;
7389
7390 shader->previous_stage = ls_main_part;
7391 }
7392
7393 /* Get the epilog. */
7394 union si_shader_part_key epilog_key;
7395 memset(&epilog_key, 0, sizeof(epilog_key));
7396 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
7397
7398 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7399 PIPE_SHADER_TESS_CTRL, false,
7400 &epilog_key, compiler, debug,
7401 si_build_tcs_epilog_function,
7402 "Tessellation Control Shader Epilog");
7403 return shader->epilog != NULL;
7404 }
7405
7406 /**
7407 * Select and compile (or reuse) GS parts (prolog).
7408 */
7409 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
7410 struct ac_llvm_compiler *compiler,
7411 struct si_shader *shader,
7412 struct pipe_debug_callback *debug)
7413 {
7414 if (sscreen->info.chip_class >= GFX9) {
7415 struct si_shader *es_main_part =
7416 shader->key.part.gs.es->main_shader_part_es;
7417
7418 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
7419 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
7420 &shader->key.part.gs.vs_prolog))
7421 return false;
7422
7423 shader->previous_stage = es_main_part;
7424 }
7425
7426 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
7427 return true;
7428
7429 union si_shader_part_key prolog_key;
7430 memset(&prolog_key, 0, sizeof(prolog_key));
7431 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
7432
7433 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
7434 PIPE_SHADER_GEOMETRY, true,
7435 &prolog_key, compiler, debug,
7436 si_build_gs_prolog_function,
7437 "Geometry Shader Prolog");
7438 return shader->prolog2 != NULL;
7439 }
7440
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 *
 * Input VGPR layout relative to the first input VGPR, as used by the
 * reads/writes below: 0-1 PERSP_SAMPLE i/j, 2-3 PERSP_CENTER i/j,
 * 4-5 PERSP_CENTROID i/j, 6-7 LINEAR_SAMPLE i/j, 8-9 LINEAR_CENTER i/j,
 * 10-11 LINEAR_CENTROID i/j; POS_FIXED_PT is the last input VGPR.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMValueRef ret, func;
	int num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	si_init_function_info(&fninfo);

	/* Declare inputs: SGPRs as i32, VGPRs as f32. */
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		add_arg(&fninfo, ARG_SGPR, ctx->i32);

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Declare outputs (same as inputs + add colors if needed) */
	num_returns = fninfo.num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		fninfo.types[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", fninfo.types, num_returns,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < fninfo.num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(ctx->ac.builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(ctx->ac.builder, bc_optimize,
					     ctx->i1, "");

		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors (up to 2 two-sided color inputs, 4 channels each,
	 * per the colors_read mask).
	 */
	unsigned color_out_idx = 0;
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = ac_build_gather_values(&ctx->ac, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = ac_to_integer(&ctx->ac, face);
		}

		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append interpolated channels after the pass-through inputs,
		 * in colors_read bit order.
		 */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret, color[chan],
						   fninfo.num_params + color_out_idx++, "");
		}
	}

	/* Section 15.2.2 (Shader Inputs) of the OpenGL 4.5 (Core Profile) spec
	 * says:
	 *
	 *    "When per-sample shading is active due to the use of a fragment
	 *     input qualified by sample or due to the use of the gl_SampleID
	 *     or gl_SamplePosition variables, only the bit for the current
	 *     sample is set in gl_SampleMaskIn. When state specifies multiple
	 *     fragment shader invocations for a given fragment, the sample
	 *     mask for any single fragment shader invocation may specify a
	 *     subset of the covered samples for the fragment. In this case,
	 *     the bit corresponding to each covered sample will be set in
	 *     exactly one fragment shader invocation."
	 *
	 * The samplemask loaded by hardware is always the coverage of the
	 * entire pixel/fragment, so mask bits out based on the sample ID.
	 */
	if (key->ps_prolog.states.samplemask_log_ps_iter) {
		/* The bit pattern matches that used by fixed function fragment
		 * processing. */
		static const uint16_t ps_iter_masks[] = {
			0xffff, /* not used */
			0x5555,
			0x1111,
			0x0101,
			0x0001,
		};
		assert(key->ps_prolog.states.samplemask_log_ps_iter < ARRAY_SIZE(ps_iter_masks));

		uint32_t ps_iter_mask = ps_iter_masks[key->ps_prolog.states.samplemask_log_ps_iter];
		unsigned ancillary_vgpr = key->ps_prolog.num_input_sgprs +
					  key->ps_prolog.ancillary_vgpr_index;
		/* Sample ID lives in bits [11:8] of the ancillary VGPR;
		 * the sample mask follows in the next VGPR.
		 */
		LLVMValueRef sampleid = si_unpack_param(ctx, ancillary_vgpr, 8, 4);
		LLVMValueRef samplemask = LLVMGetParam(func, ancillary_vgpr + 1);

		samplemask = ac_to_integer(&ctx->ac, samplemask);
		samplemask = LLVMBuildAnd(
			ctx->ac.builder,
			samplemask,
			LLVMBuildShl(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, ps_iter_mask, false),
				     sampleid, ""),
			"");
		samplemask = ac_to_float(&ctx->ac, samplemask);

		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, samplemask,
					   ancillary_vgpr + 1, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7719
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 *
 * Inputs are the fixed SGPRs (buffer descriptors + alpha ref) followed by
 * VGPRs: 4 color channels per written MRT in MRT order, then Z, stencil and
 * samplemask if written.
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int i;
	struct si_ps_exports exp = {};

	si_init_function_info(&fninfo);

	/* Declare input SGPRs. */
	ctx->param_rw_buffers = add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
	ctx->param_bindless_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
	ctx->param_const_and_shader_buffers = add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
	ctx->param_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->ac.intptr);
	add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);

	/* Declare input VGPRs. */
	unsigned required_num_params =
		     fninfo.num_sgpr_params +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Always declare at least up to the samplemask location. */
	required_num_params = MAX2(required_num_params,
				   fninfo.num_sgpr_params + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	while (fninfo.num_params < required_num_params)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, &fninfo, 0);
	/* Disable elimination of unused inputs. */
	ac_llvm_add_target_dep_function_attr(ctx->main_fn,
					     "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = fninfo.num_sgpr_params;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so that export can carry the "done"
	 * flag. Only applicable when Z/stencil/samplemask aren't exported.
	 */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1ull << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written MRT, consuming 4 VGPRs per MRT. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    fninfo.num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	/* If nothing at all was exported, emit a null export. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		ac_build_export_null(&ctx->ac);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(ctx->ac.builder);
}
7818
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also fixes up shader->config.spi_ps_input_ena so the hw loads every
 * input the selected parts rely on.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      struct ac_llvm_compiler *compiler,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, compiler, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, compiler, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced mode replaces the center/centroid (or sample/centroid)
	 * enables with the single mode the prolog forces.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID. */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7921
/* Workaround for an SPI barrier management hw bug on Bonaire/Kabini.
 *
 * NOTE(review): the early return below intentionally disables the
 * workaround; the code after it is currently unreachable and kept only
 * for reference, in case the workaround needs to be re-enabled.
 */
void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
				      unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 * Make sure we have at least 4k of LDS in use to avoid the bug.
	 * It applies to workgroup sizes of more than one wavefront.
	 */
	if (sscreen->info.family == CHIP_BONAIRE ||
	    sscreen->info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
7938
7939 static void si_fix_resource_usage(struct si_screen *sscreen,
7940 struct si_shader *shader)
7941 {
7942 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7943
7944 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7945
7946 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7947 si_get_max_workgroup_size(shader) > 64) {
7948 si_multiwave_lds_size_workaround(sscreen,
7949 &shader->config.lds_size);
7950 }
7951 }
7952
/**
 * Create the hw shader variant: either compile a monolithic shader, or
 * combine the precompiled main part with selected prolog/epilog parts,
 * then upload the result.
 *
 * \return 0 on success, -1 or a propagated error code on failure.
 */
int si_shader_create(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, compiler, shader, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return -1;

		/* Copy the compiled TGSI shader data over. Note that the
		 * binary is shared, not duplicated (is_binary_shared).
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: all parts run as one hw
		 * shader, so the allocation must cover the maximum of
		 * every part's usage.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
		si_calculate_max_simd_waves(shader);
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8091
/* Free all resources owned by a shader variant. */
void si_shader_destroy(struct si_shader *shader)
{
	/* Release the per-shader scratch buffer, if any. */
	if (shader->scratch_bo)
		si_resource_reference(&shader->scratch_bo, NULL);

	/* Release the uploaded shader code. */
	si_resource_reference(&shader->bo, NULL);

	/* A shared binary is owned by the main shader part (see
	 * si_shader_create); only clean up binaries we own.
	 */
	if (!shader->is_binary_shared)
		ac_shader_binary_clean(&shader->binary);

	/* free(NULL) is a no-op, so no guard needed. */
	free(shader->shader_log);
}