radeonsi: add get_block_size() helper
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "gallivm/lp_bld_const.h"
25 #include "gallivm/lp_bld_gather.h"
26 #include "gallivm/lp_bld_intr.h"
27 #include "gallivm/lp_bld_logic.h"
28 #include "gallivm/lp_bld_arit.h"
29 #include "gallivm/lp_bld_flow.h"
30 #include "gallivm/lp_bld_misc.h"
31 #include "util/u_memory.h"
32 #include "util/u_string.h"
33 #include "tgsi/tgsi_build.h"
34 #include "tgsi/tgsi_util.h"
35 #include "tgsi/tgsi_dump.h"
36
37 #include "ac_binary.h"
38 #include "ac_llvm_util.h"
39 #include "ac_exp_param.h"
40 #include "ac_shader_util.h"
41 #include "si_shader_internal.h"
42 #include "si_pipe.h"
43 #include "sid.h"
44
45 #include "compiler/nir/nir.h"
46
/* Symbol names for the two dwords of the scratch buffer resource descriptor.
 * NOTE(review): presumably resolved/patched after compilation when the
 * scratch buffer address is known — confirm against the ELF upload code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
52
/* One shader output register: its 4 component values plus its TGSI
 * semantic, used when building export/streamout code. */
struct si_shader_output_values
{
	LLVMValueRef values[4];   /* one LLVM value per component (x,y,z,w) */
	unsigned semantic_name;   /* TGSI_SEMANTIC_* */
	unsigned semantic_index;
	ubyte vertex_stream[4];   /* NOTE(review): presumably the GS stream each
				   * component is written to — confirm */
};
60
/**
 * Used to collect types and other info about arguments of the LLVM function
 * before the function is created.
 */
struct si_function_info {
	LLVMTypeRef types[100];     /* argument types, in declaration order */
	LLVMValueRef *assign[100];  /* optional out-pointers receiving the arg values */
	unsigned num_sgpr_params;   /* count of leading SGPR arguments */
	unsigned num_params;        /* total arguments added so far */
};
71
/* Register file an argument is passed in: scalar (SGPR) or vector (VGPR). */
enum si_arg_regfile {
	ARG_SGPR,
	ARG_VGPR
};
76
/* Forward declarations for functions defined later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f);

/* Builders for the non-monolithic shader parts (prologs/epilogs). */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);

/* Ideally pass the sample mask input to the PS epilog as v14, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 14
101
102 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
103 LLVMTypeRef type)
104 {
105 if (type == ctx->ac.i64 || type == ctx->ac.f64)
106 return true;
107
108 return false;
109 }
110
111 static bool is_merged_shader(struct si_shader *shader)
112 {
113 if (shader->selector->screen->info.chip_class <= VI)
114 return false;
115
116 return shader->key.as_ls ||
117 shader->key.as_es ||
118 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
119 shader->selector->type == PIPE_SHADER_GEOMETRY;
120 }
121
122 static void si_init_function_info(struct si_function_info *fninfo)
123 {
124 fninfo->num_params = 0;
125 fninfo->num_sgpr_params = 0;
126 }
127
128 static unsigned add_arg_assign(struct si_function_info *fninfo,
129 enum si_arg_regfile regfile, LLVMTypeRef type,
130 LLVMValueRef *assign)
131 {
132 assert(regfile != ARG_SGPR || fninfo->num_sgpr_params == fninfo->num_params);
133
134 unsigned idx = fninfo->num_params++;
135 assert(idx < ARRAY_SIZE(fninfo->types));
136
137 if (regfile == ARG_SGPR)
138 fninfo->num_sgpr_params = fninfo->num_params;
139
140 fninfo->types[idx] = type;
141 fninfo->assign[idx] = assign;
142 return idx;
143 }
144
/* Append one argument without requesting its value pointer. */
static unsigned add_arg(struct si_function_info *fninfo,
			enum si_arg_regfile regfile, LLVMTypeRef type)
{
	return add_arg_assign(fninfo, regfile, type, NULL);
}
150
/* Like add_arg_assign, but asserts that the argument lands at the expected
 * index "idx". The call stays outside the assert so it still happens in
 * release (NDEBUG) builds. */
static void add_arg_assign_checked(struct si_function_info *fninfo,
				   enum si_arg_regfile regfile, LLVMTypeRef type,
				   LLVMValueRef *assign, unsigned idx)
{
	MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
	assert(actual == idx);
}
158
/* Like add_arg, but asserts the argument lands at the expected index. */
static void add_arg_checked(struct si_function_info *fninfo,
			    enum si_arg_regfile regfile, LLVMTypeRef type,
			    unsigned idx)
{
	add_arg_assign_checked(fninfo, regfile, type, NULL, idx);
}
165
166 /**
167 * Returns a unique index for a per-patch semantic name and index. The index
168 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
169 * can be calculated.
170 */
171 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
172 {
173 switch (semantic_name) {
174 case TGSI_SEMANTIC_TESSOUTER:
175 return 0;
176 case TGSI_SEMANTIC_TESSINNER:
177 return 1;
178 case TGSI_SEMANTIC_PATCH:
179 assert(index < 30);
180 return 2 + index;
181
182 default:
183 assert(!"invalid semantic name");
184 return 0;
185 }
186 }
187
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings). GENERIC should be placed right
		 * after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	/* Fixed slots after the GENERIC range: */
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index <= 1);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 4;
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 5;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6;
	case TGSI_SEMANTIC_PRIMID:
		return SI_MAX_IO_GENERIC + 7;
	case TGSI_SEMANTIC_COLOR: /* these alias */
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 8 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		/* TEXCOORD occupies the last slots; keep within the 64-bit mask. */
		assert(SI_MAX_IO_GENERIC + 10 + index < 64);
		return SI_MAX_IO_GENERIC + 10 + index;
	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
235
236 /**
237 * Get the value of a shader input parameter and extract a bitfield.
238 */
239 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
240 LLVMValueRef value, unsigned rshift,
241 unsigned bitwidth)
242 {
243 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
244 value = ac_to_integer(&ctx->ac, value);
245
246 if (rshift)
247 value = LLVMBuildLShr(ctx->ac.builder, value,
248 LLVMConstInt(ctx->i32, rshift, 0), "");
249
250 if (rshift + bitwidth < 32) {
251 unsigned mask = (1 << bitwidth) - 1;
252 value = LLVMBuildAnd(ctx->ac.builder, value,
253 LLVMConstInt(ctx->i32, mask, 0), "");
254 }
255
256 return value;
257 }
258
259 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
260 unsigned param, unsigned rshift,
261 unsigned bitwidth)
262 {
263 LLVMValueRef value = LLVMGetParam(ctx->main_fn, param);
264
265 return unpack_llvm_param(ctx, value, rshift, bitwidth);
266 }
267
/* Return the relative patch ID for the current TCS/TES invocation. */
static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		/* TCS: packed in bits [7:0] of the tcs_rel_ids system value. */
		return unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		/* TES: passed as a dedicated parameter. */
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}
283
284 /* Tessellation shaders pass outputs to the next shader using LDS.
285 *
286 * LS outputs = TCS inputs
287 * TCS outputs = TES inputs
288 *
289 * The LDS layout is:
290 * - TCS inputs for patch 0
291 * - TCS inputs for patch 1
292 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
293 * - ...
294 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
295 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
296 * - TCS outputs for patch 1
297 * - Per-patch TCS outputs for patch 1
298 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
299 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
300 * - ...
301 *
302 * All three shaders VS(LS), TCS, TES share the same LDS space.
303 */
304
/* Stride of one patch's TCS inputs in LDS, taken from vs_state_bits[20:8].
 * NOTE(review): presumably in dwords, matching the *_dw_stride helpers —
 * confirm against the state setup code. */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
310
311 static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
312 {
313 assert(ctx->type == PIPE_SHADER_TESS_CTRL);
314
315 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
316 return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;
317
318 return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
319 }
320
321 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
322 {
323 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
324
325 return LLVMConstInt(ctx->i32, stride, 0);
326 }
327
/* Dword stride of one patch's TCS outputs in LDS. */
static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	/* Fixed-func TCS: the stride is passed in tcs_out_lds_layout[12:0]. */
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);

	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	/* Per-vertex outputs followed by per-patch outputs (4 dwords each). */
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}
341
/* Dword offset in LDS where patch 0's TCS outputs start.
 * tcs_out_lds_offsets[15:0] stores the offset divided by 4. */
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     0, 16),
				4);
}
351
/* Dword offset in LDS where patch 0's per-patch TCS outputs start.
 * tcs_out_lds_offsets[31:16] stores the offset divided by 4. */
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     16, 16),
				4);
}
361
362 static LLVMValueRef
363 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
364 {
365 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
366 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
367
368 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
369 }
370
371 static LLVMValueRef
372 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
373 {
374 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
375 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
376 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
377
378 return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
379 LLVMBuildMul(ctx->ac.builder, patch_stride,
380 rel_patch_id, ""),
381 "");
382 }
383
384 static LLVMValueRef
385 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
386 {
387 LLVMValueRef patch0_patch_data_offset =
388 get_tcs_out_patch0_patch_data_offset(ctx);
389 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
390 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
391
392 return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
393 LLVMBuildMul(ctx->ac.builder, patch_stride,
394 rel_patch_id, ""),
395 "");
396 }
397
/* Number of output vertices per TCS patch. */
static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	/* Otherwise take it from tcs_offchip_layout[11:6]. */
	return unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
}
410
/* Dword stride of one vertex's TCS inputs (= LS outputs) in LDS. */
static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		/* VS-as-LS: derived from this shader's own outputs. */
		stride = util_last_bit64(ctx->shader->selector->outputs_written);
		return LLVMConstInt(ctx->i32, stride * 4, 0);

	case PIPE_SHADER_TESS_CTRL:
		/* Monolithic merged LS+HS on GFX9: the LS outputs are known
		 * at compile time via the shader key. */
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = util_last_bit64(ctx->shader->key.part.tcs.ls->outputs_written);
			return LLVMConstInt(ctx->i32, stride * 4, 0);
		}
		/* Otherwise the stride is passed in vs_state_bits[31:24]. */
		return unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}
433
434 static LLVMValueRef get_instance_index_for_fetch(
435 struct si_shader_context *ctx,
436 unsigned param_start_instance, LLVMValueRef divisor)
437 {
438 LLVMValueRef result = ctx->abi.instance_id;
439
440 /* The division must be done before START_INSTANCE is added. */
441 if (divisor != ctx->i32_1)
442 result = LLVMBuildUDiv(ctx->ac.builder, result, divisor, "");
443
444 return LLVMBuildAdd(ctx->ac.builder, result,
445 LLVMGetParam(ctx->main_fn, param_start_instance), "");
446 }
447
448 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
449 * to float. */
450 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
451 LLVMValueRef vec4,
452 unsigned double_index)
453 {
454 LLVMBuilderRef builder = ctx->ac.builder;
455 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->ac.context);
456 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
457 LLVMVectorType(f64, 2), "");
458 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
459 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
460 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
461 }
462
463 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
464 LLVMValueRef i32, unsigned index)
465 {
466 assert(index <= 1);
467
468 if (index == 1)
469 return LLVMBuildAShr(ctx->ac.builder, i32,
470 LLVMConstInt(ctx->i32, 16, 0), "");
471
472 return LLVMBuildSExt(ctx->ac.builder,
473 LLVMBuildTrunc(ctx->ac.builder, i32,
474 ctx->ac.i16, ""),
475 ctx->i32, "");
476 }
477
/**
 * Load one vertex shader input into out[0..3].
 *
 * Two paths exist:
 * - VS blit shaders (TGSI_PROPERTY_VS_BLIT_SGPRS set): the "inputs" come
 *   from user SGPRs instead of vertex buffers.
 * - Regular VS: fetch from the vertex buffer descriptor, with fix-ups for
 *   formats the hardware cannot fetch directly (key.mono.vs_fix_fetch).
 */
void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

	if (vs_blit_property) {
		/* Blit shaders draw a large triangle with 3 vertices;
		 * select between the (x1,y1)/(x2,y2) corners per vertex. */
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			/* Constant color: 4 SGPRs, same for all vertices. */
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      ctx->param_vs_blit_inputs + 3 + i);
			}
		} else {
			/* Texcoords: select per-vertex corners like position. */
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned num_channels;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		num_channels = 2;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		num_channels = 4;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		num_channels = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		num_channels = 1;
		break;
	default:
		num_fetches = 1;
		fetch_stride = 0;
		num_channels = util_last_bit(info->input_usage_mask[input_index]);
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       num_channels, false, true);
		input[i] = ac_build_expand_to_vec4(&ctx->ac, input[i], num_channels);
	}

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
						    input[0], llvm_chan, "");
	}

	/* Apply per-format fix-ups on top of the raw fetch results. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000;
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			out[3] = ac_to_float(&ctx->ac, ctx->i32_1);
		}
		break;
	}
}
753
/* TGSI declaration callback for VS inputs; the declaration itself is
 * unused and the load is forwarded to si_llvm_load_input_vs. */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}
762
/* Return the primitive ID system value for the current shader stage.
 * Only component 0 carries the value; other swizzles return 0. */
static LLVMValueRef get_primitive_id(struct si_shader_context *ctx,
				     unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ctx->abi.tcs_patch_id;
	case PIPE_SHADER_TESS_EVAL:
		return ctx->abi.tes_patch_id;
	case PIPE_SHADER_GEOMETRY:
		return ctx->abi.gs_prim_id;
	default:
		assert(0);
		return ctx->i32_0;
	}
}
784
/**
 * Return the value of tgsi_ind_register for indexing.
 * This is the indirect index with the constant offset added to it.
 */
LLVMValueRef si_get_indirect_index(struct si_shader_context *ctx,
				   const struct tgsi_ind_register *ind,
				   unsigned addr_mul,
				   int rel_index)
{
	LLVMValueRef result;

	if (ind->File == TGSI_FILE_ADDRESS) {
		/* ADDR registers are kept in allocas; load the current value. */
		result = ctx->addrs[ind->Index][ind->Swizzle];
		result = LLVMBuildLoad(ctx->ac.builder, result, "");
	} else {
		/* Any other file: fetch through the TGSI fetch callbacks. */
		struct tgsi_full_src_register src = {};

		src.Register.File = ind->File;
		src.Register.Index = ind->Index;

		/* Set the second index to 0 for constants. */
		if (ind->File == TGSI_FILE_CONSTANT)
			src.Register.Dimension = 1;

		result = ctx->bld_base.emit_fetch_funcs[ind->File](&ctx->bld_base, &src,
								   TGSI_TYPE_SIGNED,
								   ind->Swizzle);
		result = ac_to_integer(&ctx->ac, result);
	}

	/* result * addr_mul + rel_index */
	if (addr_mul != 1)
		result = LLVMBuildMul(ctx->ac.builder, result,
				      LLVMConstInt(ctx->i32, addr_mul, 0), "");
	result = LLVMBuildAdd(ctx->ac.builder, result,
			      LLVMConstInt(ctx->i32, rel_index, 0), "");
	return result;
}
822
823 /**
824 * Like si_get_indirect_index, but restricts the return value to a (possibly
825 * undefined) value inside [0..num).
826 */
827 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
828 const struct tgsi_ind_register *ind,
829 int rel_index, unsigned num)
830 {
831 LLVMValueRef result = si_get_indirect_index(ctx, ind, 1, rel_index);
832
833 return si_llvm_bound_index(ctx, result, num);
834 }
835
/* Compute an LDS dword address from already-resolved generic indices:
 *
 *   base_addr (+ vertex_index * vertex_dw_stride)
 *             (+ param_index * 4)
 *             + unique_slot(name[input_index], index[input_index]) * 4
 *
 * vertex_dw_stride and param_index are optional (may be NULL). */
static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							unsigned input_index,
							ubyte *name,
							ubyte *index,
							bool is_patch)
{
	if (vertex_dw_stride) {
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, vertex_index,
						      vertex_dw_stride, ""), "");
	}

	if (param_index) {
		/* Dynamic index: each parameter slot is 4 dwords. */
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, param_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");
	}

	int param = is_patch ?
		si_shader_io_get_unique_index_patch(name[input_index],
						    index[input_index]) :
		si_shader_io_get_unique_index(name[input_index],
					      index[input_index]);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
868
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * Exactly one of "dst"/"src" is used: if "src" is non-NULL it describes the
 * register, otherwise "dst" does.
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int input_index;
	struct tgsi_full_dst_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef ind_index = NULL;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		if (reg.Dimension.Indirect)
			vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
							     1, reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		if (reg.Indirect.ArrayID)
			input_index = array_first[reg.Indirect.ArrayID];
		else
			input_index = reg.Register.Index;

		ind_index = si_get_indirect_index(ctx, &reg.Indirect,
						  1, reg.Register.Index - input_index);
	} else {
		input_index = reg.Register.Index;
	}

	return get_dw_address_from_generic_indices(ctx, vertex_dw_stride,
						   base_addr, vertex_index,
						   ind_index, input_index,
						   name, index,
						   !reg.Register.Dimension);
}
941
942 /* The offchip buffer layout for TCS->TES is
943 *
944 * - attribute 0 of patch 0 vertex 0
945 * - attribute 0 of patch 0 vertex 1
946 * - attribute 0 of patch 0 vertex 2
947 * ...
948 * - attribute 0 of patch 1 vertex 0
949 * - attribute 0 of patch 1 vertex 1
950 * ...
951 * - attribute 1 of patch 0 vertex 0
952 * - attribute 1 of patch 0 vertex 1
953 * ...
954 * - per patch attribute 0 of patch 0
955 * - per patch attribute 0 of patch 1
956 * ...
957 *
958 * Note that every attribute has 4 components.
959 */
/* Compute a byte address into the TCS->TES offchip buffer (layout described
 * in the comment above). If vertex_index is NULL, the address is for a
 * per-patch attribute. All offsets are scaled by 16 (vec4 of dwords). */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex attribute: index by global vertex, stride over
		 * all vertices per attribute. */
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch attribute: index by patch, stride over patches. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes;
		 * that offset is packed in tcs_offchip_layout[31:12]. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
1002
1003 /* This is a generic helper that can be shared by the NIR and TGSI backends */
1004 static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
1005 struct si_shader_context *ctx,
1006 LLVMValueRef vertex_index,
1007 LLVMValueRef param_index,
1008 unsigned param_base,
1009 ubyte *name,
1010 ubyte *index,
1011 bool is_patch)
1012 {
1013 unsigned param_index_base;
1014
1015 param_index_base = is_patch ?
1016 si_shader_io_get_unique_index_patch(name[param_base], index[param_base]) :
1017 si_shader_io_get_unique_index(name[param_base], index[param_base]);
1018
1019 if (param_index) {
1020 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
1021 LLVMConstInt(ctx->i32, param_index_base, 0),
1022 "");
1023 } else {
1024 param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
1025 }
1026
1027 return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
1028 vertex_index, param_index);
1029 }
1030
/**
 * Compute the TCS->TES off-chip buffer address for the attribute addressed
 * by a TGSI register. Exactly one of \p dst / \p src must be non-NULL;
 * a dst register is converted to an equivalent src register first.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
					struct si_shader_context *ctx,
					const struct tgsi_full_dst_register *dst,
					const struct tgsi_full_src_register *src)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* A 2D register is indexed by vertex; resolve that index first. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
							     1, reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* For accesses through a declared array, make the dynamic
		 * index relative to the first element of that array. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = si_get_indirect_index(ctx, &reg.Indirect,
						    1, reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
	}

	return get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, param_base,
							       name, index, !reg.Register.Dimension);
}
1085
/**
 * Load up to a vec4 worth of data from a buffer resource.
 *
 * \param type     scalar element type of the result
 * \param swizzle  component to load (0..3), or ~0 to load the whole vec4
 * \param buffer   buffer resource descriptor
 * \param offset   address part passed as soffset to ac_build_buffer_load
 * \param base     address part passed as voffset to ac_build_buffer_load
 *                 (note: callers pass the computed attribute address here)
 * \param can_speculate  whether the load may be speculatively executed
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
				LLVMTypeRef type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		/* Load the whole vec4 in one go. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		/* 32-bit element: load the vec4 and extract one component. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit element: load the two dwords at byte offsets swizzle*4 and
	 * swizzle*4+4 separately and combine them. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
1119
/**
 * Load from LDS.
 *
 * \param type    output value type
 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     LLVMTypeRef type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef value;

	if (swizzle == ~0) {
		/* Recurse per channel and pack the results into a vec4. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(&ctx->gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		/* Low dword at "swizzle", high dword at "swizzle + 1". */
		lo = lds_load(bld_base, ctx->i32, swizzle, dw_addr);
		hi = lds_load(bld_base, ctx->i32, swizzle + 1, dw_addr);
		return si_llvm_emit_fetch_64bit(bld_base, type, lo, hi);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1160
/**
 * Store to LDS.
 *
 * \param dw_offset_imm immediate dword offset added to \p dw_addr
 * \param dw_addr       address in dwords
 * \param value         value to store
 */
static void lds_store(struct si_shader_context *ctx,
		      unsigned dw_offset_imm, LLVMValueRef dw_addr,
		      LLVMValueRef value)
{
	dw_addr = lp_build_add(&ctx->bld_base.uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0));

	ac_lds_store(&ctx->ac, dw_addr, value);
}
1177
/**
 * Build a v4i32 buffer resource descriptor whose base address is the
 * shader argument \p param shifted left by 16, i.e. the argument carries
 * the address in 64KB units.
 *
 * The upper half of the descriptor is constant: unlimited size
 * (0xffffffff) and a dword-sized float data format with an XYZW
 * destination swizzle.
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
					   unsigned param)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as v2i64 (address in element 0, constant dwords 2-3 in
	 * element 1) and bitcast to the v4i32 descriptor type. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
1201
1202 static LLVMValueRef fetch_input_tcs(
1203 struct lp_build_tgsi_context *bld_base,
1204 const struct tgsi_full_src_register *reg,
1205 enum tgsi_opcode_type type, unsigned swizzle)
1206 {
1207 struct si_shader_context *ctx = si_shader_context(bld_base);
1208 LLVMValueRef dw_addr, stride;
1209
1210 stride = get_tcs_in_vertex_dw_stride(ctx);
1211 dw_addr = get_tcs_in_current_patch_offset(ctx);
1212 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1213
1214 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1215 }
1216
1217 static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
1218 LLVMValueRef vertex_index,
1219 LLVMValueRef param_index,
1220 unsigned const_index,
1221 unsigned location,
1222 unsigned driver_location,
1223 unsigned component,
1224 unsigned num_components,
1225 bool is_patch,
1226 bool is_compact,
1227 bool load_input)
1228 {
1229 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1230 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1231 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
1232 LLVMValueRef dw_addr, stride;
1233
1234 driver_location = driver_location / 4;
1235
1236 if (load_input) {
1237 stride = get_tcs_in_vertex_dw_stride(ctx);
1238 dw_addr = get_tcs_in_current_patch_offset(ctx);
1239 } else {
1240 if (is_patch) {
1241 stride = NULL;
1242 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1243 } else {
1244 stride = get_tcs_out_vertex_dw_stride(ctx);
1245 dw_addr = get_tcs_out_current_patch_offset(ctx);
1246 }
1247 }
1248
1249 if (param_index) {
1250 /* Add the constant index to the indirect index */
1251 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
1252 LLVMConstInt(ctx->i32, const_index, 0), "");
1253 } else {
1254 param_index = LLVMConstInt(ctx->i32, const_index, 0);
1255 }
1256
1257 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
1258 vertex_index, param_index,
1259 driver_location,
1260 info->input_semantic_name,
1261 info->input_semantic_index,
1262 is_patch);
1263
1264 LLVMValueRef value[4];
1265 for (unsigned i = 0; i < num_components + component; i++) {
1266 value[i] = lds_load(bld_base, ctx->i32, i, dw_addr);
1267 }
1268
1269 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1270 }
1271
1272 static LLVMValueRef fetch_output_tcs(
1273 struct lp_build_tgsi_context *bld_base,
1274 const struct tgsi_full_src_register *reg,
1275 enum tgsi_opcode_type type, unsigned swizzle)
1276 {
1277 struct si_shader_context *ctx = si_shader_context(bld_base);
1278 LLVMValueRef dw_addr, stride;
1279
1280 if (reg->Register.Dimension) {
1281 stride = get_tcs_out_vertex_dw_stride(ctx);
1282 dw_addr = get_tcs_out_current_patch_offset(ctx);
1283 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1284 } else {
1285 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1286 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1287 }
1288
1289 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1290 }
1291
1292 static LLVMValueRef fetch_input_tes(
1293 struct lp_build_tgsi_context *bld_base,
1294 const struct tgsi_full_src_register *reg,
1295 enum tgsi_opcode_type type, unsigned swizzle)
1296 {
1297 struct si_shader_context *ctx = si_shader_context(bld_base);
1298 LLVMValueRef buffer, base, addr;
1299
1300 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
1301
1302 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1303 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1304
1305 return buffer_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle,
1306 buffer, base, addr, true);
1307 }
1308
/**
 * NIR callback: load a TES input from the TCS->TES off-chip buffer.
 *
 * \param vertex_index  dynamic vertex index, or NULL for per-patch inputs
 * \param param_index   dynamic attribute index, or NULL (const_index only)
 * \param const_index   constant attribute offset added to param_index
 * \param driver_location  NIR driver location (in units of 4 per vec4 slot)
 * \param component     first component to load
 * \param num_components  number of components to load
 */
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef buffer, base, addr;

	/* NIR driver locations are expressed per component. */
	driver_location = driver_location / 4;

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->input_semantic_name,
							       info->input_semantic_index,
							       is_patch);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load(), but for now this maximises code sharing
	 * between the NIR and TGSI backends.
	 */
	LLVMValueRef value[4];
	for (unsigned i = component; i < num_components + component; i++) {
		value[i] = buffer_load(&ctx->bld_base, ctx->i32, i, buffer, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
1357
/**
 * TGSI store hook for TCS outputs.
 *
 * Outputs are written both to LDS (so later TCS reads and the epilog can
 * see them, unless no such read exists) and to the off-chip TCS->TES
 * buffer. Tess factors are additionally kept in VGPRs for the epilog when
 * invocation 0 defines them in all invocations.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     unsigned index,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[index];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, index, dst);
		return;
	}

	/* Compute the LDS address of the output within the current patch. */
	if (reg->Register.Dimension) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !sh_info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);

	uint32_t writemask = reg->Register.WriteMask;
	while (writemask) {
		chan_index = u_bit_scan(&writemask);
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(ctx, chan_index, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan_index] = value;

		/* Partial writemask: store each written component separately.
		 * A full vec4 is stored in one go after the loop instead. */
		if (reg->Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan_index]);
			} else if (chan_index < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan_index]);
			}
		}
	}

	/* Full writemask: store the whole vec4 at once. */
	if (reg->Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1454
/**
 * NIR callback: store a TCS output.
 *
 * Mirrors store_output_tcs(): writes to LDS (unless nothing reads the
 * output back) and to the off-chip TCS->TES buffer, plus VGPRs for the
 * tess factors when invocation 0 defines them in all invocations.
 *
 * \param vertex_index  dynamic vertex index, or NULL for per-patch outputs
 * \param param_index   dynamic attribute index; may remain NULL when
 *                      const_index is 0 (treated as a direct access below)
 * \param driver_location  NIR driver location (in units of 4 per vec4 slot)
 * \param src           value(s) to store, starting at \p component
 * \param writemask     channels of the vec4 slot to write
 */
static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    unsigned location,
				    unsigned driver_location,
				    LLVMValueRef src,
				    unsigned component,
				    bool is_patch,
				    bool is_compact,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* NIR driver locations are expressed per component. */
	driver_location = driver_location / 4;

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		/* NOTE: param_index stays NULL when const_index == 0, which
		 * marks the access as direct for the address helpers. */
		if (const_index != 0)
			param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (!param_index) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->output_semantic_name,
							       info->output_semantic_index,
							       is_patch);

	for (unsigned chan = 0; chan < 4; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			ac_lds_store(&ctx->ac, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		/* Partial writemask: store each written component separately.
		 * A full vec4 is stored in one go after the loop instead. */
		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * chan, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	/* Full writemask: store the whole vec4 at once. */
	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, 1, 0, true, false);
	}
}
1571
/**
 * Load a GS input (an output of the previous stage).
 *
 * On GFX9 the ESGS ring lives in LDS and the per-vertex offsets are packed
 * two to a register; on GFX6-8 the input is loaded from the ESGS ring
 * buffer in memory.
 *
 * \param input_index       index of the input in tgsi_shader_info
 * \param vtx_offset_param  which vertex of the primitive to read (0..5)
 * \param type              result type
 * \param swizzle           component (0..3), or ~0 to load all 4 channels
 */
LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
				   unsigned input_index,
				   unsigned vtx_offset_param,
				   LLVMTypeRef type,
				   unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		/* Two 16-bit vertex offsets are packed per 32-bit param. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		/* Each attribute occupies 4 dwords per vertex. */
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Recurse per channel and pack the results. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return lp_build_gather_values(&ctx->gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ctx->gs_vtx_offset[vtx_offset_param];

	vtx_offset = lp_build_mul_imm(uint, gs_vtx_offset, 4);

	/* One dword per channel, strided by 256 in the ring layout. */
	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		/* Load the second dword and combine into a 64-bit value. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1650
1651 static LLVMValueRef fetch_input_gs(
1652 struct lp_build_tgsi_context *bld_base,
1653 const struct tgsi_full_src_register *reg,
1654 enum tgsi_opcode_type type,
1655 unsigned swizzle)
1656 {
1657 struct si_shader_context *ctx = si_shader_context(bld_base);
1658 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1659
1660 unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
1661 if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
1662 return get_primitive_id(ctx, swizzle);
1663
1664 if (!reg->Register.Dimension)
1665 return NULL;
1666
1667 return si_llvm_load_input_gs(&ctx->abi, reg->Register.Index,
1668 reg->Dimension.Index,
1669 tgsi2llvmtype(bld_base, type),
1670 swizzle);
1671 }
1672
1673 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1674 {
1675 switch (interpolate) {
1676 case TGSI_INTERPOLATE_CONSTANT:
1677 return 0;
1678
1679 case TGSI_INTERPOLATE_LINEAR:
1680 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1681 return SI_PARAM_LINEAR_SAMPLE;
1682 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1683 return SI_PARAM_LINEAR_CENTROID;
1684 else
1685 return SI_PARAM_LINEAR_CENTER;
1686 break;
1687 case TGSI_INTERPOLATE_COLOR:
1688 case TGSI_INTERPOLATE_PERSPECTIVE:
1689 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1690 return SI_PARAM_PERSP_SAMPLE;
1691 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1692 return SI_PARAM_PERSP_CENTROID;
1693 else
1694 return SI_PARAM_PERSP_CENTER;
1695 break;
1696 default:
1697 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1698 return -1;
1699 }
1700 }
1701
1702 static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx,
1703 unsigned attr_index, unsigned chan,
1704 LLVMValueRef prim_mask,
1705 LLVMValueRef i, LLVMValueRef j)
1706 {
1707 if (i || j) {
1708 return ac_build_fs_interp(&ctx->ac,
1709 LLVMConstInt(ctx->i32, chan, 0),
1710 LLVMConstInt(ctx->i32, attr_index, 0),
1711 prim_mask, i, j);
1712 }
1713 return ac_build_fs_interp_mov(&ctx->ac,
1714 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1715 LLVMConstInt(ctx->i32, chan, 0),
1716 LLVMConstInt(ctx->i32, attr_index, 0),
1717 prim_mask);
1718 }
1719
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	LLVMValueRef i = NULL, j = NULL;
	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	if (interp) {
		/* Unpack the (i, j) weights from the vec2 parameter. */
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
						ctx->i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
						ctx->i32_1, "");
	}

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.part.ps.prolog.color_two_side) {
		LLVMValueRef is_face_positive;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		is_face_positive = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		/* Interpolate both colors and select per channel based on
		 * the facedness of the primitive. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef front, back;

			front = si_build_fs_interp(ctx,
						   input_index, chan,
						   prim_mask, i, j);
			back = si_build_fs_interp(ctx,
						  back_attr_offset, chan,
						  prim_mask, i, j);

			result[chan] = LLVMBuildSelect(ctx->ac.builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog only has one meaningful component; (f, 0, 0, 1). */
		result[0] = si_build_fs_interp(ctx, input_index,
					       0, prim_mask, i, j);
		result[1] =
		result[2] = LLVMConstReal(ctx->f32, 0.0f);
		result[3] = LLVMConstReal(ctx->f32, 1.0f);
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			result[chan] = si_build_fs_interp(ctx,
							  input_index, chan,
							  prim_mask, i, j);
		}
	}
}
1817
/**
 * Load one fragment shader input into out[4].
 *
 * Colors interpolated by the PS prolog are read straight from input VGPRs;
 * everything else is interpolated here using the weights selected by the
 * input's interpolation mode/location.
 */
void si_llvm_load_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &ctx->bld_base.base;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;
	enum tgsi_semantic semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	enum tgsi_interpolate_mode interp_mode = info->input_interpolate[input_index];
	enum tgsi_interpolate_loc interp_loc = info->input_interpolate_loc[input_index];

	/* Get colors from input VGPRs (set by the prolog). */
	if (semantic_name == TGSI_SEMANTIC_COLOR) {
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (semantic_index * 4);
		/* COLOR1 follows whatever components of COLOR0 were read. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (semantic_index ? util_bitcount(colors_read & 0xf) : 0);

		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	/* idx 0 = flat (no weights), -1 = unhandled mode. */
	interp_param_idx = lookup_interp_param_index(interp_mode, interp_loc);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, semantic_name,
			semantic_index, 0, /* this param is unused */
			shader->selector->info.colors_read, interp_param,
			ctx->abi.prim_mask,
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1862
/* TGSI declaration hook for FS inputs; \p decl is unused because all the
 * needed information is already in tgsi_shader_info. */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_fs(ctx, input_index, out);
}
1871
/* The sample ID is in bits 8..11 of the ANCILLARY input. */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1876
1877 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
1878 {
1879 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1880
1881 LLVMValueRef values[3];
1882 LLVMValueRef result;
1883 unsigned i;
1884 unsigned *properties = ctx->shader->selector->info.properties;
1885
1886 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
1887 unsigned sizes[3] = {
1888 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
1889 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
1890 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
1891 };
1892
1893 for (i = 0; i < 3; ++i)
1894 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
1895
1896 result = lp_build_gather_values(&ctx->gallivm, values, 3);
1897 } else {
1898 result = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
1899 }
1900
1901 return result;
1902 }
1903
/**
 * Load a dword from a constant buffer.
 *
 * \param resource  buffer resource descriptor
 * \param offset    byte offset of the dword to load
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1914
/**
 * Load the (x, y) position of sample \p sample_id from the driver's
 * sample-positions constant buffer; returns (x, y, 0, 0).
 */
static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
	LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
	LLVMValueRef resource = ac_build_load_to_sgpr(&ctx->ac, desc, buf_index);

	/* offset = sample_id * 8  (8 = 2 floats containing samplepos.xy) */
	LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
	LLVMValueRef offset1 = LLVMBuildAdd(ctx->ac.builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");

	LLVMValueRef pos[4] = {
		buffer_load_const(ctx, resource, offset0),
		buffer_load_const(ctx, resource, offset1),
		LLVMConstReal(ctx->f32, 0),
		LLVMConstReal(ctx->f32, 0)
	};

	return lp_build_gather_values(&ctx->gallivm, pos, 4);
}
1936
1937 static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi,
1938 LLVMTypeRef type,
1939 unsigned num_components)
1940 {
1941 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1942 struct lp_build_context *bld = &ctx->bld_base.base;
1943
1944 LLVMValueRef coord[4] = {
1945 LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
1946 LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
1947 ctx->ac.f32_0,
1948 ctx->ac.f32_0
1949 };
1950
1951 /* For triangles, the vector should be (u, v, 1-u-v). */
1952 if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
1953 PIPE_PRIM_TRIANGLES)
1954 coord[2] = lp_build_sub(bld, ctx->ac.f32_1,
1955 lp_build_add(bld, coord[0], coord[1]));
1956
1957 return lp_build_gather_values(&ctx->gallivm, coord, 4);
1958 }
1959
/* Load a tess level (TESSINNER or TESSOUTER) for the current patch from
 * the off-chip TCS->TES buffer. Used by TES to read what TCS wrote. */
static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
				    unsigned semantic_name)
{
	LLVMValueRef buffer, base, addr;

	/* Map the semantic to its per-patch output slot. */
	int param = si_shader_io_get_unique_index_patch(semantic_name, 0);

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	/* NULL vertex index: this is per-patch data, addressed by patch only. */
	addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
					  LLVMConstInt(ctx->i32, param, 0));

	/* ~0 swizzle = load all four components. */
	return buffer_load(&ctx->bld_base, ctx->f32,
			   ~0, buffer, base, addr, true);

}
1977
1978 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
1979 unsigned varying_id)
1980 {
1981 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1982 unsigned semantic_name;
1983
1984 switch (varying_id) {
1985 case VARYING_SLOT_TESS_LEVEL_INNER:
1986 semantic_name = TGSI_SEMANTIC_TESSINNER;
1987 break;
1988 case VARYING_SLOT_TESS_LEVEL_OUTER:
1989 semantic_name = TGSI_SEMANTIC_TESSOUTER;
1990 break;
1991 default:
1992 unreachable("unknown tess level");
1993 }
1994
1995 return load_tess_level(ctx, semantic_name);
1996
1997 }
1998
1999 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
2000 {
2001 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2002 if (ctx->type == PIPE_SHADER_TESS_CTRL)
2003 return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
2004 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
2005 return get_num_tcs_out_vertices(ctx);
2006 else
2007 unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
2008 }
2009
/* Translate a TGSI system-value declaration into an LLVM value and cache
 * it in ctx->system_values[index] for later fetches.
 *
 * Values either come straight from the ABI (VGPR/SGPR inputs), are
 * computed from them, or are loaded from driver-internal constant
 * buffers. Unknown semantics are asserted on and ignored.
 */
void si_load_system_value(struct si_shader_context *ctx,
			  unsigned index,
			  const struct tgsi_full_declaration *decl)
{
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = ctx->abi.instance_id;
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* The HW vertex ID is relative to base_vertex; GLSL wants
		 * the absolute ID. */
		value = LLVMBuildAdd(ctx->ac.builder,
				     ctx->abi.vertex_id,
				     ctx->abi.base_vertex, "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of vs_state_bits tells whether the draw is indexed. */
		indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(ctx->ac.builder, indexed,
					ctx->abi.base_vertex, ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = ctx->abi.start_instance;
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = ctx->abi.draw_id;
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			/* TCS: invocation is in bits [12:8] of tcs_rel_ids. */
			value = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = ctx->abi.gs_invocation_id;
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; W arrives as 1/W's reciprocal source,
		 * so apply RCP to get the expected 1/W in .w. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = ctx->abi.front_face;
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position within the pixel = fractional part of the
		 * fragment position; z and w are 0. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
		value = si_load_tess_coord(&ctx->abi, NULL, 4);
		break;

	case TGSI_SEMANTIC_VERTICESIN:
		value = si_load_patch_vertices_in(&ctx->abi);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
		value = load_tess_level(ctx, decl->Semantic.Name);
		break;

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Driver-set default tess levels: 8 dwords in the
		 * SI_HS_CONST_DEFAULT_TESS_LEVELS buffer, outer first
		 * (dwords 0..3), inner second (dwords 4..7). */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(&ctx->gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(ctx, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
		value = get_block_size(&ctx->abi);
		break;

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		/* Unused dimensions may have a NULL workgroup ID; substitute 0. */
		LLVMValueRef values[3];

		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->abi.workgroup_ids[i]) {
				values[i] = ctx->abi.workgroup_ids[i];
			}
		}
		value = lp_build_gather_values(&ctx->gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = ctx->abi.local_invocation_ids;
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* ps.live is true for real (non-helper) invocations, so the
		 * result is NOT(live), sign-extended to an i32 0/~0 mask. */
		value = lp_build_intrinsic(ctx->ac.builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildSExt(ctx->ac.builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* Wave size is always 64 on this hardware. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* 1 << thread_id, as a 64-bit lane mask returned in v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* GE = ~0 << id; GT = ~1 << id; LE/LT are the bitwise NOT
		 * of GT/GE respectively. Result is a v2i32 lane mask. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
2230
/* Declare the compute shader's shared (LDS) memory block as a global in
 * the local address space, sized by the selector's declared local size,
 * and stash an i8* view of it in ctx->ac.lds. Only one shared-memory
 * declaration per shader is supported. */
void si_declare_compute_memory(struct si_shader_context *ctx,
			       const struct tgsi_full_declaration *decl)
{
	struct si_shader_selector *sel = ctx->shader->selector;

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_LOCAL_ADDR_SPACE);
	LLVMValueRef var;

	assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
	assert(decl->Range.First == decl->Range.Last);
	assert(!ctx->ac.lds);

	var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
					  LLVMArrayType(ctx->i8, sel->local_size),
					  "compute_lds",
					  AC_LOCAL_ADDR_SPACE);
	/* Dword alignment is sufficient for LDS accesses. */
	LLVMSetAlignment(var, 4);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}
2251
/* Load the buffer descriptor for constant buffer slot i from the
 * combined const/shader-buffer descriptor list. */
static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
{
	LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
					     ctx->param_const_and_shader_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, list_ptr,
				     LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
}
2260
2261 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
2262 {
2263 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2264 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
2265
2266 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
2267 index = LLVMBuildAdd(ctx->ac.builder, index,
2268 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
2269
2270 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
2271 }
2272
2273 static LLVMValueRef
2274 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
2275 {
2276 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2277 LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
2278 ctx->param_const_and_shader_buffers);
2279
2280 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
2281 index = LLVMBuildSub(ctx->ac.builder,
2282 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
2283 index, "");
2284
2285 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
2286 }
2287
/* TGSI fetch callback for CONST-file operands.
 *
 * Recurses on itself to expand LP_CHAN_ALL into four per-channel fetches
 * and to split 64-bit types into lo/hi dword loads. Has a fast path when
 * the shader declares exactly one constant buffer and no shader buffers,
 * in which case the user-data SGPRs point at buffer 0 directly.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;

	if (swizzle == LP_CHAN_ALL) {
		/* Fetch all four channels and gather them into a vector. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	/* Split 64-bit loads. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef lo, hi;

		lo = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle);
		hi = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle + 1);
		return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
						lo, hi);
	}

	/* Byte address of the dword: (vec4 index * 4 + channel) * 4. */
	idx = reg->Register.Index * 4 + swizzle;
	if (reg->Register.Indirect) {
		addr = si_get_indirect_index(ctx, ireg, 16, idx * 4);
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	/* Fast path when user data SGPRs point to constant buffer 0 directly. */
	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		LLVMValueRef ptr =
			LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);

		/* This enables use of s_load_dword and flat_load_dword for const buffer 0
		 * loads, and up to x4 load opcode merging. However, it leads to horrible
		 * code reducing SIMD wave occupancy from 8 to 2 in many cases.
		 *
		 * Using s_buffer_load_dword (x1) seems to be the best option right now.
		 *
		 * LLVM 5.0 on SI doesn't insert a required s_nop between SALU setting
		 * a descriptor and s_buffer_load_dword using it, so we can't expand
		 * the pointer into a full descriptor like below. We have to use
		 * s_load_dword instead. The only case when LLVM 5.0 would select
		 * s_buffer_load_dword (that we have to prevent) is when we use use
		 * a literal offset where we don't need bounds checking.
		 */
		if (ctx->screen->info.chip_class == SI &&
		    HAVE_LLVM < 0x0600 &&
		    !reg->Register.Indirect) {
			/* s_load_dword takes a dword index: byte address >> 2. */
			addr = LLVMBuildLShr(ctx->ac.builder, addr, LLVMConstInt(ctx->i32, 2, 0), "");
			LLVMValueRef result = ac_build_load_invariant(&ctx->ac, ptr, addr);
			return bitcast(bld_base, type, result);
		}

		/* Do the bounds checking with a descriptor, because
		 * doing computation and manual bounds checking of 64-bit
		 * addresses generates horrible VALU code with very high
		 * VGPR usage and very low SIMD occupancy.
		 */
		ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->i64, "");
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->v2i32, "");

		/* Build a full v4i32 descriptor from the 64-bit pointer:
		 * base address, size = declared constants, dword format. */
		LLVMValueRef desc_elems[] = {
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_0, ""),
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_1, ""),
			LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
			LLVMConstInt(ctx->i32,
				S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0)
		};
		LLVMValueRef desc = ac_build_gather_values(&ctx->ac, desc_elems, 4);
		LLVMValueRef result = buffer_load_const(ctx, desc, addr);
		return bitcast(bld_base, type, result);
	}

	/* Slow path: load the descriptor of the addressed constant buffer. */
	assert(reg->Register.Dimension);
	buf = reg->Dimension.Index;

	if (reg->Dimension.Indirect) {
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		/* Constant buffers follow the shader-buffer slots in the list. */
		index = LLVMBuildAdd(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_load_to_sgpr(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	return bitcast(bld_base, type, buffer_load_const(ctx, bufp, addr));
}
2396
/* Initialize arguments for the shader export intrinsic.
 *
 * For fragment shaders the per-MRT SPI color format from the shader key
 * selects how the four values are packed (full 32-bit, 16-bit packed
 * float/norm/int, or single/dual channel). Non-fragment stages always
 * export full 32-bit ABGR.
 */
static void si_llvm_init_export_args(struct si_shader_context *ctx,
				     LLVMValueRef *values,
				     unsigned target,
				     struct ac_export_args *args)
{
	LLVMValueRef f32undef = LLVMGetUndef(ctx->ac.f32);
	unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
	unsigned chan;
	/* NOTE: only initialized for fragment shaders below; the packi
	 * paths that read them are only reachable for fragment formats. */
	bool is_int8, is_int10;

	/* Default is 0xf. Adjusted below depending on the format. */
	args->enabled_channels = 0xf; /* writemask */

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	if (ctx->type == PIPE_SHADER_FRAGMENT) {
		const struct si_shader_key *key = &ctx->shader->key;
		unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		assert(cbuf >= 0 && cbuf < 8);
		/* 4 format bits per color buffer. */
		spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
		is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
		is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
	}

	args->compr = false;
	args->out[0] = f32undef;
	args->out[1] = f32undef;
	args->out[2] = f32undef;
	args->out[3] = f32undef;

	/* Packing callbacks for the 16-bit compressed formats; at most one
	 * of these is set by the switch below. */
	LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
	LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
			      unsigned bits, bool hi) = NULL;

	switch (spi_shader_col_format) {
	case V_028714_SPI_SHADER_ZERO:
		args->enabled_channels = 0; /* writemask */
		args->target = V_008DFC_SQ_EXP_NULL;
		break;

	case V_028714_SPI_SHADER_32_R:
		args->enabled_channels = 1; /* writemask */
		args->out[0] = values[0];
		break;

	case V_028714_SPI_SHADER_32_GR:
		args->enabled_channels = 0x3; /* writemask */
		args->out[0] = values[0];
		args->out[1] = values[1];
		break;

	case V_028714_SPI_SHADER_32_AR:
		args->enabled_channels = 0x9; /* writemask */
		args->out[0] = values[0];
		args->out[3] = values[3];
		break;

	case V_028714_SPI_SHADER_FP16_ABGR:
		packf = ac_build_cvt_pkrtz_f16;
		break;

	case V_028714_SPI_SHADER_UNORM16_ABGR:
		packf = ac_build_cvt_pknorm_u16;
		break;

	case V_028714_SPI_SHADER_SNORM16_ABGR:
		packf = ac_build_cvt_pknorm_i16;
		break;

	case V_028714_SPI_SHADER_UINT16_ABGR:
		packi = ac_build_cvt_pk_u16;
		break;

	case V_028714_SPI_SHADER_SINT16_ABGR:
		packi = ac_build_cvt_pk_i16;
		break;

	case V_028714_SPI_SHADER_32_ABGR:
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
		break;
	}

	/* Pack f16 or norm_i16/u16. */
	if (packf) {
		for (chan = 0; chan < 2; chan++) {
			/* Pack (2*chan, 2*chan+1) into one dword. */
			LLVMValueRef pack_args[2] = {
				values[2 * chan],
				values[2 * chan + 1]
			};
			LLVMValueRef packed;

			packed = packf(&ctx->ac, pack_args);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		args->compr = 1; /* COMPR flag */
	}
	/* Pack i16/u16. */
	if (packi) {
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				ac_to_integer(&ctx->ac, values[2 * chan]),
				ac_to_integer(&ctx->ac, values[2 * chan + 1])
			};
			LLVMValueRef packed;

			packed = packi(&ctx->ac, pack_args,
				       is_int8 ? 8 : is_int10 ? 10 : 16,
				       chan == 1);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		args->compr = 1; /* COMPR flag */
	}
}
2520
/* Emit the alpha-test kill: discard the fragment if the alpha comparison
 * against the reference value fails. PIPE_FUNC_NEVER kills unconditionally
 * (the else branch); PIPE_FUNC_ALWAYS maps to a 0 predicate in cond_map,
 * which is caught by the assert and never emitted here. */
static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
			  LLVMValueRef alpha)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
		/* Map gallium compare funcs to ordered LLVM float predicates. */
		static LLVMRealPredicate cond_map[PIPE_FUNC_ALWAYS + 1] = {
			[PIPE_FUNC_LESS] = LLVMRealOLT,
			[PIPE_FUNC_EQUAL] = LLVMRealOEQ,
			[PIPE_FUNC_LEQUAL] = LLVMRealOLE,
			[PIPE_FUNC_GREATER] = LLVMRealOGT,
			[PIPE_FUNC_NOTEQUAL] = LLVMRealONE,
			[PIPE_FUNC_GEQUAL] = LLVMRealOGE,
		};
		LLVMRealPredicate cond = cond_map[ctx->shader->key.part.ps.epilog.alpha_func];
		assert(cond);

		LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
						      SI_PARAM_ALPHA_REF);
		LLVMValueRef alpha_pass =
			LLVMBuildFCmp(ctx->ac.builder, cond, alpha, alpha_ref, "");
		ac_build_kill_if_false(&ctx->ac, alpha_pass);
	} else {
		/* NEVER: kill the fragment unconditionally. */
		ac_build_kill_if_false(&ctx->ac, LLVMConstInt(ctx->i1, 0, 0));
	}
}
2547
2548 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2549 LLVMValueRef alpha,
2550 unsigned samplemask_param)
2551 {
2552 struct si_shader_context *ctx = si_shader_context(bld_base);
2553 LLVMValueRef coverage;
2554
2555 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2556 coverage = LLVMGetParam(ctx->main_fn,
2557 samplemask_param);
2558 coverage = ac_to_integer(&ctx->ac, coverage);
2559
2560 coverage = lp_build_intrinsic(ctx->ac.builder, "llvm.ctpop.i32",
2561 ctx->i32,
2562 &coverage, 1, LP_FUNC_ATTR_READNONE);
2563
2564 coverage = LLVMBuildUIToFP(ctx->ac.builder, coverage,
2565 ctx->f32, "");
2566
2567 coverage = LLVMBuildFMul(ctx->ac.builder, coverage,
2568 LLVMConstReal(ctx->f32,
2569 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2570
2571 return LLVMBuildFMul(ctx->ac.builder, alpha, coverage, "");
2572 }
2573
/* Compute clip distances from a written CLIPVERTEX: dot the clip-vertex
 * position with each of the 8 user clip planes (loaded from the
 * SI_VS_CONST_CLIP_PLANES buffer) and fill the two clip-distance
 * position exports pos[2] and pos[3] (4 distances each). */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start each distance at 0 and accumulate the dot product. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Plane (reg_index*4 + chan), component const_chan,
				 * as a byte offset into the clip-plane buffer. */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(&ctx->bld_base.base, args->out[chan],
						     lp_build_mul(&ctx->bld_base.base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2616
2617 static void si_dump_streamout(struct pipe_stream_output_info *so)
2618 {
2619 unsigned i;
2620
2621 if (so->num_outputs)
2622 fprintf(stderr, "STREAMOUT\n");
2623
2624 for (i = 0; i < so->num_outputs; i++) {
2625 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2626 so->output[i].start_component;
2627 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2628 i, so->output[i].output_buffer,
2629 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2630 so->output[i].register_index,
2631 mask & 1 ? "x" : "",
2632 mask & 2 ? "y" : "",
2633 mask & 4 ? "z" : "",
2634 mask & 8 ? "w" : "");
2635 }
2636 }
2637
/* Store one shader output to its streamout buffer: gather the written
 * components as integers, pack them into an i32 vector of the right
 * width (3 components round up to v4i32), and emit a buffer store at
 * the precomputed per-buffer write offset. */
static void emit_streamout_output(struct si_shader_context *ctx,
				  LLVMValueRef const *so_buffers,
				  LLVMValueRef const *so_write_offsets,
				  struct pipe_stream_output *stream_out,
				  struct si_shader_output_values *shader_out)
{
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	/* Guard in release builds too: a bad component count would
	 * overflow out[] below. */
	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
	case 4: /* as v4i32 */
		vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
		for (int j = 0; j < num_comps; j++) {
			vdata = LLVMBuildInsertElement(ctx->ac.builder, vdata, out[j],
						       LLVMConstInt(ctx->i32, j, 0), "");
		}
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, 1, 1, true, false);
}
2684
/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 *
 * Only threads whose index is below the SGPR-provided vertex count emit
 * stores; this also serves as the out-of-bounds guard. Per enabled
 * buffer, the byte offset is:
 *   streamout_offset[buf]*4 + (write_index + thread_id)*stride[buf]*4
 *   + dst_offset*4 (added in emit_streamout_output)
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, &ctx->gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
						    ctx->param_rw_buffers);

		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			if (reg >= noutput)
				continue;

			if (stream != so->output[i].stream)
				continue;

			emit_streamout_output(ctx, so_buffers, so_write_offset,
					      &so->output[i], &outputs[reg]);
		}
	}
	lp_build_endif(&if_ctx);
}
2768
/* Emit one PARAM export (generic varying) at the given param slot. */
static void si_export_param(struct si_shader_context *ctx, unsigned index,
			    LLVMValueRef *values)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}
2778
/* Emit PARAM exports for all outputs consumed by the next stage.
 *
 * Skips outputs that belong to no vertex stream 0 channel, semantics
 * that are never parameters, and outputs killed by the optimization key.
 * Records each exported output's param slot in vs_output_param_offset
 * and the total count in nr_param_exports. */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
			           unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Only outputs with at least one channel in stream 0 are
		 * exported as parameters. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics can be read by the fragment stage. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs the key marks as unused by the next stage. */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
2825
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits the position exports (pos0 = position, pos1 = the misc vector with
 * point size / edge flag / layer / viewport index, pos2-3 = clip distances)
 * and then the parameter exports consumed by the pixel shader.
 * Also updates shader->info.nr_pos_exports.
 */
static void si_llvm_export_vs(struct si_shader_context *ctx,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Build position exports. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_export_args(ctx, outputs[i].values,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (!shader->key.opt.clip_disable) {
				/* Clip distance vectors go into pos2/pos3. */
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_export_args(ctx, outputs[i].values,
							 V_008DFC_SQ_EXP_POS + index,
							 &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		/* X = psize, Y = edgeflag, Z = layer; the viewport-index
		 * channel is added below depending on the chip class. */
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* OR the viewport index into bits [19:16] of
				 * the Z channel (which may hold the layer). */
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac,  pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				/* Pre-GFX9: viewport index in its own W channel. */
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the used position export slots. */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* Emit them with compacted, contiguous target indices. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
2972
/**
 * Forward all outputs from the vertex shader to the TES. This is only used
 * for the fixed function TCS.
 *
 * The VS outputs live in LDS (written by the LS stage); each one selected by
 * the ff_tcs_inputs_to_copy bitmask is copied to the off-chip TCS/TES buffer.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* TCS invocation (output vertex) id lives in tcs_rel_ids[12:8]. */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS dword address of this invocation's input vertex:
	 * current patch base + invocation_id * per-vertex dword stride. */
	lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
	lds_vertex_offset = LLVMBuildMul(ctx->ac.builder, invocation_id,
					 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(ctx->ac.builder, lds_base, lds_vertex_offset, "");

	/* Bitmask of VS output slots the fixed-function TCS must pass through. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					LLVMConstInt(ctx->i32, 4 * i, 0),
					 "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					get_rel_patch_id(ctx),
					invocation_id,
					LLVMConstInt(ctx->i32, i, 0));

		/* Load the full vec4 (~0 = all channels) from LDS... */
		LLVMValueRef value = lds_load(bld_base, ctx->ac.i32, ~0,
					      lds_ptr);

		/* ...and store it to the off-chip buffer. */
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
3014
/* Gather and store the tessellation factors for the current patch.
 *
 * Only invocation 0 of the patch performs the stores (guarded by an
 * if-block). The factors come either from VGPRs (when they were defined in
 * invocation 0) or from LDS, and are written to the tess-factor ring buffer
 * consumed by the fixed-function tessellator; if the TES reads them, they
 * are also mirrored into the off-chip patch buffer.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset,
				  LLVMValueRef invoc0_tf_outer[4],
				  LLVMValueRef invoc0_tf_inner[2])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Add a barrier before loading tess factors from LDS. */
	if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
		si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Unused components stay undef. */
	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
		/* Tess factors are in VGPRs. */
		for (i = 0; i < outer_comps; i++)
			outer[i] = out[i] = invoc0_tf_outer[i];
		for (i = 0; i < inner_comps; i++)
			inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
	} else {
		/* Load tess_inner and tess_outer from LDS.
		 * Any invocation can write them, so we can't get them from a temporary.
		 */
		tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
		tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

		lds_base = tcs_out_current_patch_data_offset;
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_inner_index * 4, 0), "");
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_outer_index * 4, 0), "");

		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_inner);
		}
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		LLVMValueRef tmp = out[0];
		out[0] = out[1];
		out[1] = tmp;
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(&ctx->gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(&ctx->gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch (rel_patch_id == 0) writes the control word. */
	lp_build_if(&inner_if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->info.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = lp_build_gather_values(&ctx->gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(&ctx->gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
3189
3190 static LLVMValueRef
3191 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
3192 unsigned param, unsigned return_index)
3193 {
3194 return LLVMBuildInsertValue(ctx->ac.builder, ret,
3195 LLVMGetParam(ctx->main_fn, param),
3196 return_index, "");
3197 }
3198
3199 static LLVMValueRef
3200 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
3201 unsigned param, unsigned return_index)
3202 {
3203 LLVMBuilderRef builder = ctx->ac.builder;
3204 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
3205
3206 return LLVMBuildInsertValue(builder, ret,
3207 ac_to_float(&ctx->ac, p),
3208 return_index, "");
3209 }
3210
3211 static LLVMValueRef
3212 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
3213 unsigned param, unsigned return_index)
3214 {
3215 LLVMBuilderRef builder = ctx->ac.builder;
3216 LLVMValueRef ptr, lo, hi;
3217
3218 ptr = LLVMGetParam(ctx->main_fn, param);
3219 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
3220 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
3221 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
3222 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
3223 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
3224 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
3225 }
3226
/* This only writes the tessellation factor levels.
 *
 * TCS main-part epilogue: copies fixed-function TCS inputs (if any), then
 * packs the SGPRs/VGPRs the TCS epilog part needs into ctx->return_value.
 * On GFX9 (merged LS+HS) it also closes the merged-shader if-block and
 * builds phis so the values are defined on the skipped path.
 */
static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (ctx->screen->info.chip_class >= GFX9) {
		/* Phi between the current block and the merged-wrap entry
		 * block; values are undef on the path that skipped HS. */
		LLVMBasicBlockRef blocks[2] = {
			LLVMGetInsertBlock(builder),
			ctx->merged_wrap_if_state.entry_block
		};
		LLVMValueRef values[2];

		lp_build_endif(&ctx->merged_wrap_if_state);

		values[0] = rel_patch_id;
		values[1] = LLVMGetUndef(ctx->i32);
		rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = tf_lds_offset;
		values[1] = LLVMGetUndef(ctx->i32);
		tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = invocation_id;
		values[1] = ctx->i32_1; /* cause the epilog to skip threads */
		invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
	}

	/* Return epilog parameters from this function. */
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->info.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
	invocation_id = ac_to_float(&ctx->ac, invocation_id);
	tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);

	/* Leave a hole corresponding to the two input VGPRs. This ensures that
	 * the invocation_id output does not alias the tcs_rel_ids input,
	 * which saves a V_MOV on gfx9.
	 */
	vgpr += 2;

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");

	if (ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
		vgpr++; /* skip the tess factor LDS offset */
		/* Pass the six tess factors directly in VGPRs. */
		for (unsigned i = 0; i < 6; i++) {
			LLVMValueRef value =
				LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
			value = ac_to_float(&ctx->ac, value);
			ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
		}
	} else {
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	}
	ctx->return_value = ret;
}
3322
/* Pass TCS inputs from LS to TCS on GFX9 (merged LS+HS): the LS part
 * forwards the SGPR/VGPR inputs the HS part needs through the return value.
 * The "8 +" offsets skip the 8 system SGPR slots of the merged shader.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* 64-bit descriptor pointers are split into two i32 SGPRs. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
		ctx->param_bindless_samplers_and_images,
		8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs (patch id and rel ids) follow the user SGPRs. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_patch_id),
				   vgpr++, "");
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_rel_ids),
				   vgpr++, "");
	ctx->return_value = ret;
}
3367
/* Pass GS inputs from ES to GS on GFX9 (merged ES+GS): the ES part forwards
 * the SGPR/VGPR inputs the GS part needs through the return value.
 * The "8 +" offsets skip the 8 system SGPR slots of the merged shader.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* 64-bit descriptor pointers are split into two i32 SGPRs. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
		ctx->param_bindless_samplers_and_images,
		8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* Forward the five GS vertex-offset/prim-id VGPRs after user SGPRs. */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
3396
/* VS-as-LS epilogue: write all vertex outputs to LDS so the TCS (HS) can
 * read them as inputs. On GFX9 also fills the merged-shader return value.
 */
static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
	/* LDS dword address of this vertex = vertex_id * per-vertex stride. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			/* Skip channels the shader never writes. */
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			lds_store(ctx, chan, dw_addr,
				  LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
3452
/* VS/TES-as-ES epilogue: write all outputs to the ESGS ring so the GS can
 * read them. The ring is in LDS on GFX9 (merged ES+GS) and in a memory
 * buffer on older chips. On GFX9 also fills the merged-shader return value.
 */
static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		/* Global vertex index = wave index * 64 + thread id
		 * (merged_wave_info[27:24] holds the wave index). */
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		int param;

		/* Layer/viewport writes in a non-last vertex stage are ignored;
		 * see the ARB_shader_viewport_layer_array resolution. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->info.chip_class >= GFX9) {
				lds_store(ctx, param * 4 + chan, lds_base, out_val);
				continue;
			}

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
3508
3509 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
3510 {
3511 if (ctx->screen->info.chip_class >= GFX9)
3512 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
3513 else
3514 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
3515 }
3516
3517 static void emit_gs_epilogue(struct si_shader_context *ctx)
3518 {
3519 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
3520 si_get_gs_wave_id(ctx));
3521
3522 if (ctx->screen->info.chip_class >= GFX9)
3523 lp_build_endif(&ctx->merged_wrap_if_state);
3524 }
3525
3526 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
3527 unsigned max_outputs,
3528 LLVMValueRef *addrs)
3529 {
3530 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3531 struct tgsi_shader_info UNUSED *info = &ctx->shader->selector->info;
3532
3533 assert(info->num_outputs <= max_outputs);
3534
3535 emit_gs_epilogue(ctx);
3536 }
3537
/* TGSI entry point for the GS epilogue. */
static void si_tgsi_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	emit_gs_epilogue(si_shader_context(bld_base));
}
3543
/* Hardware-VS epilogue: optionally clamp vertex colors (controlled by a
 * state bit in an SGPR), gather all outputs, emit streamout stores, an
 * optional PrimitiveID export, and finally all position/parameter exports.
 */
static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	/* +1 reserves room for the optional PrimitiveID output below. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(ctx->ac.builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, &ctx->gallivm, cond);
			}

			/* Clamp all 4 channels to [0, 1] in place. */
			for (j = 0; j < 4; j++) {
				addr = addrs[4 * i + j];
				val = LLVMBuildLoad(ctx->ac.builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(ctx->ac.builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Read all outputs into the outputs[] array. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(ctx->ac.builder,
					      addrs[4 * i + j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = ac_to_float(&ctx->ac, get_primitive_id(ctx, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(ctx, outputs, i);
	FREE(outputs);
}
3629
3630 static void si_tgsi_emit_epilogue(struct lp_build_tgsi_context *bld_base)
3631 {
3632 struct si_shader_context *ctx = si_shader_context(bld_base);
3633
3634 ctx->abi.emit_outputs(&ctx->abi, RADEON_LLVM_MAX_OUTPUTS,
3635 &ctx->outputs[0][0]);
3636 }
3637
/* Queue of pending PS export instructions (color MRTs and Z/stencil/
 * samplemask), emitted in order by si_emit_ps_exports. */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args[] */
	struct ac_export_args args[10];	/* queued exports, at most 10 */
};
3642
3643 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
3644 LLVMValueRef depth, LLVMValueRef stencil,
3645 LLVMValueRef samplemask, struct si_ps_exports *exp)
3646 {
3647 struct si_shader_context *ctx = si_shader_context(bld_base);
3648 struct ac_export_args args;
3649
3650 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
3651
3652 memcpy(&exp->args[exp->num++], &args, sizeof(args));
3653 }
3654
/* Queue the export(s) for one color output.
 *
 * Applies the epilog key's color transforms (clamp, alpha-to-one, alpha
 * test, smoothing) first. If FS_COLOR0_WRITES_ALL_CBUFS is enabled
 * (last_cbuf > 0), color0 is broadcast to every enabled color buffer.
 * "is_last" marks the final color export so its DONE/valid-mask bits get set.
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = ctx->ac.f32_1;

	/* Alpha test */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(ctx, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(ctx, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3720
3721 static void si_emit_ps_exports(struct si_shader_context *ctx,
3722 struct si_ps_exports *exp)
3723 {
3724 for (unsigned i = 0; i < exp->num; i++)
3725 ac_build_export(&ctx->ac, &exp->args[i]);
3726 }
3727
3728 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3729 {
3730 struct si_shader_context *ctx = si_shader_context(bld_base);
3731 struct lp_build_context *base = &bld_base->base;
3732 struct ac_export_args args;
3733
3734 args.enabled_channels = 0x0; /* enabled channels */
3735 args.valid_mask = 1; /* whether the EXEC mask is valid */
3736 args.done = 1; /* DONE bit */
3737 args.target = V_008DFC_SQ_EXP_NULL;
3738 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3739 args.out[0] = base->undef; /* R */
3740 args.out[1] = base->undef; /* G */
3741 args.out[2] = base->undef; /* B */
3742 args.out[3] = base->undef; /* A */
3743
3744 ac_build_export(&ctx->ac, &args);
3745 }
3746
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Execute a discard that was deferred until the end of the shader. */
	if (ctx->postponed_kill)
		ac_build_kill_if_false(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = addrs[4 * i + j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z channel carries gl_FragDepth. */
			depth = LLVMBuildLoad(builder,
					      addrs[4 * i + 2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			/* Stencil is read from the Y channel. */
			stencil = LLVMBuildLoad(builder,
						addrs[4 * i + 1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   addrs[4 * i + 0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   ac_to_integer(&ctx->ac,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		/* Skip color outputs that were never written (packed tightly). */
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3844
/* TGSI MEMBAR: emit an s_waitcnt covering the memory traffic implied by
 * the barrier flags (an immediate in src0).
 *
 * NOTE(review): the mask arithmetic is intentionally inverted —
 * NOOP_WAITCNT appears to be the "wait for nothing" encoding and each
 * *_CNT macro presumably has its own counter field zeroed, so AND-ing a
 * mask in forces a wait on that counter. Confirm against the macro
 * definitions in sid.h / si_pipe.h.
 */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	/* Thread-group scope waits on both vector memory and LDS/constant. */
	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	/* Buffer/image traffic only needs the vector memory counter. */
	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	/* Shared (LDS) traffic only needs the LGKM counter. */
	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	/* Emit nothing when no counter ended up being waited on. */
	if (waitcnt != NOOP_WAITCNT)
		ac_build_waitcnt(&ctx->ac, waitcnt);
}
3869
3870 static void clock_emit(
3871 const struct lp_build_tgsi_action *action,
3872 struct lp_build_tgsi_context *bld_base,
3873 struct lp_build_emit_data *emit_data)
3874 {
3875 struct si_shader_context *ctx = si_shader_context(bld_base);
3876 LLVMValueRef tmp;
3877
3878 tmp = lp_build_intrinsic(ctx->ac.builder, "llvm.readcyclecounter",
3879 ctx->i64, NULL, 0, 0);
3880 tmp = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->v2i32, "");
3881
3882 emit_data->output[0] =
3883 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_0, "");
3884 emit_data->output[1] =
3885 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_1, "");
3886 }
3887
3888 static void si_llvm_emit_ddxy(
3889 const struct lp_build_tgsi_action *action,
3890 struct lp_build_tgsi_context *bld_base,
3891 struct lp_build_emit_data *emit_data)
3892 {
3893 struct si_shader_context *ctx = si_shader_context(bld_base);
3894 unsigned opcode = emit_data->info->opcode;
3895 LLVMValueRef val;
3896 int idx;
3897 unsigned mask;
3898
3899 if (opcode == TGSI_OPCODE_DDX_FINE)
3900 mask = AC_TID_MASK_LEFT;
3901 else if (opcode == TGSI_OPCODE_DDY_FINE)
3902 mask = AC_TID_MASK_TOP;
3903 else
3904 mask = AC_TID_MASK_TOP_LEFT;
3905
3906 /* for DDX we want to next X pixel, DDY next Y pixel. */
3907 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3908
3909 val = ac_to_integer(&ctx->ac, emit_data->args[0]);
3910 val = ac_build_ddxy(&ctx->ac, mask, idx, val);
3911 emit_data->output[emit_data->chan] = val;
3912 }
3913
3914 /*
3915 * this takes an I,J coordinate pair,
3916 * and works out the X and Y derivatives.
3917 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3918 */
3919 static LLVMValueRef si_llvm_emit_ddxy_interp(
3920 struct lp_build_tgsi_context *bld_base,
3921 LLVMValueRef interp_ij)
3922 {
3923 struct si_shader_context *ctx = si_shader_context(bld_base);
3924 LLVMValueRef result[4], a;
3925 unsigned i;
3926
3927 for (i = 0; i < 2; i++) {
3928 a = LLVMBuildExtractElement(ctx->ac.builder, interp_ij,
3929 LLVMConstInt(ctx->i32, i, 0), "");
3930 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3931 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3932 }
3933
3934 return lp_build_gather_values(&ctx->gallivm, result, 4);
3935 }
3936
/* Fetch arguments for INTERP_OFFSET / INTERP_SAMPLE: produces
 * args[0..1] = (x, y) offset from the pixel center at which the input
 * should be interpolated. INTERP_CENTROID fetches nothing here.
 */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = ac_to_integer(&ctx->ac, sample_id);

		/* Section 8.13.2 (Interpolation Functions) of the OpenGL Shading
		 * Language 4.50 spec says about interpolateAtSample:
		 *
		 *    "Returns the value of the input interpolant variable at
		 *     the location of sample number sample. If multisample
		 *     buffers are not available, the input variable will be
		 *     evaluated at the center of the pixel. If sample sample
		 *     does not exist, the position used to interpolate the
		 *     input variable is undefined."
		 *
		 * This means that sample_id values outside of the valid are
		 * in fact valid input, and the usual mechanism for loading the
		 * sample position doesn't work.
		 */
		if (ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center) {
			/* Use the pixel center (0.5, 0.5) unconditionally. */
			LLVMValueRef center[4] = {
				LLVMConstReal(ctx->f32, 0.5),
				LLVMConstReal(ctx->f32, 0.5),
				ctx->ac.f32_0,
				ctx->ac.f32_0,
			};

			sample_position = lp_build_gather_values(&ctx->gallivm, center, 4);
		} else {
			sample_position = load_sample_position(&ctx->abi, sample_id);
		}

		/* Subtract 0.5 to convert a [0,1] sample position into an
		 * offset from the pixel center. */
		emit_data->args[0] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_0, "");

		emit_data->args[0] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
4004
/* Emit INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET.
 *
 * Interpolates an input register (possibly an indirectly-indexed input
 * array) at the requested location. For OFFSET/SAMPLE, the barycentric
 * I/J pair is first adjusted using its screen-space derivatives and the
 * (x, y) offset that interp_fetch_args placed in args[0..1].
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	const struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *input = &inst->Src[0];
	int input_base, input_array_size;
	int chan;
	int i;
	LLVMValueRef prim_mask = ctx->abi.prim_mask;
	LLVMValueRef array_idx;
	int interp_param_idx;
	unsigned interp;
	unsigned location;

	assert(input->Register.File == TGSI_FILE_INPUT);

	/* Work out which inputs may be addressed and the dynamic index
	 * into that range. */
	if (input->Register.Indirect) {
		unsigned array_id = input->Indirect.ArrayID;

		if (array_id) {
			input_base = info->input_array_first[array_id];
			input_array_size = info->input_array_last[array_id] - input_base + 1;
		} else {
			/* No declared array: anything up to the last input. */
			input_base = inst->Src[0].Register.Index;
			input_array_size = info->num_inputs - input_base;
		}

		array_idx = si_get_indirect_index(ctx, &input->Indirect,
						  1, input->Register.Index - input_base);
	} else {
		input_base = inst->Src[0].Register.Index;
		input_array_size = 1;
		array_idx = ctx->i32_0;
	}

	interp = shader->selector->info.input_interpolate[input_base];

	/* OFFSET/SAMPLE start from the center I/J; CENTROID uses its own. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL; /* flat interpolation: no I/J needed */

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = ac_to_float(&ctx->ac, interp_el);

			temp1 = LLVMBuildFMul(ctx->ac.builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(ctx->ac.builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(ctx->ac.builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(ctx->ac.builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(&ctx->gallivm, ij_out, 2);
	}

	if (interp_param)
		interp_param = ac_to_float(&ctx->ac, interp_param);

	/* Interpolate every input in the addressable range, then select
	 * the dynamically-indexed one per channel. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef gather = LLVMGetUndef(LLVMVectorType(ctx->f32, input_array_size));
		unsigned schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);

		for (unsigned idx = 0; idx < input_array_size; ++idx) {
			LLVMValueRef v, i = NULL, j = NULL;

			if (interp_param) {
				i = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_0, "");
				j = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_1, "");
			}
			v = si_build_fs_interp(ctx, input_base + idx, schan,
					       prim_mask, i, j);

			gather = LLVMBuildInsertElement(ctx->ac.builder,
				gather, v, LLVMConstInt(ctx->i32, idx, false), "");
		}

		emit_data->output[chan] = LLVMBuildExtractElement(
			ctx->ac.builder, gather, array_idx, "");
	}
}
4125
4126 static void vote_all_emit(
4127 const struct lp_build_tgsi_action *action,
4128 struct lp_build_tgsi_context *bld_base,
4129 struct lp_build_emit_data *emit_data)
4130 {
4131 struct si_shader_context *ctx = si_shader_context(bld_base);
4132
4133 LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, emit_data->args[0]);
4134 emit_data->output[emit_data->chan] =
4135 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4136 }
4137
4138 static void vote_any_emit(
4139 const struct lp_build_tgsi_action *action,
4140 struct lp_build_tgsi_context *bld_base,
4141 struct lp_build_emit_data *emit_data)
4142 {
4143 struct si_shader_context *ctx = si_shader_context(bld_base);
4144
4145 LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, emit_data->args[0]);
4146 emit_data->output[emit_data->chan] =
4147 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4148 }
4149
4150 static void vote_eq_emit(
4151 const struct lp_build_tgsi_action *action,
4152 struct lp_build_tgsi_context *bld_base,
4153 struct lp_build_emit_data *emit_data)
4154 {
4155 struct si_shader_context *ctx = si_shader_context(bld_base);
4156
4157 LLVMValueRef tmp = ac_build_vote_eq(&ctx->ac, emit_data->args[0]);
4158 emit_data->output[emit_data->chan] =
4159 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4160 }
4161
4162 static void ballot_emit(
4163 const struct lp_build_tgsi_action *action,
4164 struct lp_build_tgsi_context *bld_base,
4165 struct lp_build_emit_data *emit_data)
4166 {
4167 struct si_shader_context *ctx = si_shader_context(bld_base);
4168 LLVMBuilderRef builder = ctx->ac.builder;
4169 LLVMValueRef tmp;
4170
4171 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4172 tmp = ac_build_ballot(&ctx->ac, tmp);
4173 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
4174
4175 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
4176 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
4177 }
4178
4179 static void read_invoc_fetch_args(
4180 struct lp_build_tgsi_context *bld_base,
4181 struct lp_build_emit_data *emit_data)
4182 {
4183 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
4184 0, emit_data->src_chan);
4185
4186 /* Always read the source invocation (= lane) from the X channel. */
4187 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
4188 1, TGSI_CHAN_X);
4189 emit_data->arg_count = 2;
4190 }
4191
4192 static void read_lane_emit(
4193 const struct lp_build_tgsi_action *action,
4194 struct lp_build_tgsi_context *bld_base,
4195 struct lp_build_emit_data *emit_data)
4196 {
4197 struct si_shader_context *ctx = si_shader_context(bld_base);
4198
4199 /* We currently have no other way to prevent LLVM from lifting the icmp
4200 * calls to a dominating basic block.
4201 */
4202 ac_build_optimization_barrier(&ctx->ac, &emit_data->args[0]);
4203
4204 for (unsigned i = 0; i < emit_data->arg_count; ++i)
4205 emit_data->args[i] = ac_to_integer(&ctx->ac, emit_data->args[i]);
4206
4207 emit_data->output[emit_data->chan] =
4208 ac_build_intrinsic(&ctx->ac, action->intr_name,
4209 ctx->i32, emit_data->args, emit_data->arg_count,
4210 AC_FUNC_ATTR_READNONE |
4211 AC_FUNC_ATTR_CONVERGENT);
4212 }
4213
4214 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
4215 struct lp_build_emit_data *emit_data)
4216 {
4217 struct si_shader_context *ctx = si_shader_context(bld_base);
4218 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
4219 LLVMValueRef imm;
4220 unsigned stream;
4221
4222 assert(src0.File == TGSI_FILE_IMMEDIATE);
4223
4224 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
4225 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
4226 return stream;
4227 }
4228
/* Emit one vertex from the geometry shader */
static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
				unsigned stream,
				LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct si_shader *shader = ctx->shader;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned chan, offset;
	int i;

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		ac_build_kill_if_false(&ctx->ac, can_emit);
	} else {
		lp_build_if(&if_state, &ctx->gallivm, can_emit);
	}

	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		for (chan = 0; chan < 4; chan++) {
			/* Skip channels that are unused or belong to a
			 * different vertex stream. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			/* Components are strided by gs_max_out_vertices dwords
			 * in the ring; gs_next_vertex selects the slot. */
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			/* dword index -> byte offset */
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = ac_to_integer(&ctx->ac, out_val);

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Bump the per-stream emitted-vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
4307
4308 /* Emit one vertex from the geometry shader */
4309 static void si_tgsi_emit_vertex(
4310 const struct lp_build_tgsi_action *action,
4311 struct lp_build_tgsi_context *bld_base,
4312 struct lp_build_emit_data *emit_data)
4313 {
4314 struct si_shader_context *ctx = si_shader_context(bld_base);
4315 unsigned stream = si_llvm_get_stream(bld_base, emit_data);
4316
4317 si_llvm_emit_vertex(&ctx->abi, stream, ctx->outputs[0]);
4318 }
4319
4320 /* Cut one primitive from the geometry shader */
4321 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
4322 unsigned stream)
4323 {
4324 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
4325
4326 /* Signal primitive cut */
4327 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
4328 si_get_gs_wave_id(ctx));
4329 }
4330
4331 /* Cut one primitive from the geometry shader */
4332 static void si_tgsi_emit_primitive(
4333 const struct lp_build_tgsi_action *action,
4334 struct lp_build_tgsi_context *bld_base,
4335 struct lp_build_emit_data *emit_data)
4336 {
4337 struct si_shader_context *ctx = si_shader_context(bld_base);
4338
4339 si_llvm_emit_primitive(&ctx->abi, si_llvm_get_stream(bld_base, emit_data));
4340 }
4341
4342 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
4343 struct lp_build_tgsi_context *bld_base,
4344 struct lp_build_emit_data *emit_data)
4345 {
4346 struct si_shader_context *ctx = si_shader_context(bld_base);
4347
4348 /* SI only (thanks to a hw bug workaround):
4349 * The real barrier instruction isn’t needed, because an entire patch
4350 * always fits into a single wave.
4351 */
4352 if (ctx->screen->info.chip_class == SI &&
4353 ctx->type == PIPE_SHADER_TESS_CTRL) {
4354 ac_build_waitcnt(&ctx->ac, LGKM_CNT & VM_CNT);
4355 return;
4356 }
4357
4358 lp_build_intrinsic(ctx->ac.builder,
4359 "llvm.amdgcn.s.barrier",
4360 ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
4361 }
4362
/* Shared action table for the INTERP_* opcodes: fetch the interpolation
 * offset, then emit the interpolation code. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
4367
/* Create the main LLVM function for a shader part and apply the common
 * parameter and function attributes.
 *
 * \param returns	return value types
 * \param fninfo	parameter types plus optional assignment targets
 *			(filled in via add_arg_assign)
 * \param max_workgroup_size  0 = don't add the max-work-group-size attribute
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       struct si_function_info *fninfo,
			       unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    fninfo->types, fninfo->num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i < fninfo->num_sgpr_params; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - noalias
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		/* Attribute indices are 1-based here (i + 1). */
		lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	/* Copy parameter values to the locations requested by the caller. */
	for (i = 0; i < fninfo->num_params; ++i) {
		if (fninfo->assign[i])
			*fninfo->assign[i] = LLVMGetParam(ctx->main_fn, i);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->debug_flags & DBG(UNSAFE_MATH)) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
4427
4428 static void declare_streamout_params(struct si_shader_context *ctx,
4429 struct pipe_stream_output_info *so,
4430 struct si_function_info *fninfo)
4431 {
4432 int i;
4433
4434 /* Streamout SGPRs. */
4435 if (so->num_outputs) {
4436 if (ctx->type != PIPE_SHADER_TESS_EVAL)
4437 ctx->param_streamout_config = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4438 else
4439 ctx->param_streamout_config = fninfo->num_params - 1;
4440
4441 ctx->param_streamout_write_index = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4442 }
4443 /* A streamout buffer offset is loaded if the stride is non-zero. */
4444 for (i = 0; i < 4; i++) {
4445 if (!so->stride[i])
4446 continue;
4447
4448 ctx->param_streamout_offset[i] = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4449 }
4450 }
4451
4452 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4453 {
4454 switch (shader->selector->type) {
4455 case PIPE_SHADER_TESS_CTRL:
4456 /* Return this so that LLVM doesn't remove s_barrier
4457 * instructions on chips where we use s_barrier. */
4458 return shader->selector->screen->info.chip_class >= CIK ? 128 : 64;
4459
4460 case PIPE_SHADER_GEOMETRY:
4461 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 64;
4462
4463 case PIPE_SHADER_COMPUTE:
4464 break; /* see below */
4465
4466 default:
4467 return 0;
4468 }
4469
4470 const unsigned *properties = shader->selector->info.properties;
4471 unsigned max_work_group_size =
4472 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4473 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4474 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4475
4476 if (!max_work_group_size) {
4477 /* This is a variable group size compute shader,
4478 * compile it for the maximum possible group size.
4479 */
4480 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4481 }
4482 return max_work_group_size;
4483 }
4484
4485 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4486 struct si_function_info *fninfo,
4487 bool assign_params)
4488 {
4489 LLVMTypeRef const_shader_buf_type;
4490
4491 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
4492 ctx->shader->selector->info.shader_buffers_declared == 0)
4493 const_shader_buf_type = ctx->f32;
4494 else
4495 const_shader_buf_type = ctx->v4i32;
4496
4497 unsigned const_and_shader_buffers =
4498 add_arg(fninfo, ARG_SGPR,
4499 ac_array_in_const_addr_space(const_shader_buf_type));
4500
4501 unsigned samplers_and_images =
4502 add_arg(fninfo, ARG_SGPR,
4503 ac_array_in_const_addr_space(ctx->v8i32));
4504
4505 if (assign_params) {
4506 ctx->param_const_and_shader_buffers = const_and_shader_buffers;
4507 ctx->param_samplers_and_images = samplers_and_images;
4508 }
4509 }
4510
/* Declare the descriptor arrays shared by all shader stages.
 * NOTE: add_arg() assigns sequential parameter indices, so the
 * declaration order here is part of the ABI. */
static void declare_global_desc_pointers(struct si_shader_context *ctx,
					 struct si_function_info *fninfo)
{
	ctx->param_rw_buffers = add_arg(fninfo, ARG_SGPR,
					ac_array_in_const_addr_space(ctx->v4i32));
	ctx->param_bindless_samplers_and_images = add_arg(fninfo, ARG_SGPR,
					ac_array_in_const_addr_space(ctx->v8i32));
}
4519
/* Declare VS-specific input SGPRs: vertex buffer descriptors followed by
 * the draw-parameter values. Declaration order fixes the ABI. */
static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
					    struct si_function_info *fninfo)
{
	ctx->param_vertex_buffers = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const_addr_space(ctx->v4i32));
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.base_vertex);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.start_instance);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.draw_id);
	ctx->param_vs_state_bits = add_arg(fninfo, ARG_SGPR, ctx->i32);
}
4530
4531 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
4532 struct si_function_info *fninfo,
4533 unsigned *num_prolog_vgprs)
4534 {
4535 struct si_shader *shader = ctx->shader;
4536
4537 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.vertex_id);
4538 if (shader->key.as_ls) {
4539 ctx->param_rel_auto_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
4540 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
4541 } else {
4542 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
4543 ctx->param_vs_prim_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
4544 }
4545 add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */
4546
4547 if (!shader->is_gs_copy_shader) {
4548 /* Vertex load indices. */
4549 ctx->param_vertex_index0 = fninfo->num_params;
4550 for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
4551 add_arg(fninfo, ARG_VGPR, ctx->i32);
4552 *num_prolog_vgprs += shader->selector->info.num_inputs;
4553 }
4554 }
4555
/* Declare the TES input VGPRs: tess coordinates (u, v), the relative
 * patch id, and the patch id. Declaration order fixes the ABI. */
static void declare_tes_input_vgprs(struct si_shader_context *ctx,
				    struct si_function_info *fninfo)
{
	ctx->param_tes_u = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_v = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_rel_patch_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
	add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tes_patch_id);
}
4564
enum {
	/* Convenient merged shader definitions. Values start after the
	 * last real PIPE_SHADER_* type so they can share a switch with
	 * the plain shader types. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4570
4571 static void create_function(struct si_shader_context *ctx)
4572 {
4573 struct si_shader *shader = ctx->shader;
4574 struct si_function_info fninfo;
4575 LLVMTypeRef returns[16+32*4];
4576 unsigned i, num_return_sgprs;
4577 unsigned num_returns = 0;
4578 unsigned num_prolog_vgprs = 0;
4579 unsigned type = ctx->type;
4580 unsigned vs_blit_property =
4581 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
4582
4583 si_init_function_info(&fninfo);
4584
4585 /* Set MERGED shaders. */
4586 if (ctx->screen->info.chip_class >= GFX9) {
4587 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
4588 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
4589 else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
4590 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
4591 }
4592
4593 LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);
4594
4595 switch (type) {
4596 case PIPE_SHADER_VERTEX:
4597 declare_global_desc_pointers(ctx, &fninfo);
4598
4599 if (vs_blit_property) {
4600 ctx->param_vs_blit_inputs = fninfo.num_params;
4601 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x1, y1 */
4602 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x2, y2 */
4603 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* depth */
4604
4605 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
4606 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color0 */
4607 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color1 */
4608 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color2 */
4609 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color3 */
4610 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
4611 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x1 */
4612 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y1 */
4613 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x2 */
4614 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y2 */
4615 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.z */
4616 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.w */
4617 }
4618
4619 /* VGPRs */
4620 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4621 break;
4622 }
4623
4624 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4625 declare_vs_specific_input_sgprs(ctx, &fninfo);
4626
4627 if (shader->key.as_es) {
4628 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4629 } else if (shader->key.as_ls) {
4630 /* no extra parameters */
4631 } else {
4632 if (shader->is_gs_copy_shader) {
4633 fninfo.num_params = ctx->param_rw_buffers + 1;
4634 fninfo.num_sgpr_params = fninfo.num_params;
4635 }
4636
4637 /* The locations of the other parameters are assigned dynamically. */
4638 declare_streamout_params(ctx, &shader->selector->so,
4639 &fninfo);
4640 }
4641
4642 /* VGPRs */
4643 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4644 break;
4645
4646 case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
4647 declare_global_desc_pointers(ctx, &fninfo);
4648 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4649 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4650 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4651 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4652 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4653 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4654 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4655 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4656 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4657
4658 /* VGPRs */
4659 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4660 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4661
4662 /* param_tcs_offchip_offset and param_tcs_factor_offset are
4663 * placed after the user SGPRs.
4664 */
4665 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
4666 returns[num_returns++] = ctx->i32; /* SGPRs */
4667 for (i = 0; i < 11; i++)
4668 returns[num_returns++] = ctx->f32; /* VGPRs */
4669 break;
4670
4671 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
4672 /* Merged stages have 8 system SGPRs at the beginning. */
4673 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
4674 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_HI_HS */
4675 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4676 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4677 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4678 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4679 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4680 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4681
4682 declare_global_desc_pointers(ctx, &fninfo);
4683 declare_per_stage_desc_pointers(ctx, &fninfo,
4684 ctx->type == PIPE_SHADER_VERTEX);
4685 declare_vs_specific_input_sgprs(ctx, &fninfo);
4686
4687 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4688 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4689 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4690 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4691 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4692 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4693
4694 declare_per_stage_desc_pointers(ctx, &fninfo,
4695 ctx->type == PIPE_SHADER_TESS_CTRL);
4696
4697 /* VGPRs (first TCS, then VS) */
4698 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4699 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4700
4701 if (ctx->type == PIPE_SHADER_VERTEX) {
4702 declare_vs_input_vgprs(ctx, &fninfo,
4703 &num_prolog_vgprs);
4704
4705 /* LS return values are inputs to the TCS main shader part. */
4706 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
4707 returns[num_returns++] = ctx->i32; /* SGPRs */
4708 for (i = 0; i < 2; i++)
4709 returns[num_returns++] = ctx->f32; /* VGPRs */
4710 } else {
4711 /* TCS return values are inputs to the TCS epilog.
4712 *
4713 * param_tcs_offchip_offset, param_tcs_factor_offset,
4714 * param_tcs_offchip_layout, and param_rw_buffers
4715 * should be passed to the epilog.
4716 */
4717 for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
4718 returns[num_returns++] = ctx->i32; /* SGPRs */
4719 for (i = 0; i < 11; i++)
4720 returns[num_returns++] = ctx->f32; /* VGPRs */
4721 }
4722 break;
4723
4724 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
4725 /* Merged stages have 8 system SGPRs at the beginning. */
4726 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_LO_GS) */
4727 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_HI_GS) */
4728 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4729 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4730 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4731 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4732 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
4733 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
4734
4735 declare_global_desc_pointers(ctx, &fninfo);
4736 declare_per_stage_desc_pointers(ctx, &fninfo,
4737 (ctx->type == PIPE_SHADER_VERTEX ||
4738 ctx->type == PIPE_SHADER_TESS_EVAL));
4739 if (ctx->type == PIPE_SHADER_VERTEX) {
4740 declare_vs_specific_input_sgprs(ctx, &fninfo);
4741 } else {
4742 /* TESS_EVAL (and also GEOMETRY):
4743 * Declare as many input SGPRs as the VS has. */
4744 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4745 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4746 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4747 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4748 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4749 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4750 }
4751
4752 declare_per_stage_desc_pointers(ctx, &fninfo,
4753 ctx->type == PIPE_SHADER_GEOMETRY);
4754
4755 /* VGPRs (first GS, then VS/TES) */
4756 ctx->param_gs_vtx01_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4757 ctx->param_gs_vtx23_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4758 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4759 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4760 ctx->param_gs_vtx45_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4761
4762 if (ctx->type == PIPE_SHADER_VERTEX) {
4763 declare_vs_input_vgprs(ctx, &fninfo,
4764 &num_prolog_vgprs);
4765 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
4766 declare_tes_input_vgprs(ctx, &fninfo);
4767 }
4768
4769 if (ctx->type == PIPE_SHADER_VERTEX ||
4770 ctx->type == PIPE_SHADER_TESS_EVAL) {
4771 /* ES return values are inputs to GS. */
4772 for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
4773 returns[num_returns++] = ctx->i32; /* SGPRs */
4774 for (i = 0; i < 5; i++)
4775 returns[num_returns++] = ctx->f32; /* VGPRs */
4776 }
4777 break;
4778
4779 case PIPE_SHADER_TESS_EVAL:
4780 declare_global_desc_pointers(ctx, &fninfo);
4781 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4782 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4783 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4784
4785 if (shader->key.as_es) {
4786 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4787 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4788 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4789 } else {
4790 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4791 declare_streamout_params(ctx, &shader->selector->so,
4792 &fninfo);
4793 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4794 }
4795
4796 /* VGPRs */
4797 declare_tes_input_vgprs(ctx, &fninfo);
4798 break;
4799
4800 case PIPE_SHADER_GEOMETRY:
4801 declare_global_desc_pointers(ctx, &fninfo);
4802 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4803 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4804 ctx->param_gs_wave_id = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4805
4806 /* VGPRs */
4807 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[0]);
4808 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[1]);
4809 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4810 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[2]);
4811 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[3]);
4812 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[4]);
4813 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[5]);
4814 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4815 break;
4816
4817 case PIPE_SHADER_FRAGMENT:
4818 declare_global_desc_pointers(ctx, &fninfo);
4819 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4820 add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);
4821 add_arg_assign_checked(&fninfo, ARG_SGPR, ctx->i32,
4822 &ctx->abi.prim_mask, SI_PARAM_PRIM_MASK);
4823
4824 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_SAMPLE);
4825 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTER);
4826 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTROID);
4827 add_arg_checked(&fninfo, ARG_VGPR, v3i32, SI_PARAM_PERSP_PULL_MODEL);
4828 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_SAMPLE);
4829 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTER);
4830 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTROID);
4831 add_arg_checked(&fninfo, ARG_VGPR, ctx->f32, SI_PARAM_LINE_STIPPLE_TEX);
4832 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4833 &ctx->abi.frag_pos[0], SI_PARAM_POS_X_FLOAT);
4834 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4835 &ctx->abi.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
4836 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4837 &ctx->abi.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
4838 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4839 &ctx->abi.frag_pos[3], SI_PARAM_POS_W_FLOAT);
4840 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4841 &ctx->abi.front_face, SI_PARAM_FRONT_FACE);
4842 shader->info.face_vgpr_index = 20;
4843 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4844 &ctx->abi.ancillary, SI_PARAM_ANCILLARY);
4845 shader->info.ancillary_vgpr_index = 21;
4846 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4847 &ctx->abi.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
4848 add_arg_checked(&fninfo, ARG_VGPR, ctx->i32, SI_PARAM_POS_FIXED_PT);
4849
4850 /* Color inputs from the prolog. */
4851 if (shader->selector->info.colors_read) {
4852 unsigned num_color_elements =
4853 util_bitcount(shader->selector->info.colors_read);
4854
4855 assert(fninfo.num_params + num_color_elements <= ARRAY_SIZE(fninfo.types));
4856 for (i = 0; i < num_color_elements; i++)
4857 add_arg(&fninfo, ARG_VGPR, ctx->f32);
4858
4859 num_prolog_vgprs += num_color_elements;
4860 }
4861
4862 /* Outputs for the epilog. */
4863 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
4864 num_returns =
4865 num_return_sgprs +
4866 util_bitcount(shader->selector->info.colors_written) * 4 +
4867 shader->selector->info.writes_z +
4868 shader->selector->info.writes_stencil +
4869 shader->selector->info.writes_samplemask +
4870 1 /* SampleMaskIn */;
4871
4872 num_returns = MAX2(num_returns,
4873 num_return_sgprs +
4874 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
4875
4876 for (i = 0; i < num_return_sgprs; i++)
4877 returns[i] = ctx->i32;
4878 for (; i < num_returns; i++)
4879 returns[i] = ctx->f32;
4880 break;
4881
4882 case PIPE_SHADER_COMPUTE:
4883 declare_global_desc_pointers(ctx, &fninfo);
4884 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4885 if (shader->selector->info.uses_grid_size)
4886 ctx->param_grid_size = add_arg(&fninfo, ARG_SGPR, v3i32);
4887 if (shader->selector->info.uses_block_size)
4888 ctx->param_block_size = add_arg(&fninfo, ARG_SGPR, v3i32);
4889
4890 for (i = 0; i < 3; i++) {
4891 ctx->abi.workgroup_ids[i] = NULL;
4892 if (shader->selector->info.uses_block_id[i])
4893 add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &ctx->abi.workgroup_ids[i]);
4894 }
4895
4896 add_arg_assign(&fninfo, ARG_VGPR, v3i32, &ctx->abi.local_invocation_ids);
4897 break;
4898 default:
4899 assert(0 && "unimplemented shader");
4900 return;
4901 }
4902
4903 si_create_function(ctx, "main", returns, num_returns, &fninfo,
4904 si_get_max_workgroup_size(shader));
4905
4906 /* Reserve register locations for VGPR inputs the PS prolog may need. */
4907 if (ctx->type == PIPE_SHADER_FRAGMENT &&
4908 ctx->separate_prolog) {
4909 si_llvm_add_attribute(ctx->main_fn,
4910 "InitialPSInputAddr",
4911 S_0286D0_PERSP_SAMPLE_ENA(1) |
4912 S_0286D0_PERSP_CENTER_ENA(1) |
4913 S_0286D0_PERSP_CENTROID_ENA(1) |
4914 S_0286D0_LINEAR_SAMPLE_ENA(1) |
4915 S_0286D0_LINEAR_CENTER_ENA(1) |
4916 S_0286D0_LINEAR_CENTROID_ENA(1) |
4917 S_0286D0_FRONT_FACE_ENA(1) |
4918 S_0286D0_ANCILLARY_ENA(1) |
4919 S_0286D0_POS_FIXED_PT_ENA(1));
4920 }
4921
4922 shader->info.num_input_sgprs = 0;
4923 shader->info.num_input_vgprs = 0;
4924
4925 for (i = 0; i < fninfo.num_sgpr_params; ++i)
4926 shader->info.num_input_sgprs += ac_get_type_size(fninfo.types[i]) / 4;
4927
4928 for (; i < fninfo.num_params; ++i)
4929 shader->info.num_input_vgprs += ac_get_type_size(fninfo.types[i]) / 4;
4930
4931 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
4932 shader->info.num_input_vgprs -= num_prolog_vgprs;
4933
4934 if (shader->key.as_ls ||
4935 ctx->type == PIPE_SHADER_TESS_CTRL ||
4936 /* GFX9 has the ESGS ring buffer in LDS. */
4937 type == SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY)
4938 ac_declare_lds_as_pointer(&ctx->ac);
4939 }
4940
4941 /**
4942 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
4943 * for later use.
4944 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	/* First SGPR argument: the internal RW-buffer descriptor list. */
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	/* On <= VI, the ESGS ring is a buffer in memory; load its descriptor.
	 * (On GFX9 the ESGS ring lives in LDS instead — see the LDS pointer
	 * declaration in the function-argument setup.) */
	if (ctx->screen->info.chip_class <= VI &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							  : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The GS copy shader only reads GSVS stream 0. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Derive one descriptor per vertex stream from the base ring
		 * descriptor, patching address, stride and num_records. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* Add the per-stream byte offset to the ring base
			 * address (low qword of the descriptor). */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			/* Dword 1: upper address bits plus STRIDE and
			 * SWIZZLE_ENABLE (swizzled thread layout above). */
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			/* Dword 2: num_records. */
			ring = LLVMBuildInsertElement(builder, ring,
					LLVMConstInt(ctx->i32, num_records, 0),
					LLVMConstInt(ctx->i32, 2, 0), "");
			/* Dword 3: format, element/index stride and TID
			 * addressing for the swizzled layout. */
			ring = LLVMBuildInsertElement(builder, ring,
				LLVMConstInt(ctx->i32,
					     S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
					     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
					     S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
					     S_008F0C_ADD_TID_ENABLE(1),
					     0),
				LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}
}
5039
/**
 * Emit code that discards (kills) the fragment when the bit selected by the
 * fragment's window position in the 32x32 polygon stipple pattern is 0.
 *
 * \param param_rw_buffers   pointer to the RW-buffer descriptor list, which
 *                           holds the stipple pattern at slot
 *                           SI_PS_CONST_POLY_STIPPLE
 * \param param_pos_fixed_pt index of the fixed-point position input
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
	desc = ac_build_load_to_sgpr(&ctx->ac, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = ac_to_integer(&ctx->ac, row);
	/* Extract the bit for the X coordinate and kill if it is 0. */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
	ac_build_kill_if_false(&ctx->ac, bit);
}
5067
/**
 * Parse the register/config section LLVM wrote into the shader binary and
 * fill in resource usage: SGPR/VGPR counts, LDS size, scratch size and the
 * SPI_PS_INPUT_* values.
 *
 * \param symbol_offset  offset of the shader symbol within the binary's
 *                       config data (see ac_shader_binary_config_start)
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* The config section is a list of (register, value) dword pairs. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The SGPRS/VGPRS fields are in units of 8 and 4
			 * registers respectively. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode =  G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			/* Warn once per process about unknown registers
			 * instead of spamming stderr. */
			static bool printed;

			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
		break;
		}
	}

	/* Mirror SPI_PS_INPUT_ENA if LLVM didn't emit SPI_PS_INPUT_ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5151
5152 void si_shader_apply_scratch_relocs(struct si_shader *shader,
5153 uint64_t scratch_va)
5154 {
5155 unsigned i;
5156 uint32_t scratch_rsrc_dword0 = scratch_va;
5157 uint32_t scratch_rsrc_dword1 =
5158 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5159
5160 /* Enable scratch coalescing. */
5161 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5162
5163 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5164 const struct ac_shader_reloc *reloc =
5165 &shader->binary.relocs[i];
5166 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5167 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5168 &scratch_rsrc_dword0, 4);
5169 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5170 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5171 &scratch_rsrc_dword1, 4);
5172 }
5173 }
5174 }
5175
5176 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
5177 {
5178 unsigned size = shader->binary.code_size;
5179
5180 if (shader->prolog)
5181 size += shader->prolog->binary.code_size;
5182 if (shader->previous_stage)
5183 size += shader->previous_stage->binary.code_size;
5184 if (shader->prolog2)
5185 size += shader->prolog2->binary.code_size;
5186 if (shader->epilog)
5187 size += shader->epilog->binary.code_size;
5188 return size;
5189 }
5190
5191 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5192 {
5193 const struct ac_shader_binary *prolog =
5194 shader->prolog ? &shader->prolog->binary : NULL;
5195 const struct ac_shader_binary *previous_stage =
5196 shader->previous_stage ? &shader->previous_stage->binary : NULL;
5197 const struct ac_shader_binary *prolog2 =
5198 shader->prolog2 ? &shader->prolog2->binary : NULL;
5199 const struct ac_shader_binary *epilog =
5200 shader->epilog ? &shader->epilog->binary : NULL;
5201 const struct ac_shader_binary *mainb = &shader->binary;
5202 unsigned bo_size = si_get_shader_binary_size(shader) +
5203 (!epilog ? mainb->rodata_size : 0);
5204 unsigned char *ptr;
5205
5206 assert(!prolog || !prolog->rodata_size);
5207 assert(!previous_stage || !previous_stage->rodata_size);
5208 assert(!prolog2 || !prolog2->rodata_size);
5209 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
5210 !mainb->rodata_size);
5211 assert(!epilog || !epilog->rodata_size);
5212
5213 r600_resource_reference(&shader->bo, NULL);
5214 shader->bo = (struct r600_resource*)
5215 si_aligned_buffer_create(&sscreen->b,
5216 sscreen->cpdma_prefetch_writes_memory ?
5217 0 : R600_RESOURCE_FLAG_READ_ONLY,
5218 PIPE_USAGE_IMMUTABLE,
5219 align(bo_size, SI_CPDMA_ALIGNMENT),
5220 256);
5221 if (!shader->bo)
5222 return -ENOMEM;
5223
5224 /* Upload. */
5225 ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
5226 PIPE_TRANSFER_READ_WRITE |
5227 PIPE_TRANSFER_UNSYNCHRONIZED);
5228
5229 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
5230 * endian-independent. */
5231 if (prolog) {
5232 memcpy(ptr, prolog->code, prolog->code_size);
5233 ptr += prolog->code_size;
5234 }
5235 if (previous_stage) {
5236 memcpy(ptr, previous_stage->code, previous_stage->code_size);
5237 ptr += previous_stage->code_size;
5238 }
5239 if (prolog2) {
5240 memcpy(ptr, prolog2->code, prolog2->code_size);
5241 ptr += prolog2->code_size;
5242 }
5243
5244 memcpy(ptr, mainb->code, mainb->code_size);
5245 ptr += mainb->code_size;
5246
5247 if (epilog)
5248 memcpy(ptr, epilog->code, epilog->code_size);
5249 else if (mainb->rodata_size > 0)
5250 memcpy(ptr, mainb->rodata, mainb->rodata_size);
5251
5252 sscreen->ws->buffer_unmap(shader->bo->buf);
5253 return 0;
5254 }
5255
5256 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
5257 struct pipe_debug_callback *debug,
5258 const char *name, FILE *file)
5259 {
5260 char *line, *p;
5261 unsigned i, count;
5262
5263 if (binary->disasm_string) {
5264 fprintf(file, "Shader %s disassembly:\n", name);
5265 fprintf(file, "%s", binary->disasm_string);
5266
5267 if (debug && debug->debug_message) {
5268 /* Very long debug messages are cut off, so send the
5269 * disassembly one line at a time. This causes more
5270 * overhead, but on the plus side it simplifies
5271 * parsing of resulting logs.
5272 */
5273 pipe_debug_message(debug, SHADER_INFO,
5274 "Shader Disassembly Begin");
5275
5276 line = binary->disasm_string;
5277 while (*line) {
5278 p = util_strchrnul(line, '\n');
5279 count = p - line;
5280
5281 if (count) {
5282 pipe_debug_message(debug, SHADER_INFO,
5283 "%.*s", count, line);
5284 }
5285
5286 if (!*p)
5287 break;
5288 line = p + 1;
5289 }
5290
5291 pipe_debug_message(debug, SHADER_INFO,
5292 "Shader Disassembly End");
5293 }
5294 } else {
5295 fprintf(file, "Shader %s binary:\n", name);
5296 for (i = 0; i < binary->code_size; i += 4) {
5297 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
5298 binary->code[i + 3], binary->code[i + 2],
5299 binary->code[i + 1], binary->code[i]);
5300 }
5301 }
5302 }
5303
5304 static void si_calculate_max_simd_waves(struct si_shader *shader)
5305 {
5306 struct si_screen *sscreen = shader->selector->screen;
5307 struct si_shader_config *conf = &shader->config;
5308 unsigned num_inputs = shader->selector->info.num_inputs;
5309 unsigned lds_increment = sscreen->info.chip_class >= CIK ? 512 : 256;
5310 unsigned lds_per_wave = 0;
5311 unsigned max_simd_waves;
5312
5313 switch (sscreen->info.family) {
5314 /* These always have 8 waves: */
5315 case CHIP_POLARIS10:
5316 case CHIP_POLARIS11:
5317 case CHIP_POLARIS12:
5318 max_simd_waves = 8;
5319 break;
5320 default:
5321 max_simd_waves = 10;
5322 }
5323
5324 /* Compute LDS usage for PS. */
5325 switch (shader->selector->type) {
5326 case PIPE_SHADER_FRAGMENT:
5327 /* The minimum usage per wave is (num_inputs * 48). The maximum
5328 * usage is (num_inputs * 48 * 16).
5329 * We can get anything in between and it varies between waves.
5330 *
5331 * The 48 bytes per input for a single primitive is equal to
5332 * 4 bytes/component * 4 components/input * 3 points.
5333 *
5334 * Other stages don't know the size at compile time or don't
5335 * allocate LDS per wave, but instead they do it per thread group.
5336 */
5337 lds_per_wave = conf->lds_size * lds_increment +
5338 align(num_inputs * 48, lds_increment);
5339 break;
5340 case PIPE_SHADER_COMPUTE:
5341 if (shader->selector) {
5342 unsigned max_workgroup_size =
5343 si_get_max_workgroup_size(shader);
5344 lds_per_wave = (conf->lds_size * lds_increment) /
5345 DIV_ROUND_UP(max_workgroup_size, 64);
5346 }
5347 break;
5348 }
5349
5350 /* Compute the per-SIMD wave counts. */
5351 if (conf->num_sgprs) {
5352 if (sscreen->info.chip_class >= VI)
5353 max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
5354 else
5355 max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
5356 }
5357
5358 if (conf->num_vgprs)
5359 max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);
5360
5361 /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
5362 * 16KB makes some SIMDs unoccupied). */
5363 if (lds_per_wave)
5364 max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
5365
5366 conf->max_simd_waves = max_simd_waves;
5367 }
5368
5369 void si_shader_dump_stats_for_shader_db(const struct si_shader *shader,
5370 struct pipe_debug_callback *debug)
5371 {
5372 const struct si_shader_config *conf = &shader->config;
5373
5374 pipe_debug_message(debug, SHADER_INFO,
5375 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
5376 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
5377 "Spilled VGPRs: %d PrivMem VGPRs: %d",
5378 conf->num_sgprs, conf->num_vgprs,
5379 si_get_shader_binary_size(shader),
5380 conf->lds_size, conf->scratch_bytes_per_wave,
5381 conf->max_simd_waves, conf->spilled_sgprs,
5382 conf->spilled_vgprs, conf->private_mem_vgprs);
5383 }
5384
5385 static void si_shader_dump_stats(struct si_screen *sscreen,
5386 const struct si_shader *shader,
5387 unsigned processor,
5388 FILE *file,
5389 bool check_debug_option)
5390 {
5391 const struct si_shader_config *conf = &shader->config;
5392
5393 if (!check_debug_option ||
5394 si_can_dump_shader(sscreen, processor)) {
5395 if (processor == PIPE_SHADER_FRAGMENT) {
5396 fprintf(file, "*** SHADER CONFIG ***\n"
5397 "SPI_PS_INPUT_ADDR = 0x%04x\n"
5398 "SPI_PS_INPUT_ENA = 0x%04x\n",
5399 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
5400 }
5401
5402 fprintf(file, "*** SHADER STATS ***\n"
5403 "SGPRS: %d\n"
5404 "VGPRS: %d\n"
5405 "Spilled SGPRs: %d\n"
5406 "Spilled VGPRs: %d\n"
5407 "Private memory VGPRs: %d\n"
5408 "Code Size: %d bytes\n"
5409 "LDS: %d blocks\n"
5410 "Scratch: %d bytes per wave\n"
5411 "Max Waves: %d\n"
5412 "********************\n\n\n",
5413 conf->num_sgprs, conf->num_vgprs,
5414 conf->spilled_sgprs, conf->spilled_vgprs,
5415 conf->private_mem_vgprs,
5416 si_get_shader_binary_size(shader),
5417 conf->lds_size, conf->scratch_bytes_per_wave,
5418 conf->max_simd_waves);
5419 }
5420 }
5421
5422 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
5423 {
5424 switch (processor) {
5425 case PIPE_SHADER_VERTEX:
5426 if (shader->key.as_es)
5427 return "Vertex Shader as ES";
5428 else if (shader->key.as_ls)
5429 return "Vertex Shader as LS";
5430 else
5431 return "Vertex Shader as VS";
5432 case PIPE_SHADER_TESS_CTRL:
5433 return "Tessellation Control Shader";
5434 case PIPE_SHADER_TESS_EVAL:
5435 if (shader->key.as_es)
5436 return "Tessellation Evaluation Shader as ES";
5437 else
5438 return "Tessellation Evaluation Shader as VS";
5439 case PIPE_SHADER_GEOMETRY:
5440 if (shader->is_gs_copy_shader)
5441 return "GS Copy Shader as VS";
5442 else
5443 return "Geometry Shader";
5444 case PIPE_SHADER_FRAGMENT:
5445 return "Pixel Shader";
5446 case PIPE_SHADER_COMPUTE:
5447 return "Compute Shader";
5448 default:
5449 return "Unknown Shader";
5450 }
5451 }
5452
/**
 * Dump everything known about a shader: its key, LLVM IR (when recorded),
 * the disassembly of every part, and the statistics.
 *
 * \param check_debug_option  if true, each section is only dumped when the
 *                            debug options request it for this shader type
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor))
		si_dump_shader_key(processor, shader, file);

	/* LLVM IR is only dumped on unconditional dumps (e.g. ddebug). */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		if (shader->previous_stage &&
		    shader->previous_stage->binary.llvm_ir_string) {
			fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
				si_get_shader_name(shader, processor));
			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
		}

		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (si_can_dump_shader(sscreen, processor) &&
	     !(sscreen->debug_flags & DBG(NO_ASM)))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump all parts in execution order. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, processor, file,
			     check_debug_option);
}
5500
/**
 * Compile an LLVM module to machine code and read back the shader config.
 *
 * \return 0 on success, the error from si_llvm_compile on compile failure,
 *         or -EINVAL if the shader has rodata but its binary may be
 *         concatenated with other parts (rodata is unsupported then).
 */
static int si_compile_llvm(struct si_screen *sscreen,
			   struct ac_shader_binary *binary,
			   struct si_shader_config *conf,
			   LLVMTargetMachineRef tm,
			   LLVMModuleRef mod,
			   struct pipe_debug_callback *debug,
			   unsigned processor,
			   const char *name)
{
	int r = 0;
	unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

	if (si_can_dump_shader(sscreen, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			ac_dump_module(mod);
			fprintf(stderr, "\n");
		}
	}

	/* Keep a copy of the IR text for later dumping (debug feature). */
	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* si_replace_shader may substitute the binary (debug feature);
	 * skip compilation in that case. */
	if (!si_replace_shader(count, binary)) {
		r = si_llvm_compile(mod, binary, tm, debug);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The config section has been parsed; release it. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;

	/* Some shaders can't have rodata because their binaries can be
	 * concatenated.
	 */
	if (binary->rodata_size &&
	    (processor == PIPE_SHADER_VERTEX ||
	     processor == PIPE_SHADER_TESS_CTRL ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_FRAGMENT)) {
		fprintf(stderr, "radeonsi: The shader can't have rodata.");
		return -EINVAL;
	}

	return r;
}
5570
5571 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5572 {
5573 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5574 LLVMBuildRetVoid(ctx->ac.builder);
5575 else
5576 LLVMBuildRet(ctx->ac.builder, ret);
5577 }
5578
/* Generate code for the hardware VS shader stage to go with a geometry shader.
 *
 * The GS copy shader is compiled as a hardware VS (ctx.type is overridden to
 * PIPE_SHADER_VERTEX during codegen) that reads the GS outputs back from the
 * GSVS ring buffer and exports them, handling streamout for all vertex
 * streams and parameter exports for stream 0 only.
 *
 * Returns the new si_shader, or NULL on allocation/compile failure.
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}

	/* We can leave the fence as permanently signaled because the GS copy
	 * shader only becomes visible globally after it has been compiled. */
	util_queue_fence_init(&shader->ready);

	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	ctx.type = PIPE_SHADER_VERTEX;

	builder = ctx.ac.builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Ring offset contribution of this vertex: vertex_id scaled by 4
	 * (bytes per dword). */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, ctx.abi.vertex_id, 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* Two bits per channel select the vertex stream. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	/* Build a switch on the stream ID with one block per active stream;
	 * control falls through to end_bb afterwards. */
	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams > 0 only matter when streamout is active. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels not written, or belonging to another
				 * stream, are left undefined. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				/* Each used component occupies a slot of
				 * gs_max_out_vertices * 16 * 4 bytes in the ring. */
				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 feeds the rasterizer. */
		if (stream == 0)
			si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(ctx.ac.builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5730
5731 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5732 const struct si_vs_prolog_bits *prolog,
5733 const char *prefix, FILE *f)
5734 {
5735 fprintf(f, " %s.instance_divisor_is_one = %u\n",
5736 prefix, prolog->instance_divisor_is_one);
5737 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
5738 prefix, prolog->instance_divisor_is_fetched);
5739 fprintf(f, " %s.ls_vgpr_fix = %u\n",
5740 prefix, prolog->ls_vgpr_fix);
5741
5742 fprintf(f, " mono.vs.fix_fetch = {");
5743 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5744 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5745 fprintf(f, "}\n");
5746 }
5747
/* Print the shader key of a compiled shader variant for debugging.
 * Only the fields relevant to the given PIPE_SHADER_* stage are dumped. */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the LS prolog is merged into the TCS, so its key
		 * bits live under part.tcs. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no meaningful key. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the ES (VS) prolog is merged into the GS. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Optimization fields only apply to hardware VS stages (not ES/LS). */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5827
5828 static void si_init_shader_ctx(struct si_shader_context *ctx,
5829 struct si_screen *sscreen,
5830 LLVMTargetMachineRef tm)
5831 {
5832 struct lp_build_tgsi_context *bld_base;
5833
5834 si_llvm_context_init(ctx, sscreen, tm);
5835
5836 bld_base = &ctx->bld_base;
5837 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
5838
5839 bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
5840 bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
5841 bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;
5842
5843 bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;
5844
5845 bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;
5846
5847 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
5848 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
5849 bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
5850 bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;
5851
5852 bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
5853 bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
5854 bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
5855 bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
5856 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
5857 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
5858 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
5859 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
5860 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;
5861
5862 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_tgsi_emit_vertex;
5863 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_tgsi_emit_primitive;
5864 bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
5865 }
5866
5867 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5868 {
5869 struct si_shader *shader = ctx->shader;
5870 struct tgsi_shader_info *info = &shader->selector->info;
5871
5872 if ((ctx->type != PIPE_SHADER_VERTEX &&
5873 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5874 shader->key.as_ls ||
5875 shader->key.as_es)
5876 return;
5877
5878 ac_optimize_vs_outputs(&ctx->ac,
5879 ctx->main_fn,
5880 shader->info.vs_output_param_offset,
5881 info->num_outputs,
5882 &shader->info.nr_param_exports);
5883 }
5884
5885 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5886 {
5887 ctx->shader->config.private_mem_vgprs = 0;
5888
5889 /* Process all LLVM instructions. */
5890 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5891 while (bb) {
5892 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5893
5894 while (next) {
5895 LLVMValueRef inst = next;
5896 next = LLVMGetNextInstruction(next);
5897
5898 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5899 continue;
5900
5901 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5902 /* No idea why LLVM aligns allocas to 4 elements. */
5903 unsigned alignment = LLVMGetAlignment(inst);
5904 unsigned dw_size = align(ac_get_type_size(type) / 4, alignment);
5905 ctx->shader->config.private_mem_vgprs += dw_size;
5906 }
5907 bb = LLVMGetNextBasicBlock(bb);
5908 }
5909 }
5910
5911 static void si_init_exec_from_input(struct si_shader_context *ctx,
5912 unsigned param, unsigned bitoffset)
5913 {
5914 LLVMValueRef args[] = {
5915 LLVMGetParam(ctx->main_fn, param),
5916 LLVMConstInt(ctx->i32, bitoffset, 0),
5917 };
5918 lp_build_intrinsic(ctx->ac.builder,
5919 "llvm.amdgcn.init.exec.from.input",
5920 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5921 }
5922
5923 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
5924 const struct si_vs_prolog_bits *key)
5925 {
5926 /* VGPR initialization fixup for Vega10 and Raven is always done in the
5927 * VS prolog. */
5928 return sel->vs_needs_prolog || key->ls_vgpr_fix;
5929 }
5930
/**
 * Generate LLVM IR for the main part of a shader.
 *
 * Installs the stage-specific ABI callbacks, creates the LLVM function,
 * handles GFX9 merged-shader EXEC setup, and translates the shader body
 * from TGSI or NIR.
 *
 * \param is_monolithic  true when all shader parts are compiled into one
 *                       function (affects GFX9 merged-shader handling)
 * \return false if translation from TGSI/NIR failed
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	/* Select input-fetch and epilogue callbacks per stage. */
	// TODO clean all this up!
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
		ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		ctx->abi.load_tess_varyings = si_nir_load_input_tes;
		ctx->abi.load_tess_coord = si_load_tess_coord;
		ctx->abi.load_tess_level = si_load_tess_level;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		ctx->abi.load_inputs = si_nir_load_input_gs;
		ctx->abi.emit_vertex = si_llvm_emit_vertex;
		ctx->abi.emit_primitive = si_llvm_emit_primitive;
		ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		ctx->abi.emit_outputs = si_llvm_return_fs_outputs;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		ctx->abi.lookup_interp_param = si_nir_lookup_interp_param;
		ctx->abi.load_sample_position = load_sample_position;
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	ctx->abi.load_ubo = load_ubo;
	ctx->abi.load_ssbo = load_ssbo;

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!is_monolithic &&
		    sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			if (!is_monolithic)
				ac_init_exec_full_mask(&ctx->ac);

			/* The barrier must execute for all shaders in a
			 * threadgroup.
			 */
			si_llvm_emit_barrier(NULL, bld_base, NULL);

			/* Wrap the main part so only the threads belonging to
			 * this wave's second stage execute it. */
			LLVMValueRef num_threads = unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
			LLVMValueRef ena =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      ac_get_thread_id(&ctx->ac), num_threads, "");
			lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);
		}
	}

	/* Allocas for invocation-0 tess factors (one per outer/inner factor). */
	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->tcs_info.tessfactors_are_def_in_all_invocs) {
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				lp_build_alloca_undef(&ctx->gallivm, ctx->i32, "");
		}
	}

	/* Per-stream emitted-vertex counters for the GS. */
	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = lp_build_alloca_undef(&ctx->gallivm, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, LLVMConstInt(ctx->i1, 1, 0),
			       ctx->postponed_kill);
	}

	/* Translate the shader body: TGSI tokens if present, NIR otherwise. */
	if (sel->tokens) {
		if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
			fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
			return false;
		}
	} else {
		if (!si_nir_build_llvm(ctx, sel->nir)) {
			fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
			return false;
		}
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6078
/**
 * Compute the VS prolog key, which contains all the information needed to
 * build the VS prolog function, and set shader->info bits where needed.
 *
 * \param info             Shader info of the vertex shader.
 * \param num_input_sgprs  Number of input SGPRs for the vertex shader.
 * \param prolog_key       Key of the VS prolog
 * \param shader_out       The vertex shader, or the next shader if merging LS+HS or ES+GS.
 * \param key              Output shader part key.
 */
static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
				 unsigned num_input_sgprs,
				 const struct si_vs_prolog_bits *prolog_key,
				 struct si_shader *shader_out,
				 union si_shader_part_key *key)
{
	memset(key, 0, sizeof(*key));
	key->vs_prolog.states = *prolog_key;
	key->vs_prolog.num_input_sgprs = num_input_sgprs;
	key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
	key->vs_prolog.as_ls = shader_out->key.as_ls;
	key->vs_prolog.as_es = shader_out->key.as_es;

	/* When the next stage is TCS or GS, this is a merged shader on GFX9:
	 * force the LS/ES role and account for the extra merged-stage VGPRs. */
	if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
		key->vs_prolog.as_ls = 1;
		key->vs_prolog.num_merged_next_stage_vgprs = 2;
	} else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
		key->vs_prolog.as_es = 1;
		key->vs_prolog.num_merged_next_stage_vgprs = 5;
	}

	/* Enable loading the InstanceID VGPR. */
	uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);

	if ((key->vs_prolog.states.instance_divisor_is_one |
	     key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
		shader_out->info.uses_instanceid = true;
}
6117
/**
 * Compute the PS prolog key, which contains all the information needed to
 * build the PS prolog function, and set related bits in shader->config.
 *
 * \param shader           the pixel shader
 * \param key              output shader part key
 * \param separate_prolog  true when the prolog is a separate shader part
 *                         rather than inlined into a monolithic shader
 *                         (affects which VGPR indices the interpolants use)
 */
static void si_get_ps_prolog_key(struct si_shader *shader,
				 union si_shader_part_key *key,
				 bool separate_prolog)
{
	struct tgsi_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.part.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* The prolog needs WQM when derivatives are used and any interpolant
	 * manipulation below can happen. */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);
	key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.part.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* For each of the two color inputs, record which interpolant
		 * VGPR the prolog should use and enable it in SPI. */
		for (unsigned i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.part.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 means no interpolation VGPR is needed. */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR assignment for non-monolithic shaders
				 * works because InitialPSInputAddr is set on the
				 * main shader and PERSP_PULL_MODEL is never used.
				 */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 6 : 9;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 8 : 11;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 10 : 13;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
6238
6239 /**
6240 * Check whether a PS prolog is required based on the key.
6241 */
6242 static bool si_need_ps_prolog(const union si_shader_part_key *key)
6243 {
6244 return key->ps_prolog.colors_read ||
6245 key->ps_prolog.states.force_persp_sample_interp ||
6246 key->ps_prolog.states.force_linear_sample_interp ||
6247 key->ps_prolog.states.force_persp_center_interp ||
6248 key->ps_prolog.states.force_linear_center_interp ||
6249 key->ps_prolog.states.bc_optimize_for_persp ||
6250 key->ps_prolog.states.bc_optimize_for_linear ||
6251 key->ps_prolog.states.poly_stipple ||
6252 key->ps_prolog.states.samplemask_log_ps_iter;
6253 }
6254
6255 /**
6256 * Compute the PS epilog key, which contains all the information needed to
6257 * build the PS epilog function.
6258 */
6259 static void si_get_ps_epilog_key(struct si_shader *shader,
6260 union si_shader_part_key *key)
6261 {
6262 struct tgsi_shader_info *info = &shader->selector->info;
6263 memset(key, 0, sizeof(*key));
6264 key->ps_epilog.colors_written = info->colors_written;
6265 key->ps_epilog.writes_z = info->writes_z;
6266 key->ps_epilog.writes_stencil = info->writes_stencil;
6267 key->ps_epilog.writes_samplemask = info->writes_samplemask;
6268 key->ps_epilog.states = shader->key.part.ps.epilog;
6269 }
6270
/**
 * Build the GS prolog function. Rotate the input vertices for triangle strips
 * with adjacency.
 *
 * The prolog copies all SGPR/VGPR inputs straight through to its return
 * value; when tri_strip_adj_fix is set, the six vertex-index VGPRs are
 * additionally rotated for every other primitive (selected by bit 0 of the
 * primitive ID).
 */
static void si_build_gs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	unsigned num_sgprs, num_vgprs;
	struct si_function_info fninfo;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef returns[48];
	LLVMValueRef func, ret;

	si_init_function_info(&fninfo);

	/* On GFX9 the GS is merged, so it has more SGPRs but fewer VGPRs
	 * (ES inputs are not needed by GS). */
	if (ctx->screen->info.chip_class >= GFX9) {
		num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
		num_vgprs = 5; /* ES inputs are not needed by GS */
	} else {
		num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
		num_vgprs = 8;
	}

	for (unsigned i = 0; i < num_sgprs; ++i) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[i] = ctx->i32;
	}

	/* VGPRs are returned as floats. */
	for (unsigned i = 0; i < num_vgprs; ++i) {
		add_arg(&fninfo, ARG_VGPR, ctx->i32);
		returns[num_sgprs + i] = ctx->f32;
	}

	/* Create the function. */
	si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Set the full EXEC mask for the prolog, because we are only fiddling
	 * with registers here. The main shader part will set the correct EXEC
	 * mask.
	 */
	if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
		ac_init_exec_full_mask(&ctx->ac);

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (unsigned i = 0; i < num_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(builder, ret, p, i, "");
	}
	for (unsigned i = 0; i < num_vgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
	}

	if (key->gs_prolog.states.tri_strip_adj_fix) {
		/* Remap the input vertices for every other primitive. */
		const unsigned gfx6_vtx_params[6] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 3,
			num_sgprs + 4,
			num_sgprs + 5,
			num_sgprs + 6
		};
		/* On GFX9, the 6 vertex indices are packed 16-bit pairs into
		 * 3 VGPRs. */
		const unsigned gfx9_vtx_params[3] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 4,
		};
		LLVMValueRef vtx_in[6], vtx_out[6];
		LLVMValueRef prim_id, rotate;

		if (ctx->screen->info.chip_class >= GFX9) {
			for (unsigned i = 0; i < 3; i++) {
				vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
				vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
			}
		} else {
			for (unsigned i = 0; i < 6; i++)
				vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
		}

		/* Bit 0 of the primitive ID selects whether to rotate. */
		prim_id = LLVMGetParam(func, num_sgprs + 2);
		rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");

		/* Rotate the vertex order by 4 positions (mod 6) when selected. */
		for (unsigned i = 0; i < 6; ++i) {
			LLVMValueRef base, rotated;
			base = vtx_in[i];
			rotated = vtx_in[(i + 4) % 6];
			vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* Re-pack the 16-bit pairs and overwrite the return slots. */
			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef hi, out;

				hi = LLVMBuildShl(builder, vtx_out[i*2+1],
						  LLVMConstInt(ctx->i32, 16, 0), "");
				out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
				out = ac_to_float(&ctx->ac, out);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx9_vtx_params[i], "");
			}
		} else {
			for (unsigned i = 0; i < 6; i++) {
				LLVMValueRef out;

				out = ac_to_float(&ctx->ac, vtx_out[i]);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx6_vtx_params[i], "");
			}
		}
	}

	LLVMBuildRet(builder, ret);
}
6392
6393 /**
6394 * Given a list of shader part functions, build a wrapper function that
6395 * runs them in sequence to form a monolithic shader.
6396 */
6397 static void si_build_wrapper_function(struct si_shader_context *ctx,
6398 LLVMValueRef *parts,
6399 unsigned num_parts,
6400 unsigned main_part,
6401 unsigned next_shader_first_part)
6402 {
6403 LLVMBuilderRef builder = ctx->ac.builder;
6404 /* PS epilog has one arg per color component; gfx9 merged shader
6405 * prologs need to forward 32 user SGPRs.
6406 */
6407 struct si_function_info fninfo;
6408 LLVMValueRef initial[64], out[64];
6409 LLVMTypeRef function_type;
6410 unsigned num_first_params;
6411 unsigned num_out, initial_num_out;
6412 MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
6413 MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
6414 unsigned num_sgprs, num_vgprs;
6415 unsigned gprs;
6416 struct lp_build_if_state if_state;
6417
6418 si_init_function_info(&fninfo);
6419
6420 for (unsigned i = 0; i < num_parts; ++i) {
6421 lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
6422 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
6423 }
6424
6425 /* The parameters of the wrapper function correspond to those of the
6426 * first part in terms of SGPRs and VGPRs, but we use the types of the
6427 * main part to get the right types. This is relevant for the
6428 * dereferenceable attribute on descriptor table pointers.
6429 */
6430 num_sgprs = 0;
6431 num_vgprs = 0;
6432
6433 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
6434 num_first_params = LLVMCountParamTypes(function_type);
6435
6436 for (unsigned i = 0; i < num_first_params; ++i) {
6437 LLVMValueRef param = LLVMGetParam(parts[0], i);
6438
6439 if (ac_is_sgpr_param(param)) {
6440 assert(num_vgprs == 0);
6441 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
6442 } else {
6443 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
6444 }
6445 }
6446
6447 gprs = 0;
6448 while (gprs < num_sgprs + num_vgprs) {
6449 LLVMValueRef param = LLVMGetParam(parts[main_part], fninfo.num_params);
6450 LLVMTypeRef type = LLVMTypeOf(param);
6451 unsigned size = ac_get_type_size(type) / 4;
6452
6453 add_arg(&fninfo, gprs < num_sgprs ? ARG_SGPR : ARG_VGPR, type);
6454
6455 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
6456 assert(gprs + size <= num_sgprs + num_vgprs &&
6457 (gprs >= num_sgprs || gprs + size <= num_sgprs));
6458
6459 gprs += size;
6460 }
6461
6462 si_create_function(ctx, "wrapper", NULL, 0, &fninfo,
6463 si_get_max_workgroup_size(ctx->shader));
6464
6465 if (is_merged_shader(ctx->shader))
6466 ac_init_exec_full_mask(&ctx->ac);
6467
6468 /* Record the arguments of the function as if they were an output of
6469 * a previous part.
6470 */
6471 num_out = 0;
6472 num_out_sgpr = 0;
6473
6474 for (unsigned i = 0; i < fninfo.num_params; ++i) {
6475 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
6476 LLVMTypeRef param_type = LLVMTypeOf(param);
6477 LLVMTypeRef out_type = i < fninfo.num_sgpr_params ? ctx->i32 : ctx->f32;
6478 unsigned size = ac_get_type_size(param_type) / 4;
6479
6480 if (size == 1) {
6481 if (param_type != out_type)
6482 param = LLVMBuildBitCast(builder, param, out_type, "");
6483 out[num_out++] = param;
6484 } else {
6485 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
6486
6487 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6488 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
6489 param_type = ctx->i64;
6490 }
6491
6492 if (param_type != vector_type)
6493 param = LLVMBuildBitCast(builder, param, vector_type, "");
6494
6495 for (unsigned j = 0; j < size; ++j)
6496 out[num_out++] = LLVMBuildExtractElement(
6497 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
6498 }
6499
6500 if (i < fninfo.num_sgpr_params)
6501 num_out_sgpr = num_out;
6502 }
6503
6504 memcpy(initial, out, sizeof(out));
6505 initial_num_out = num_out;
6506 initial_num_out_sgpr = num_out_sgpr;
6507
6508 /* Now chain the parts. */
6509 for (unsigned part = 0; part < num_parts; ++part) {
6510 LLVMValueRef in[48];
6511 LLVMValueRef ret;
6512 LLVMTypeRef ret_type;
6513 unsigned out_idx = 0;
6514 unsigned num_params = LLVMCountParams(parts[part]);
6515
6516 /* Merged shaders are executed conditionally depending
6517 * on the number of enabled threads passed in the input SGPRs. */
6518 if (is_merged_shader(ctx->shader) && part == 0) {
6519 LLVMValueRef ena, count = initial[3];
6520
6521 count = LLVMBuildAnd(builder, count,
6522 LLVMConstInt(ctx->i32, 0x7f, 0), "");
6523 ena = LLVMBuildICmp(builder, LLVMIntULT,
6524 ac_get_thread_id(&ctx->ac), count, "");
6525 lp_build_if(&if_state, &ctx->gallivm, ena);
6526 }
6527
6528 /* Derive arguments for the next part from outputs of the
6529 * previous one.
6530 */
6531 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
6532 LLVMValueRef param;
6533 LLVMTypeRef param_type;
6534 bool is_sgpr;
6535 unsigned param_size;
6536 LLVMValueRef arg = NULL;
6537
6538 param = LLVMGetParam(parts[part], param_idx);
6539 param_type = LLVMTypeOf(param);
6540 param_size = ac_get_type_size(param_type) / 4;
6541 is_sgpr = ac_is_sgpr_param(param);
6542
6543 if (is_sgpr)
6544 lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
6545
6546 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
6547 assert(is_sgpr || out_idx >= num_out_sgpr);
6548
6549 if (param_size == 1)
6550 arg = out[out_idx];
6551 else
6552 arg = lp_build_gather_values(&ctx->gallivm, &out[out_idx], param_size);
6553
6554 if (LLVMTypeOf(arg) != param_type) {
6555 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6556 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
6557 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6558 } else {
6559 arg = LLVMBuildBitCast(builder, arg, param_type, "");
6560 }
6561 }
6562
6563 in[param_idx] = arg;
6564 out_idx += param_size;
6565 }
6566
6567 ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
6568
6569 if (is_merged_shader(ctx->shader) &&
6570 part + 1 == next_shader_first_part) {
6571 lp_build_endif(&if_state);
6572
6573 /* The second half of the merged shader should use
6574 * the inputs from the toplevel (wrapper) function,
6575 * not the return value from the last call.
6576 *
6577 * That's because the last call was executed condi-
6578 * tionally, so we can't consume it in the main
6579 * block.
6580 */
6581 memcpy(out, initial, sizeof(initial));
6582 num_out = initial_num_out;
6583 num_out_sgpr = initial_num_out_sgpr;
6584 continue;
6585 }
6586
6587 /* Extract the returned GPRs. */
6588 ret_type = LLVMTypeOf(ret);
6589 num_out = 0;
6590 num_out_sgpr = 0;
6591
6592 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
6593 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
6594
6595 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
6596
6597 for (unsigned i = 0; i < ret_size; ++i) {
6598 LLVMValueRef val =
6599 LLVMBuildExtractValue(builder, ret, i, "");
6600
6601 assert(num_out < ARRAY_SIZE(out));
6602 out[num_out++] = val;
6603
6604 if (LLVMTypeOf(val) == ctx->i32) {
6605 assert(num_out_sgpr + 1 == num_out);
6606 num_out_sgpr = num_out;
6607 }
6608 }
6609 }
6610 }
6611
6612 LLVMBuildRetVoid(builder);
6613 }
6614
/**
 * Compile a shader (TGSI or NIR) selector into machine code.
 *
 * For monolithic shaders, the required prolog/epilog parts are built as
 * separate LLVM functions and linked into one wrapper function before
 * compilation.
 *
 * \param sscreen	screen
 * \param tm		LLVM target machine
 * \param shader	shader to compile; binary/config/info are filled in
 * \param is_monolithic	whether prologs/epilogs are compiled into the shader
 * \param debug		debug callback
 * \return		0 on success, negative on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (si_can_dump_shader(sscreen, sel->info.processor) &&
	    !(sscreen->debug_flags & DBG(NO_TGSI))) {
		if (sel->tokens)
			tgsi_dump(sel->tokens, 0);
		else
			nir_print_shader(sel->nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	/* For monolithic shaders, build all parts and link them into a
	 * single wrapper function. On GFX9, two stages may be merged into
	 * one HW shader (LS+HS, ES+GS). */
	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			/* Part order: [VS prolog,] LS main, TCS main, TCS
			 * epilog; the TCS half starts at index 2 (or 1 when
			 * there is no VS prolog). */
			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, 0,
						  vs_needs_prolog ? 2 : 1);
		} else {
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->info.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Per-wave register budget shrinks as more waves of the
		 * workgroup must fit on one SIMD. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs by counting the
	 * VGPRs enabled in SPI_PS_INPUT_ADDR, in hardware order. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;
		shader->info.ancillary_vgpr_index = -1;

		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.ancillary_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	si_calculate_max_simd_waves(shader);
	si_shader_dump_stats_for_shader_db(shader, debug);
	return 0;
}
6931
6932 /**
6933 * Create, compile and return a shader part (prolog or epilog).
6934 *
6935 * \param sscreen screen
6936 * \param list list of shader parts of the same category
6937 * \param type shader type
6938 * \param key shader part key
6939 * \param prolog whether the part being requested is a prolog
6940 * \param tm LLVM target machine
6941 * \param debug debug callback
6942 * \param build the callback responsible for building the main function
6943 * \return non-NULL on success
6944 */
6945 static struct si_shader_part *
6946 si_get_shader_part(struct si_screen *sscreen,
6947 struct si_shader_part **list,
6948 enum pipe_shader_type type,
6949 bool prolog,
6950 union si_shader_part_key *key,
6951 LLVMTargetMachineRef tm,
6952 struct pipe_debug_callback *debug,
6953 void (*build)(struct si_shader_context *,
6954 union si_shader_part_key *),
6955 const char *name)
6956 {
6957 struct si_shader_part *result;
6958
6959 mtx_lock(&sscreen->shader_parts_mutex);
6960
6961 /* Find existing. */
6962 for (result = *list; result; result = result->next) {
6963 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6964 mtx_unlock(&sscreen->shader_parts_mutex);
6965 return result;
6966 }
6967 }
6968
6969 /* Compile a new one. */
6970 result = CALLOC_STRUCT(si_shader_part);
6971 result->key = *key;
6972
6973 struct si_shader shader = {};
6974 struct si_shader_context ctx;
6975
6976 si_init_shader_ctx(&ctx, sscreen, tm);
6977 ctx.shader = &shader;
6978 ctx.type = type;
6979
6980 switch (type) {
6981 case PIPE_SHADER_VERTEX:
6982 shader.key.as_ls = key->vs_prolog.as_ls;
6983 shader.key.as_es = key->vs_prolog.as_es;
6984 break;
6985 case PIPE_SHADER_TESS_CTRL:
6986 assert(!prolog);
6987 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6988 break;
6989 case PIPE_SHADER_GEOMETRY:
6990 assert(prolog);
6991 break;
6992 case PIPE_SHADER_FRAGMENT:
6993 if (prolog)
6994 shader.key.part.ps.prolog = key->ps_prolog.states;
6995 else
6996 shader.key.part.ps.epilog = key->ps_epilog.states;
6997 break;
6998 default:
6999 unreachable("bad shader part");
7000 }
7001
7002 build(&ctx, key);
7003
7004 /* Compile. */
7005 si_llvm_optimize_module(&ctx);
7006
7007 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
7008 ctx.ac.module, debug, ctx.type, name)) {
7009 FREE(result);
7010 result = NULL;
7011 goto out;
7012 }
7013
7014 result->next = *list;
7015 *list = result;
7016
7017 out:
7018 si_llvm_dispose(&ctx);
7019 mtx_unlock(&sscreen->shader_parts_mutex);
7020 return result;
7021 }
7022
7023 static LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
7024 {
7025 LLVMValueRef ptr[2], list;
7026 bool is_merged_shader =
7027 ctx->screen->info.chip_class >= GFX9 &&
7028 (ctx->type == PIPE_SHADER_TESS_CTRL ||
7029 ctx->type == PIPE_SHADER_GEOMETRY ||
7030 ctx->shader->key.as_ls || ctx->shader->key.as_es);
7031
7032 /* Get the pointer to rw buffers. */
7033 ptr[0] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
7034 ptr[1] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS_HI);
7035 list = lp_build_gather_values(&ctx->gallivm, ptr, 2);
7036 list = LLVMBuildBitCast(ctx->ac.builder, list, ctx->i64, "");
7037 list = LLVMBuildIntToPtr(ctx->ac.builder, list,
7038 ac_array_in_const_addr_space(ctx->v4i32), "");
7039 return list;
7040 }
7041
7042 /**
7043 * Build the vertex shader prolog function.
7044 *
7045 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
7046 * All inputs are returned unmodified. The vertex load indices are
7047 * stored after them, which will be used by the API VS for fetching inputs.
7048 *
7049 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
7050 * input_v0,
7051 * input_v1,
7052 * input_v2,
7053 * input_v3,
7054 * (VertexID + BaseVertex),
7055 * (InstanceID + StartInstance),
7056 * (InstanceID / 2 + StartInstance)
7057 */
7058 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
7059 union si_shader_part_key *key)
7060 {
7061 struct si_function_info fninfo;
7062 LLVMTypeRef *returns;
7063 LLVMValueRef ret, func;
7064 int num_returns, i;
7065 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
7066 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
7067 LLVMValueRef input_vgprs[9];
7068 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
7069 num_input_vgprs;
7070 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
7071
7072 si_init_function_info(&fninfo);
7073
7074 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
7075 returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
7076 sizeof(LLVMTypeRef));
7077 num_returns = 0;
7078
7079 /* Declare input and output SGPRs. */
7080 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
7081 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7082 returns[num_returns++] = ctx->i32;
7083 }
7084
7085 /* Preloaded VGPRs (outputs must be floats) */
7086 for (i = 0; i < num_input_vgprs; i++) {
7087 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &input_vgprs[i]);
7088 returns[num_returns++] = ctx->f32;
7089 }
7090
7091 /* Vertex load indices. */
7092 for (i = 0; i <= key->vs_prolog.last_input; i++)
7093 returns[num_returns++] = ctx->f32;
7094
7095 /* Create the function. */
7096 si_create_function(ctx, "vs_prolog", returns, num_returns, &fninfo, 0);
7097 func = ctx->main_fn;
7098
7099 if (key->vs_prolog.num_merged_next_stage_vgprs) {
7100 if (!key->vs_prolog.is_monolithic)
7101 si_init_exec_from_input(ctx, 3, 0);
7102
7103 if (key->vs_prolog.as_ls &&
7104 ctx->screen->has_ls_vgpr_init_bug) {
7105 /* If there are no HS threads, SPI loads the LS VGPRs
7106 * starting at VGPR 0. Shift them back to where they
7107 * belong.
7108 */
7109 LLVMValueRef has_hs_threads =
7110 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
7111 unpack_param(ctx, 3, 8, 8),
7112 ctx->i32_0, "");
7113
7114 for (i = 4; i > 0; --i) {
7115 input_vgprs[i + 1] =
7116 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
7117 input_vgprs[i + 1],
7118 input_vgprs[i - 1], "");
7119 }
7120 }
7121 }
7122
7123 ctx->abi.vertex_id = input_vgprs[first_vs_vgpr];
7124 ctx->abi.instance_id = input_vgprs[first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1)];
7125
7126 /* Copy inputs to outputs. This should be no-op, as the registers match,
7127 * but it will prevent the compiler from overwriting them unintentionally.
7128 */
7129 ret = ctx->return_value;
7130 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
7131 LLVMValueRef p = LLVMGetParam(func, i);
7132 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
7133 }
7134 for (i = 0; i < num_input_vgprs; i++) {
7135 LLVMValueRef p = input_vgprs[i];
7136 p = ac_to_float(&ctx->ac, p);
7137 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
7138 key->vs_prolog.num_input_sgprs + i, "");
7139 }
7140
7141 /* Compute vertex load indices from instance divisors. */
7142 LLVMValueRef instance_divisor_constbuf = NULL;
7143
7144 if (key->vs_prolog.states.instance_divisor_is_fetched) {
7145 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
7146 LLVMValueRef buf_index =
7147 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
7148 instance_divisor_constbuf =
7149 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
7150 }
7151
7152 for (i = 0; i <= key->vs_prolog.last_input; i++) {
7153 bool divisor_is_one =
7154 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
7155 bool divisor_is_fetched =
7156 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
7157 LLVMValueRef index;
7158
7159 if (divisor_is_one || divisor_is_fetched) {
7160 LLVMValueRef divisor = ctx->i32_1;
7161
7162 if (divisor_is_fetched) {
7163 divisor = buffer_load_const(ctx, instance_divisor_constbuf,
7164 LLVMConstInt(ctx->i32, i * 4, 0));
7165 divisor = ac_to_integer(&ctx->ac, divisor);
7166 }
7167
7168 /* InstanceID / Divisor + StartInstance */
7169 index = get_instance_index_for_fetch(ctx,
7170 user_sgpr_base +
7171 SI_SGPR_START_INSTANCE,
7172 divisor);
7173 } else {
7174 /* VertexID + BaseVertex */
7175 index = LLVMBuildAdd(ctx->ac.builder,
7176 ctx->abi.vertex_id,
7177 LLVMGetParam(func, user_sgpr_base +
7178 SI_SGPR_BASE_VERTEX), "");
7179 }
7180
7181 index = ac_to_float(&ctx->ac, index);
7182 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
7183 fninfo.num_params + i, "");
7184 }
7185
7186 si_llvm_build_ret(ctx, ret);
7187 }
7188
7189 static bool si_get_vs_prolog(struct si_screen *sscreen,
7190 LLVMTargetMachineRef tm,
7191 struct si_shader *shader,
7192 struct pipe_debug_callback *debug,
7193 struct si_shader *main_part,
7194 const struct si_vs_prolog_bits *key)
7195 {
7196 struct si_shader_selector *vs = main_part->selector;
7197
7198 if (!si_vs_needs_prolog(vs, key))
7199 return true;
7200
7201 /* Get the prolog. */
7202 union si_shader_part_key prolog_key;
7203 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
7204 key, shader, &prolog_key);
7205
7206 shader->prolog =
7207 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7208 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
7209 debug, si_build_vs_prolog_function,
7210 "Vertex Shader Prolog");
7211 return shader->prolog != NULL;
7212 }
7213
7214 /**
7215 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7216 */
7217 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7218 LLVMTargetMachineRef tm,
7219 struct si_shader *shader,
7220 struct pipe_debug_callback *debug)
7221 {
7222 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
7223 &shader->key.part.vs.prolog);
7224 }
7225
7226 /**
7227 * Compile the TCS epilog function. This writes tesselation factors to memory
7228 * based on the output primitive type of the tesselator (determined by TES).
7229 */
7230 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
7231 union si_shader_part_key *key)
7232 {
7233 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
7234 struct si_function_info fninfo;
7235 LLVMValueRef func;
7236
7237 si_init_function_info(&fninfo);
7238
7239 if (ctx->screen->info.chip_class >= GFX9) {
7240 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7241 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7242 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* wave info */
7243 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7244 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7245 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7246 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7247 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7248 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7249 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7250 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7251 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7252 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7253 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7254 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7255 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7256 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7257 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7258 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7259 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7260 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7261 } else {
7262 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7263 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7264 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7265 add_arg(&fninfo, ARG_SGPR, ctx->i64);
7266 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7267 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7268 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7269 add_arg(&fninfo, ARG_SGPR, ctx->i32);
7270 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7271 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7272 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7273 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
7274 }
7275
7276 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
7277 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
7278 unsigned tess_factors_idx =
7279 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* patch index within the wave (REL_PATCH_ID) */
7280 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* invocation ID within the patch */
7281 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* LDS offset where tess factors should be loaded from */
7282
7283 for (unsigned i = 0; i < 6; i++)
7284 add_arg(&fninfo, ARG_VGPR, ctx->i32); /* tess factors */
7285
7286 /* Create the function. */
7287 si_create_function(ctx, "tcs_epilog", NULL, 0, &fninfo,
7288 ctx->screen->info.chip_class >= CIK ? 128 : 64);
7289 ac_declare_lds_as_pointer(&ctx->ac);
7290 func = ctx->main_fn;
7291
7292 LLVMValueRef invoc0_tess_factors[6];
7293 for (unsigned i = 0; i < 6; i++)
7294 invoc0_tess_factors[i] = LLVMGetParam(func, tess_factors_idx + 3 + i);
7295
7296 si_write_tess_factors(bld_base,
7297 LLVMGetParam(func, tess_factors_idx),
7298 LLVMGetParam(func, tess_factors_idx + 1),
7299 LLVMGetParam(func, tess_factors_idx + 2),
7300 invoc0_tess_factors, invoc0_tess_factors + 4);
7301
7302 LLVMBuildRetVoid(ctx->ac.builder);
7303 }
7304
7305 /**
7306 * Select and compile (or reuse) TCS parts (epilog).
7307 */
7308 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7309 LLVMTargetMachineRef tm,
7310 struct si_shader *shader,
7311 struct pipe_debug_callback *debug)
7312 {
7313 if (sscreen->info.chip_class >= GFX9) {
7314 struct si_shader *ls_main_part =
7315 shader->key.part.tcs.ls->main_shader_part_ls;
7316
7317 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
7318 &shader->key.part.tcs.ls_prolog))
7319 return false;
7320
7321 shader->previous_stage = ls_main_part;
7322 }
7323
7324 /* Get the epilog. */
7325 union si_shader_part_key epilog_key;
7326 memset(&epilog_key, 0, sizeof(epilog_key));
7327 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
7328
7329 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7330 PIPE_SHADER_TESS_CTRL, false,
7331 &epilog_key, tm, debug,
7332 si_build_tcs_epilog_function,
7333 "Tessellation Control Shader Epilog");
7334 return shader->epilog != NULL;
7335 }
7336
/**
 * Select and compile (or reuse) GS parts (prolog).
 *
 * On GFX9 this also attaches the ES (previous stage) main part, since the
 * GS binary contains the previous shader stage starting with gfx9.
 *
 * \return true on success, false if a part failed to compile.
 */
static bool si_shader_select_gs_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	if (sscreen->info.chip_class >= GFX9) {
		struct si_shader *es_main_part =
			shader->key.part.gs.es->main_shader_part_es;

		/* Only vertex shaders need a VS prolog; a TES-as-ES stage
		 * does not.
		 */
		if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
		    !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
				      &shader->key.part.gs.vs_prolog))
			return false;

		shader->previous_stage = es_main_part;
	}

	/* The GS prolog is only needed for the triangle-strip-adjacency
	 * fixup; without it the GS has no prolog at all.
	 */
	if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
		return true;

	union si_shader_part_key prolog_key;
	/* memset so padding bytes are zeroed too — presumably the key bytes
	 * are compared when looking up cached parts; verify against
	 * si_get_shader_part.
	 */
	memset(&prolog_key, 0, sizeof(prolog_key));
	prolog_key.gs_prolog.states = shader->key.part.gs.prolog;

	shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
					     PIPE_SHADER_GEOMETRY, true,
					     &prolog_key, tm, debug,
					     si_build_gs_prolog_function,
					     "Geometry Shader Prolog");
	return shader->prolog2 != NULL;
}
7371
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overriden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMValueRef ret, func;
	int num_returns, i, num_color_channels;

	/* The prolog must actually do something, otherwise it should not
	 * have been requested.
	 */
	assert(si_need_ps_prolog(key));

	si_init_function_info(&fninfo);

	/* Declare inputs. */
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		add_arg(&fninfo, ARG_SGPR, ctx->i32);

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Declare outputs (same as inputs + add colors if needed) */
	num_returns = fninfo.num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		fninfo.types[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", fninfo.types, num_returns,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < fninfo.num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(ctx->ac.builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(ctx->ac.builder, bc_optimize,
					     ctx->i1, "");

		/* The VGPR offsets (base + 2, + 4, + 8, + 10) below address
		 * the fixed PS input layout: persp sample/center/centroid
		 * (i,j) pairs followed by the linear ones.
		 */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors. Interpolated channels are appended after the
	 * pass-through inputs, starting at return index fninfo.num_params.
	 */
	unsigned color_out_idx = 0;
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(&ctx->gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = ac_to_integer(&ctx->ac, face);
		}

		/* interp_fs_input fills color[0..3] for this attribute. */
		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret, color[chan],
						   fninfo.num_params + color_out_idx++, "");
		}
	}

	/* Section 15.2.2 (Shader Inputs) of the OpenGL 4.5 (Core Profile) spec
	 * says:
	 *
	 *    "When per-sample shading is active due to the use of a fragment
	 *     input qualified by sample or due to the use of the gl_SampleID
	 *     or gl_SamplePosition variables, only the bit for the current
	 *     sample is set in gl_SampleMaskIn. When state specifies multiple
	 *     fragment shader invocations for a given fragment, the sample
	 *     mask for any single fragment shader invocation may specify a
	 *     subset of the covered samples for the fragment. In this case,
	 *     the bit corresponding to each covered sample will be set in
	 *     exactly one fragment shader invocation."
	 *
	 * The samplemask loaded by hardware is always the coverage of the
	 * entire pixel/fragment, so mask bits out based on the sample ID.
	 */
	if (key->ps_prolog.states.samplemask_log_ps_iter) {
		/* The bit pattern matches that used by fixed function fragment
		 * processing. */
		static const uint16_t ps_iter_masks[] = {
			0xffff, /* not used */
			0x5555,
			0x1111,
			0x0101,
			0x0001,
		};
		assert(key->ps_prolog.states.samplemask_log_ps_iter < ARRAY_SIZE(ps_iter_masks));

		uint32_t ps_iter_mask = ps_iter_masks[key->ps_prolog.states.samplemask_log_ps_iter];
		unsigned ancillary_vgpr = key->ps_prolog.num_input_sgprs +
					  key->ps_prolog.ancillary_vgpr_index;
		/* Sample ID is in bits [11:8] of the ancillary VGPR. */
		LLVMValueRef sampleid = unpack_param(ctx, ancillary_vgpr, 8, 4);
		LLVMValueRef samplemask = LLVMGetParam(func, ancillary_vgpr + 1);

		/* samplemask &= ps_iter_mask << sampleid */
		samplemask = ac_to_integer(&ctx->ac, samplemask);
		samplemask = LLVMBuildAnd(
			ctx->ac.builder,
			samplemask,
			LLVMBuildShl(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, ps_iter_mask, false),
				     sampleid, ""),
			"");
		samplemask = ac_to_float(&ctx->ac, samplemask);

		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, samplemask,
					   ancillary_vgpr + 1, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7650
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int i;
	struct si_ps_exports exp = {};

	si_init_function_info(&fninfo);

	/* Declare input SGPRs. */
	ctx->param_rw_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_bindless_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_const_and_shader_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);

	/* Declare input VGPRs: 4 channels per written color, then one VGPR
	 * each for Z, stencil and samplemask if they are written.
	 */
	unsigned required_num_params =
		     fninfo.num_sgpr_params +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Make sure the parameter list at least reaches the fixed
	 * samplemask location, even if fewer values are actually written.
	 */
	required_num_params = MAX2(required_num_params,
				   fninfo.num_sgpr_params + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	while (fninfo.num_params < required_num_params)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, &fninfo, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = fninfo.num_sgpr_params;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so it can be marked "done".
	 * Only applicable when no Z/stencil/samplemask export follows.
	 */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1ull << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			/* Each colorbuffer has a 4-bit format field in
			 * spi_format; skip MRTs whose format is disabled.
			 */
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    fninfo.num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		/* A PS must export something; emit a null export if
		 * nothing else was exported.
		 */
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(ctx->ac.builder);
}
7749
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also fixes up spi_ps_input_ena so that the enable bits agree with the
 * interpolation modes forced by the prolog and with hardware requirements.
 *
 * \return true on success, false if a part failed to compile.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced mode disables the enables it replaces and turns on
	 * the one the prolog actually reads.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID. */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7852
7853 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7854 unsigned *lds_size)
7855 {
7856 /* SPI barrier management bug:
7857 * Make sure we have at least 4k of LDS in use to avoid the bug.
7858 * It applies to workgroup sizes of more than one wavefront.
7859 */
7860 if (sscreen->info.family == CHIP_BONAIRE ||
7861 sscreen->info.family == CHIP_KABINI ||
7862 sscreen->info.family == CHIP_MULLINS)
7863 *lds_size = MAX2(*lds_size, 8);
7864 }
7865
7866 static void si_fix_resource_usage(struct si_screen *sscreen,
7867 struct si_shader *shader)
7868 {
7869 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7870
7871 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7872
7873 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7874 si_get_max_workgroup_size(shader) > 64) {
7875 si_multiwave_lds_size_workaround(sscreen,
7876 &shader->config.lds_size);
7877 }
7878 }
7879
/**
 * Create a shader variant: either compile it as one monolithic shader, or
 * assemble it from the precompiled main part plus selected prolog/epilog
 * parts, then dump and upload the final binary.
 *
 * \return 0 on success, negative on failure (r passed through or -1).
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return -1;

		/* Copy the compiled TGSI shader data over. The binary is
		 * shared (not owned), see is_binary_shared below and in
		 * si_shader_destroy().
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			/* TES has no prolog/epilog parts. */
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader must be
		 * allocated the maximum registers used by any of its parts.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
		si_calculate_max_simd_waves(shader);
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8018
8019 void si_shader_destroy(struct si_shader *shader)
8020 {
8021 if (shader->scratch_bo)
8022 r600_resource_reference(&shader->scratch_bo, NULL);
8023
8024 r600_resource_reference(&shader->bo, NULL);
8025
8026 if (!shader->is_binary_shared)
8027 ac_shader_binary_clean(&shader->binary);
8028
8029 free(shader->shader_log);
8030 }