Revert "radeonsi: use uint32_t to declare si_shader_key.opt.kill_outputs"
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "util/u_memory.h"
37 #include "util/u_string.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_util.h"
40 #include "tgsi/tgsi_dump.h"
41
42 #include "ac_binary.h"
43 #include "ac_llvm_util.h"
44 #include "ac_exp_param.h"
45 #include "si_shader_internal.h"
46 #include "si_pipe.h"
47 #include "sid.h"
48
49
/* Names of relocatable symbols in the shader binary; presumably patched
 * with the scratch buffer's resource descriptor dwords at upload time
 * (TODO confirm against the binary-upload code, not visible here).
 */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
55
/* One shader output slot: its 4 component values plus the TGSI semantic
 * identifying it.
 */
struct si_shader_output_values
{
	LLVMValueRef values[4];   /* one LLVM value per vec4 component */
	unsigned semantic_name;   /* TGSI_SEMANTIC_* */
	unsigned semantic_index;  /* index for array-like semantics (e.g. GENERIC[n]) */
	ubyte vertex_stream[4];   /* per-component stream id; presumably GS streams — confirm */
};
63
/* Forward declarations — definitions appear later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f);

static unsigned llvm_get_type_size(LLVMTypeRef type);

/* Builders for the fixed-function shader parts (prologs/epilogs). */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);

/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* LLVM address-space numbers used for AMDGPU pointers. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};
95
96 static bool is_merged_shader(struct si_shader *shader)
97 {
98 if (shader->selector->screen->b.chip_class <= VI)
99 return false;
100
101 return shader->key.as_ls ||
102 shader->key.as_es ||
103 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
104 shader->selector->type == PIPE_SHADER_GEOMETRY;
105 }
106
107 /**
108 * Returns a unique index for a per-patch semantic name and index. The index
109 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
110 * can be calculated.
111 */
112 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
113 {
114 switch (semantic_name) {
115 case TGSI_SEMANTIC_TESSOUTER:
116 return 0;
117 case TGSI_SEMANTIC_TESSINNER:
118 return 1;
119 case TGSI_SEMANTIC_PATCH:
120 assert(index < 30);
121 return 2 + index;
122
123 default:
124 assert(!"invalid semantic name");
125 return 0;
126 }
127 }
128
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), so GENERIC should be placed
		 * right after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index <= 1);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 4;
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 5;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6;
	case TGSI_SEMANTIC_PRIMID:
		return SI_MAX_IO_GENERIC + 7;
	case TGSI_SEMANTIC_COLOR: /* these alias */
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 8 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		/* TEXCOORD occupies the last slots; make sure we stay below 64. */
		assert(SI_MAX_IO_GENERIC + 10 + index < 64);
		return SI_MAX_IO_GENERIC + 10 + index;
	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
176
177 /**
178 * Get the value of a shader input parameter and extract a bitfield.
179 */
180 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
181 unsigned param, unsigned rshift,
182 unsigned bitwidth)
183 {
184 struct gallivm_state *gallivm = &ctx->gallivm;
185 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
186 param);
187
188 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
189 value = bitcast(&ctx->bld_base,
190 TGSI_TYPE_UNSIGNED, value);
191
192 if (rshift)
193 value = LLVMBuildLShr(gallivm->builder, value,
194 LLVMConstInt(ctx->i32, rshift, 0), "");
195
196 if (rshift + bitwidth < 32) {
197 unsigned mask = (1 << bitwidth) - 1;
198 value = LLVMBuildAnd(gallivm->builder, value,
199 LLVMConstInt(ctx->i32, mask, 0), "");
200 }
201
202 return value;
203 }
204
205 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
206 {
207 switch (ctx->type) {
208 case PIPE_SHADER_TESS_CTRL:
209 return unpack_param(ctx, ctx->param_tcs_rel_ids, 0, 8);
210
211 case PIPE_SHADER_TESS_EVAL:
212 return LLVMGetParam(ctx->main_fn,
213 ctx->param_tes_rel_patch_id);
214
215 default:
216 assert(0);
217 return NULL;
218 }
219 }
220
221 /* Tessellation shaders pass outputs to the next shader using LDS.
222 *
223 * LS outputs = TCS inputs
224 * TCS outputs = TES inputs
225 *
226 * The LDS layout is:
227 * - TCS inputs for patch 0
228 * - TCS inputs for patch 1
229 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
230 * - ...
231 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
232 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
233 * - TCS outputs for patch 1
234 * - Per-patch TCS outputs for patch 1
235 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
236 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
237 * - ...
238 *
239 * All three shaders VS(LS), TCS, TES share the same LDS space.
240 */
241
/* Per-patch TCS input stride, unpacked from vs_state_bits[20:8].
 * Units are presumably dwords, matching get_dw_address — confirm.
 */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
247
/* Per-patch TCS output stride, unpacked from tcs_out_lds_layout[12:0]. */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
}
253
254 static LLVMValueRef
255 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
256 {
257 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
258 unpack_param(ctx,
259 ctx->param_tcs_out_lds_offsets,
260 0, 16),
261 4);
262 }
263
264 static LLVMValueRef
265 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
266 {
267 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
268 unpack_param(ctx,
269 ctx->param_tcs_out_lds_offsets,
270 16, 16),
271 4);
272 }
273
274 static LLVMValueRef
275 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
276 {
277 struct gallivm_state *gallivm = &ctx->gallivm;
278 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
279 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
280
281 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
282 }
283
284 static LLVMValueRef
285 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
286 {
287 struct gallivm_state *gallivm = &ctx->gallivm;
288 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
289 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
290 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
291
292 return LLVMBuildAdd(gallivm->builder, patch0_offset,
293 LLVMBuildMul(gallivm->builder, patch_stride,
294 rel_patch_id, ""),
295 "");
296 }
297
298 static LLVMValueRef
299 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
300 {
301 struct gallivm_state *gallivm = &ctx->gallivm;
302 LLVMValueRef patch0_patch_data_offset =
303 get_tcs_out_patch0_patch_data_offset(ctx);
304 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
305 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
306
307 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
308 LLVMBuildMul(gallivm->builder, patch_stride,
309 rel_patch_id, ""),
310 "");
311 }
312
313 static LLVMValueRef get_instance_index_for_fetch(
314 struct si_shader_context *ctx,
315 unsigned param_start_instance, unsigned divisor)
316 {
317 struct gallivm_state *gallivm = &ctx->gallivm;
318
319 LLVMValueRef result = LLVMGetParam(ctx->main_fn,
320 ctx->param_instance_id);
321
322 /* The division must be done before START_INSTANCE is added. */
323 if (divisor > 1)
324 result = LLVMBuildUDiv(gallivm->builder, result,
325 LLVMConstInt(ctx->i32, divisor, 0), "");
326
327 return LLVMBuildAdd(gallivm->builder, result,
328 LLVMGetParam(ctx->main_fn, param_start_instance), "");
329 }
330
331 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
332 * to float. */
333 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
334 LLVMValueRef vec4,
335 unsigned double_index)
336 {
337 LLVMBuilderRef builder = ctx->gallivm.builder;
338 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->gallivm.context);
339 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
340 LLVMVectorType(f64, 2), "");
341 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
342 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
343 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
344 }
345
/**
 * Build the vertex-buffer loads for one VS input and split the result into
 * 4 scalar float components in \p out.
 *
 * Most formats need a single typed buffer load; formats listed in
 * shader->key.mono.vs_fix_fetch need multiple loads and/or software
 * fix-ups afterwards.
 *
 * \param input_index  index of the VS input
 * \param decl         the TGSI declaration (not referenced here)
 * \param out          receives the 4 components of the attribute
 */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		/* Common case: one load, no fix-up stride. */
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       true);
	}

	/* Break up the vec4 into individual components.
	 * Multi-fetch formats overwrite these below.
	 */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input[0], llvm_chan, "");
	}

	/* Apply the per-format software fix-ups. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(gallivm->builder, tmp, ctx->i32, "");
		else
			tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->i32, "");

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(gallivm->builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(gallivm->builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
			/* Clamp to -1.0: -2/3 scaled up can underflow past it. */
			clamp = LLVMBuildFCmp(gallivm->builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(gallivm->builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		/* Normalize unsigned 32-bit integers to [0, 1]. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		/* Normalize signed integers; FIXED is 16.16 fixed point. */
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000;
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		/* Convert unsigned integers to float without normalizing. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		/* Convert signed integers to float without normalizing. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		/* Two doubles in one 4-dword load; pad B/A with 0/1. */
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* One double per 2-dword load; pad A with 1. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two doubles per 4-dword load. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* One component per load; synthesize alpha = 1 (float or int). */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(gallivm->builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			out[3] = LLVMBuildBitCast(gallivm->builder, ctx->i32_1,
						  ctx->f32, "");
		}
		break;
	}
}
551
552 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
553 unsigned swizzle)
554 {
555 struct si_shader_context *ctx = si_shader_context(bld_base);
556
557 if (swizzle > 0)
558 return ctx->i32_0;
559
560 switch (ctx->type) {
561 case PIPE_SHADER_VERTEX:
562 return LLVMGetParam(ctx->main_fn,
563 ctx->param_vs_prim_id);
564 case PIPE_SHADER_TESS_CTRL:
565 return LLVMGetParam(ctx->main_fn,
566 ctx->param_tcs_patch_id);
567 case PIPE_SHADER_TESS_EVAL:
568 return LLVMGetParam(ctx->main_fn,
569 ctx->param_tes_patch_id);
570 case PIPE_SHADER_GEOMETRY:
571 return LLVMGetParam(ctx->main_fn,
572 ctx->param_gs_prim_id);
573 default:
574 assert(0);
575 return ctx->i32_0;
576 }
577 }
578
579 /**
580 * Return the value of tgsi_ind_register for indexing.
581 * This is the indirect index with the constant offset added to it.
582 */
583 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
584 const struct tgsi_ind_register *ind,
585 int rel_index)
586 {
587 struct gallivm_state *gallivm = &ctx->gallivm;
588 LLVMValueRef result;
589
590 result = ctx->addrs[ind->Index][ind->Swizzle];
591 result = LLVMBuildLoad(gallivm->builder, result, "");
592 result = LLVMBuildAdd(gallivm->builder, result,
593 LLVMConstInt(ctx->i32, rel_index, 0), "");
594 return result;
595 }
596
597 /**
598 * Like get_indirect_index, but restricts the return value to a (possibly
599 * undefined) value inside [0..num).
600 */
601 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
602 const struct tgsi_ind_register *ind,
603 int rel_index, unsigned num)
604 {
605 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
606
607 return si_llvm_bound_index(ctx, result, num);
608 }
609
610
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst  destination register; used when \p src is NULL
 * \param src  source register; takes precedence over \p dst
 * \param vertex_dw_stride  dword stride between vertices in a 2-D register
 *                          (per-vertex I/O); unused for per-patch registers
 * \param base_addr  starting dword address of the patch
 * \return base_addr advanced to the addressed attribute
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		/* NOTE: this shadows the outer "index" (semantic index array),
		 * which is only assigned later. */
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Every attribute occupies 4 dwords. */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");

		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[first], index[first]) :
			si_shader_io_get_unique_index_patch(name[first], index[first]);
	} else {
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]) :
			si_shader_io_get_unique_index_patch(name[reg.Register.Index],
							    index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
700
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
                                               LLVMValueRef rel_patch_id,
                                               LLVMValueRef vertex_index,
                                               LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Layout parameters are packed in tcs_offchip_layout:
	 * [5:0] = num_patches, [11:6] = vertices_per_patch.
	 */
	vertices_per_patch = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
	                              num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex attribute: index by (patch, vertex); attributes
		 * are strided by the total vertex count (see layout above).
		 */
		base_addr = LLVMBuildMul(gallivm->builder, rel_patch_id,
		                         vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch attribute: index by patch only. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
	                         LLVMBuildMul(gallivm->builder, param_index,
	                                      param_stride, ""), "");

	/* 16 bytes per attribute (4 dword components). */
	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes;
		 * the offset is packed in tcs_offchip_layout[31:12].
		 */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         patch_data_offset, "");
	}
	return base_addr;
}
762
/* Compute the TCS->TES offchip buffer address for a TGSI register,
 * resolving indirect vertex and parameter indices. Exactly one of
 * \p dst / \p src must be non-NULL.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
                                struct si_shader_context *ctx,
                                const struct tgsi_full_dst_register *dst,
                                const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* 2-D register => per-vertex access; resolve the vertex index. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
			                                  reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
		                                 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = ctx->i32_0;
	}

	/* Map the semantic to its unique attribute slot. */
	param_index_base = reg.Register.Dimension ?
		si_shader_io_get_unique_index(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
	                           LLVMConstInt(ctx->i32, param_index_base, 0),
	                           "");

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
	                                  vertex_index, param_index);
}
826
/**
 * Load a value from a buffer resource.
 *
 * \param type           result TGSI type
 * \param swizzle        channel to read (0..3), or ~0 to load a full vec4
 * \param buffer         v4i32 buffer resource descriptor
 * \param offset, base   forwarded to ac_build_buffer_load; NOTE(review):
 *                       callers pass them as (soffset, voffset) — confirm
 *                       against the ac_build_buffer_load prototype
 * \param can_speculate  whether the load may be speculated
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
                                enum tgsi_opcode_type type, unsigned swizzle,
                                LLVMValueRef buffer, LLVMValueRef offset,
                                LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value, value2;
	LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
	LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);

	if (swizzle == ~0) {
		/* Load all 4 channels at once. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
		                             0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
	}

	if (!tgsi_type_is_64bit(type)) {
		/* 32-bit scalar: load the vec4 and extract one channel. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
		                             0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
		return LLVMBuildExtractElement(gallivm->builder, value,
		                               LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit scalar: load the two dwords separately and combine them. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
	                             swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
	                              swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
862
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
                             enum tgsi_opcode_type type, unsigned swizzle,
                             LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value;

	if (swizzle == ~0) {
		/* Recurse per channel and gather the results into a vec4. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(gallivm, values,
		                              TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
	                       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit values occupy two consecutive dwords. */
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
		                       ctx->i32_1);
		value2 = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
	                        tgsi2llvmtype(bld_base, type), "");
}
903
/**
 * Store to LDS.
 *
 * \param dw_offset_imm	constant dword offset added to dw_addr (typically 0..3)
 * \param dw_addr	address in dwords
 * \param value		value to store
 */
static void lds_store(struct lp_build_tgsi_context *bld_base,
                      unsigned dw_offset_imm, LLVMValueRef dw_addr,
                      LLVMValueRef value)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
	                       LLVMConstInt(ctx->i32, dw_offset_imm, 0));

	/* LDS is accessed as i32 here; reinterpret the value before storing. */
	value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
	ac_build_indexed_store(&ctx->ac, ctx->lds,
	                       dw_addr, value);
}
925
/* Build a v4i32 buffer resource descriptor from a shader parameter that
 * holds (buffer address >> 16), i.e. the base of a 64K-aligned buffer.
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
                                           unsigned param)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* Reconstruct the 64-bit base address into descriptor dwords 0-1. */
	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	/* dword2 = maximum range; dword3 = identity swizzle, 32-bit float format. */
	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
	                 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
	                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
	                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
	                 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
	                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as <2 x i64>, then bitcast to the v4i32 descriptor type. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
949
950 static LLVMValueRef fetch_input_tcs(
951 struct lp_build_tgsi_context *bld_base,
952 const struct tgsi_full_src_register *reg,
953 enum tgsi_opcode_type type, unsigned swizzle)
954 {
955 struct si_shader_context *ctx = si_shader_context(bld_base);
956 LLVMValueRef dw_addr, stride;
957
958 stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
959 dw_addr = get_tcs_in_current_patch_offset(ctx);
960 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
961
962 return lds_load(bld_base, type, swizzle, dw_addr);
963 }
964
965 static LLVMValueRef fetch_output_tcs(
966 struct lp_build_tgsi_context *bld_base,
967 const struct tgsi_full_src_register *reg,
968 enum tgsi_opcode_type type, unsigned swizzle)
969 {
970 struct si_shader_context *ctx = si_shader_context(bld_base);
971 LLVMValueRef dw_addr, stride;
972
973 if (reg->Register.Dimension) {
974 stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
975 dw_addr = get_tcs_out_current_patch_offset(ctx);
976 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
977 } else {
978 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
979 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
980 }
981
982 return lds_load(bld_base, type, swizzle, dw_addr);
983 }
984
985 static LLVMValueRef fetch_input_tes(
986 struct lp_build_tgsi_context *bld_base,
987 const struct tgsi_full_src_register *reg,
988 enum tgsi_opcode_type type, unsigned swizzle)
989 {
990 struct si_shader_context *ctx = si_shader_context(bld_base);
991 LLVMValueRef buffer, base, addr;
992
993 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
994
995 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
996 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
997
998 return buffer_load(bld_base, type, swizzle, buffer, base, addr, true);
999 }
1000
/* Store a scalar TCS output to LDS (for later in-shader reads and the TCS
 * epilog) and/or to the off-chip TCS->TES buffer.
 *
 * Vector stores and non-OUTPUT destinations are forwarded to the generic
 * store; vectors are lowered to scalars there and come back through this
 * function one channel at a time.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output: address by patch offset + vertex stride. */
		stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		/* LDS copy is only needed if some invocation reads it back. */
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				skip_lds_store = false;
				is_tess_factor = true;
			}
		}
	}

	/* Off-chip buffer descriptor and offsets for the VMEM stores below. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: store each enabled channel separately.
		 * Tess factors never go to the off-chip buffer from here. */
		if (inst->Dst[0].Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}
	}

	/* Full writemask: combine the 4 channels into one vec4 store. */
	if (inst->Dst[0].Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1081
/* Fetch a GS input (a per-vertex ES output) from the ESGS ring.
 *
 * On GFX9 the ring lives in LDS and vertex offsets are packed two per
 * SGPR; on older chips it is a memory buffer addressed via per-vertex
 * offset VGPRs.  swizzle == ~0 requests all four channels.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	/* The primitive ID is not stored in the ring; it has its own source. */
	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	if (!reg->Register.Dimension)
		return NULL;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->b.chip_class >= GFX9) {
		unsigned index = reg->Dimension.Index;

		/* Vertex offsets are packed two 16-bit values per param. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		vtx_offset = LLVMBuildAdd(gallivm->builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Whole-vector fetch: recurse once per channel and gather. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	unsigned vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += ctx->param_gs_vtx0_offset;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += ctx->param_gs_vtx2_offset - 2;
	}
	/* Offsets are in dwords; convert to bytes. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit types need the adjacent dword as the second half. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1176
1177 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1178 {
1179 switch (interpolate) {
1180 case TGSI_INTERPOLATE_CONSTANT:
1181 return 0;
1182
1183 case TGSI_INTERPOLATE_LINEAR:
1184 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1185 return SI_PARAM_LINEAR_SAMPLE;
1186 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1187 return SI_PARAM_LINEAR_CENTROID;
1188 else
1189 return SI_PARAM_LINEAR_CENTER;
1190 break;
1191 case TGSI_INTERPOLATE_COLOR:
1192 case TGSI_INTERPOLATE_PERSPECTIVE:
1193 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1194 return SI_PARAM_PERSP_SAMPLE;
1195 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1196 return SI_PARAM_PERSP_CENTROID;
1197 else
1198 return SI_PARAM_PERSP_CENTER;
1199 break;
1200 default:
1201 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1202 return -1;
1203 }
1204 }
1205
1206 /**
1207 * Interpolate a fragment shader input.
1208 *
1209 * @param ctx context
1210 * @param input_index index of the input in hardware
1211 * @param semantic_name TGSI_SEMANTIC_*
1212 * @param semantic_index semantic index
1213 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1214 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1215 * @param interp_param interpolation weights (i,j)
1216 * @param prim_mask SI_PARAM_PRIM_MASK
1217 * @param face SI_PARAM_FRONT_FACE
1218 * @param result the return value (4 components)
1219 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef attr_number;
	LLVMValueRef i, j;

	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (interp) {
		/* Split the (i, j) barycentric weights out of the 2-element
		 * vector; they feed every fs.interp below. */
		interp_param = LLVMBuildBitCast(gallivm->builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(gallivm->builder, interp_param,
						ctx->i32_0, "");
		j = LLVMBuildExtractElement(gallivm->builder, interp_param,
						ctx->i32_1, "");
	}

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.part.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and the back
		 * color and select per-pixel based on the facedness input. */
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = LLVMConstInt(ctx->i32, back_attr_offset, 0);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
			LLVMValueRef front, back;

			if (interp) {
				front = ac_build_fs_interp(&ctx->ac, llvm_chan,
							attr_number, prim_mask,
							i, j);
				back = ac_build_fs_interp(&ctx->ac, llvm_chan,
							back_attr_number, prim_mask,
							i, j);
			} else {
				front = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
				back = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, back_attr_number, prim_mask);
			}

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only .x is interpolated; (y, z) = 0 and w = 1. */
		if (interp) {
			result[0] = ac_build_fs_interp(&ctx->ac, ctx->i32_0,
						       attr_number, prim_mask, i, j);
		} else {
			result[0] = ac_build_fs_interp_mov(&ctx->ac, ctx->i32_0,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				attr_number, prim_mask);
		}
		result[1] =
		result[2] = LLVMConstReal(ctx->f32, 0.0f);
		result[3] = LLVMConstReal(ctx->f32, 1.0f);
	} else {
		/* Generic input: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);

			if (interp) {
				result[chan] = ac_build_fs_interp(&ctx->ac,
					llvm_chan, attr_number, prim_mask, i, j);
			} else {
				result[chan] = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
			}
		}
	}
}
1334
1335 static void declare_input_fs(
1336 struct si_shader_context *ctx,
1337 unsigned input_index,
1338 const struct tgsi_full_declaration *decl,
1339 LLVMValueRef out[4])
1340 {
1341 struct lp_build_context *base = &ctx->bld_base.base;
1342 struct si_shader *shader = ctx->shader;
1343 LLVMValueRef main_fn = ctx->main_fn;
1344 LLVMValueRef interp_param = NULL;
1345 int interp_param_idx;
1346
1347 /* Get colors from input VGPRs (set by the prolog). */
1348 if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
1349 unsigned i = decl->Semantic.Index;
1350 unsigned colors_read = shader->selector->info.colors_read;
1351 unsigned mask = colors_read >> (i * 4);
1352 unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
1353 (i ? util_bitcount(colors_read & 0xf) : 0);
1354
1355 out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
1356 out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
1357 out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
1358 out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
1359 return;
1360 }
1361
1362 interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
1363 decl->Interp.Location);
1364 if (interp_param_idx == -1)
1365 return;
1366 else if (interp_param_idx) {
1367 interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
1368 }
1369
1370 interp_fs_input(ctx, input_index, decl->Semantic.Name,
1371 decl->Semantic.Index, shader->selector->info.num_inputs,
1372 shader->selector->info.colors_read, interp_param,
1373 LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
1374 LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
1375 &out[0]);
1376 }
1377
/* Extract the 4-bit sample index from the ancillary PS input (bits 11:8). */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1382
1383
/**
 * Load a dword from a constant buffer.
 *
 * \param resource  the buffer resource descriptor
 * \param offset    byte offset of the dword to load
 * \return          the loaded value as f32
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	/* NOTE(review): the two trailing true flags presumably mark the load
	 * as speculatable/read-only — confirm against ac_build_buffer_load. */
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1394
1395 static LLVMValueRef load_sample_position(struct si_shader_context *ctx, LLVMValueRef sample_id)
1396 {
1397 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1398 struct gallivm_state *gallivm = &ctx->gallivm;
1399 LLVMBuilderRef builder = gallivm->builder;
1400 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1401 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1402 LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
1403
1404 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1405 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1406 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1407
1408 LLVMValueRef pos[4] = {
1409 buffer_load_const(ctx, resource, offset0),
1410 buffer_load_const(ctx, resource, offset1),
1411 LLVMConstReal(ctx->f32, 0),
1412 LLVMConstReal(ctx->f32, 0)
1413 };
1414
1415 return lp_build_gather_values(gallivm, pos, 4);
1416 }
1417
/* Translate a TGSI system-value declaration into an LLVM value and cache it
 * in ctx->system_values[index].
 *
 * Most values come straight from shader input SGPRs/VGPRs; a few (tess
 * levels, sample position, subgroup masks) are computed or loaded here.
 */
static void declare_system_value(struct si_shader_context *ctx,
				 unsigned index,
				 const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *bld = &ctx->bld_base.base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(ctx->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* Hardware supplies a vertex id relative to the base vertex. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_base_vertex), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of the VS state word flags an indexed draw. */
		indexed = LLVMBuildLShr(gallivm->builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(gallivm->builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(gallivm->builder, indexed,
					LLVMGetParam(ctx->main_fn, ctx->param_base_vertex),
					ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_start_instance);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_draw_id);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			/* TCS: invocation id lives in bits 12:8 of rel_ids. */
			value = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(ctx->main_fn,
					     ctx->param_gs_instance_id);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* gl_FragCoord; .w is delivered as 1/w and must be inverted. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Derive the in-pixel sample position from the fractional
		 * part of the fragment position. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
			LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count, packed into different layout words for
		 * the TCS and TES stages. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess levels are read back from the off-chip TCS buffer. */
		LLVMValueRef buffer, base, addr;
		int param = si_shader_io_get_unique_index_patch(decl->Semantic.Name, 0);

		buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
		addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
						  LLVMConstInt(ctx->i32, param, 0));

		value = buffer_load(&ctx->bld_base, TGSI_TYPE_FLOAT,
				    ~0, buffer, base, addr, true);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Driver-set default tess levels from an internal const
		 * buffer: outer levels at dword 0, inner levels at dword 4. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&ctx->bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* Fixed block sizes become compile-time constants; otherwise
		 * the size arrives as a shader argument. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Components not supplied as parameters default to 0. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* Helper invocation = NOT ps.live, sign-extended to i32. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* The wavefront size is 64. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* eq_mask = 1 << lane_id, returned as a 2 x i32 vector. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* ge = ~0 << id, gt = ~1 << id; le/lt are their negations. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
1698
/* Declare the LDS block that backs TGSI shared (compute) memory.
 *
 * Creates a single i8 array global in the local address space, sized by the
 * shader's declared shared-memory size, and caches an i8* to it in
 * ctx->shared_memory.  Exactly one shared-memory declaration is allowed.
 */
static void declare_compute_memory(struct si_shader_context *ctx,
				   const struct tgsi_full_declaration *decl)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct gallivm_state *gallivm = &ctx->gallivm;

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
	LLVMValueRef var;

	assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
	assert(decl->Range.First == decl->Range.Last);
	assert(!ctx->shared_memory);

	var = LLVMAddGlobalInAddressSpace(gallivm->module,
					  LLVMArrayType(ctx->i8, sel->local_size),
					  "compute_lds",
					  LOCAL_ADDR_SPACE);
	LLVMSetAlignment(var, 4);

	ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
}
1720
1721 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1722 {
1723 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1724 ctx->param_const_and_shader_buffers);
1725
1726 return ac_build_indexed_load_const(&ctx->ac, list_ptr,
1727 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
1728 }
1729
/* Fetch a constant-buffer source operand.
 *
 * Handles whole-vector fetches (swizzle == LP_CHAN_ALL) by recursing per
 * channel, both direct and indirect buffer/element indexing, and 64-bit
 * types (two adjacent dword loads combined).
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Fetch each channel separately and gather into a vec4. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		/* Indirect buffer index: clamp it, then offset past the
		 * shader-buffer slots in the combined descriptor list. */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_indexed_load_const(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	if (reg->Register.Indirect) {
		/* Indirect element: byte addr = addr_reg * 16 + idx * 4. */
		addr = ctx->addrs[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    LLVMConstInt(ctx->i32, idx * 4, 0));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit: load the next dword and combine the two halves. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1794
1795 /* Upper 16 bits must be zero. */
1796 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
1797 LLVMValueRef val[2])
1798 {
1799 return LLVMBuildOr(ctx->gallivm.builder, val[0],
1800 LLVMBuildShl(ctx->gallivm.builder, val[1],
1801 LLVMConstInt(ctx->i32, 16, 0),
1802 ""), "");
1803 }
1804
1805 /* Upper 16 bits are ignored and will be dropped. */
1806 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
1807 LLVMValueRef val[2])
1808 {
1809 LLVMValueRef v[2] = {
1810 LLVMBuildAnd(ctx->gallivm.builder, val[0],
1811 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
1812 val[1],
1813 };
1814 return si_llvm_pack_two_int16(ctx, v);
1815 }
1816
1817 /* Initialize arguments for the shader export intrinsic */
1818 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1819 LLVMValueRef *values,
1820 unsigned target,
1821 struct ac_export_args *args)
1822 {
1823 struct si_shader_context *ctx = si_shader_context(bld_base);
1824 struct lp_build_context *base = &bld_base->base;
1825 LLVMBuilderRef builder = ctx->gallivm.builder;
1826 LLVMValueRef val[4];
1827 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1828 unsigned chan;
1829 bool is_int8, is_int10;
1830
1831 /* Default is 0xf. Adjusted below depending on the format. */
1832 args->enabled_channels = 0xf; /* writemask */
1833
1834 /* Specify whether the EXEC mask represents the valid mask */
1835 args->valid_mask = 0;
1836
1837 /* Specify whether this is the last export */
1838 args->done = 0;
1839
1840 /* Specify the target we are exporting */
1841 args->target = target;
1842
1843 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1844 const struct si_shader_key *key = &ctx->shader->key;
1845 unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
1846 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1847
1848 assert(cbuf >= 0 && cbuf < 8);
1849 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1850 is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
1851 is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
1852 }
1853
1854 args->compr = false;
1855 args->out[0] = base->undef;
1856 args->out[1] = base->undef;
1857 args->out[2] = base->undef;
1858 args->out[3] = base->undef;
1859
1860 switch (spi_shader_col_format) {
1861 case V_028714_SPI_SHADER_ZERO:
1862 args->enabled_channels = 0; /* writemask */
1863 args->target = V_008DFC_SQ_EXP_NULL;
1864 break;
1865
1866 case V_028714_SPI_SHADER_32_R:
1867 args->enabled_channels = 1; /* writemask */
1868 args->out[0] = values[0];
1869 break;
1870
1871 case V_028714_SPI_SHADER_32_GR:
1872 args->enabled_channels = 0x3; /* writemask */
1873 args->out[0] = values[0];
1874 args->out[1] = values[1];
1875 break;
1876
1877 case V_028714_SPI_SHADER_32_AR:
1878 args->enabled_channels = 0x9; /* writemask */
1879 args->out[0] = values[0];
1880 args->out[3] = values[3];
1881 break;
1882
1883 case V_028714_SPI_SHADER_FP16_ABGR:
1884 args->compr = 1; /* COMPR flag */
1885
1886 for (chan = 0; chan < 2; chan++) {
1887 LLVMValueRef pack_args[2] = {
1888 values[2 * chan],
1889 values[2 * chan + 1]
1890 };
1891 LLVMValueRef packed;
1892
1893 packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
1894 args->out[chan] =
1895 LLVMBuildBitCast(ctx->gallivm.builder,
1896 packed, ctx->f32, "");
1897 }
1898 break;
1899
1900 case V_028714_SPI_SHADER_UNORM16_ABGR:
1901 for (chan = 0; chan < 4; chan++) {
1902 val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
1903 val[chan] = LLVMBuildFMul(builder, val[chan],
1904 LLVMConstReal(ctx->f32, 65535), "");
1905 val[chan] = LLVMBuildFAdd(builder, val[chan],
1906 LLVMConstReal(ctx->f32, 0.5), "");
1907 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1908 ctx->i32, "");
1909 }
1910
1911 args->compr = 1; /* COMPR flag */
1912 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1913 si_llvm_pack_two_int16(ctx, val));
1914 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1915 si_llvm_pack_two_int16(ctx, val+2));
1916 break;
1917
1918 case V_028714_SPI_SHADER_SNORM16_ABGR:
1919 for (chan = 0; chan < 4; chan++) {
1920 /* Clamp between [-1, 1]. */
1921 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1922 values[chan],
1923 LLVMConstReal(ctx->f32, 1));
1924 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1925 val[chan],
1926 LLVMConstReal(ctx->f32, -1));
1927 /* Convert to a signed integer in [-32767, 32767]. */
1928 val[chan] = LLVMBuildFMul(builder, val[chan],
1929 LLVMConstReal(ctx->f32, 32767), "");
1930 /* If positive, add 0.5, else add -0.5. */
1931 val[chan] = LLVMBuildFAdd(builder, val[chan],
1932 LLVMBuildSelect(builder,
1933 LLVMBuildFCmp(builder, LLVMRealOGE,
1934 val[chan], base->zero, ""),
1935 LLVMConstReal(ctx->f32, 0.5),
1936 LLVMConstReal(ctx->f32, -0.5), ""), "");
1937 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1938 }
1939
1940 args->compr = 1; /* COMPR flag */
1941 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1942 si_llvm_pack_two_int32_as_int16(ctx, val));
1943 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1944 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1945 break;
1946
1947 case V_028714_SPI_SHADER_UINT16_ABGR: {
1948 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1949 is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
1950 LLVMValueRef max_alpha =
1951 !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1952
1953 /* Clamp. */
1954 for (chan = 0; chan < 4; chan++) {
1955 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1956 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1957 val[chan],
1958 chan == 3 ? max_alpha : max_rgb);
1959 }
1960
1961 args->compr = 1; /* COMPR flag */
1962 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1963 si_llvm_pack_two_int16(ctx, val));
1964 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1965 si_llvm_pack_two_int16(ctx, val+2));
1966 break;
1967 }
1968
1969 case V_028714_SPI_SHADER_SINT16_ABGR: {
1970 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1971 is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
1972 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1973 is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
1974 LLVMValueRef max_alpha =
1975 !is_int10 ? max_rgb : ctx->i32_1;
1976 LLVMValueRef min_alpha =
1977 !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1978
1979 /* Clamp. */
1980 for (chan = 0; chan < 4; chan++) {
1981 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1982 val[chan] = lp_build_emit_llvm_binary(bld_base,
1983 TGSI_OPCODE_IMIN,
1984 val[chan], chan == 3 ? max_alpha : max_rgb);
1985 val[chan] = lp_build_emit_llvm_binary(bld_base,
1986 TGSI_OPCODE_IMAX,
1987 val[chan], chan == 3 ? min_alpha : min_rgb);
1988 }
1989
1990 args->compr = 1; /* COMPR flag */
1991 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1992 si_llvm_pack_two_int32_as_int16(ctx, val));
1993 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1994 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1995 break;
1996 }
1997
1998 case V_028714_SPI_SHADER_32_ABGR:
1999 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2000 break;
2001 }
2002 }
2003
2004 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2005 LLVMValueRef alpha)
2006 {
2007 struct si_shader_context *ctx = si_shader_context(bld_base);
2008
2009 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2010 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2011 SI_PARAM_ALPHA_REF);
2012
2013 LLVMValueRef alpha_pass =
2014 lp_build_cmp(&bld_base->base,
2015 ctx->shader->key.part.ps.epilog.alpha_func,
2016 alpha, alpha_ref);
2017 LLVMValueRef arg =
2018 lp_build_select(&bld_base->base,
2019 alpha_pass,
2020 LLVMConstReal(ctx->f32, 1.0f),
2021 LLVMConstReal(ctx->f32, -1.0f));
2022
2023 ac_build_kill(&ctx->ac, arg);
2024 } else {
2025 ac_build_kill(&ctx->ac, NULL);
2026 }
2027 }
2028
2029 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2030 LLVMValueRef alpha,
2031 unsigned samplemask_param)
2032 {
2033 struct si_shader_context *ctx = si_shader_context(bld_base);
2034 struct gallivm_state *gallivm = &ctx->gallivm;
2035 LLVMValueRef coverage;
2036
2037 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2038 coverage = LLVMGetParam(ctx->main_fn,
2039 samplemask_param);
2040 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2041
2042 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2043 ctx->i32,
2044 &coverage, 1, LP_FUNC_ATTR_READNONE);
2045
2046 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2047 ctx->f32, "");
2048
2049 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2050 LLVMConstReal(ctx->f32,
2051 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2052
2053 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2054 }
2055
/* Compute the two CLIPDIST position exports (POS+2 and POS+3) from a
 * CLIPVERTEX output. Each clip distance is the dot product of the clip
 * vertex (out_elts) with a user clip plane read from the
 * SI_VS_CONST_CLIP_PLANES constant buffer.
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);

	/* Two exports, 4 clip distances each (8 clip planes total). */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start every channel at 0 before accumulating. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane (reg_index*4 + chan),
				 * component const_chan, in the constant buffer
				 * (vec4s of dwords). */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(base, args->out[chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2100
2101 static void si_dump_streamout(struct pipe_stream_output_info *so)
2102 {
2103 unsigned i;
2104
2105 if (so->num_outputs)
2106 fprintf(stderr, "STREAMOUT\n");
2107
2108 for (i = 0; i < so->num_outputs; i++) {
2109 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2110 so->output[i].start_component;
2111 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2112 i, so->output[i].output_buffer,
2113 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2114 so->output[i].register_index,
2115 mask & 1 ? "x" : "",
2116 mask & 2 ? "y" : "",
2117 mask & 4 ? "z" : "",
2118 mask & 8 ? "w" : "");
2119 }
2120 }
2121
/* Write one shader output to its streamout buffer.
 *
 * Loads num_components values of the output starting at start_component,
 * packs them into an i32/v2i32/v4i32 and stores the result at the
 * precomputed per-buffer write offset plus the output's dst_offset.
 */
static void emit_streamout_output(struct si_shader_context *ctx,
				  LLVMValueRef const *so_buffers,
				  LLVMValueRef const *so_write_offsets,
				  struct pipe_stream_output *stream_out,
				  struct si_shader_output_values *shader_out)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	/* Defensive: also bail out in release builds where the assert
	 * compiles away. */
	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = LLVMBuildBitCast(builder,
					  shader_out->values[start + j],
					  ctx->i32, "");
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
	case 4: /* as v4i32 */
		vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
		for (int j = 0; j < num_comps; j++) {
			vdata = LLVMBuildInsertElement(builder, vdata, out[j],
						       LLVMConstInt(ctx->i32, j, 0), "");
		}
		break;
	}

	/* dst_offset is in dwords, hence * 4 for the byte offset. */
	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, 1, 1, true, false);
}
2172
/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 *
 * Only threads whose index is below the streamout vertex count supplied
 * by the hardware emit data; everything is wrapped in an if-block so
 * out-of-bounds buffer accesses cannot happen.
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
						    ctx->param_rw_buffers);

		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);

			/* The streamout offset parameter is in dwords. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			if (reg >= noutput)
				continue;

			/* Skip outputs belonging to other vertex streams. */
			if (stream != so->output[i].stream)
				continue;

			emit_streamout_output(ctx, so_buffers, so_write_offset,
					      &so->output[i], &outputs[reg]);
		}
	}
	lp_build_endif(&if_ctx);
}
2257
2258
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits PARAM exports for generic outputs (skipping those the key says
 * are dead), collects position-like outputs (position, point size, edge
 * flag, layer, viewport index, clip distances) into up to 4 POS exports
 * and emits those last, marking the final one as "done".
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].semantic_name;
		semantic_index = outputs[i].semantic_index;
		bool export_param = true;

		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION: /* ignore these */
		case TGSI_SEMANTIC_PSIZE:
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_EDGEFLAG:
			break;
		case TGSI_SEMANTIC_GENERIC:
			/* don't process indices the function can't handle */
			if (semantic_index >= SI_MAX_IO_GENERIC)
				break;
			/* fall through */
		default:
			/* kill_outputs is a 64-bit mask of unique output
			 * indices; skip the PARAM export for dead outputs. */
			if (shader->key.opt.kill_outputs &
			    (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
				export_param = false;
		}

		/* Also skip outputs that no vertex stream uses. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			export_param = false;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc POS export below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc POS export below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Saved for the misc POS export; also exported as a
			 * generic PARAM (hence the goto). */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			/* Saved for the misc POS export; also exported as a
			 * generic PARAM (hence the goto). */
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* With clipping disabled, export as a plain PARAM. */
			if (shader->key.opt.clip_disable) {
				semantic_name = TGSI_SEMANTIC_GENERIC;
				goto handle_semantic;
			}
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (shader->key.opt.clip_disable)
				continue;
			/* Turns the clip vertex into CLIPDIST POS exports. */
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			if (!export_param)
				continue;
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, &args);

		/* POS exports are buffered and emitted at the end; everything
		 * else is emitted immediately. */
		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		} else {
			ac_build_export(&ctx->ac, &args);
		}

		/* Clip distances are also exported as PARAMs. */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = base->zero; /* X */
		pos_args[0].out[1] = base->zero; /* Y */
		pos_args[0].out[2] = base->zero; /* Z */
		pos_args[0].out[3] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = base->zero; /* X */
		pos_args[1].out[1] = base->zero; /* Y */
		pos_args[1].out[2] = base->zero; /* Z */
		pos_args[1].out[3] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->gallivm.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = LLVMBuildBitCast(ctx->gallivm.builder,
							      edgeflag_value,
							      ctx->f32, "");
		}

		if (ctx->screen->b.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				v = bitcast(bld_base, TGSI_TYPE_UNSIGNED, v);
				v = LLVMBuildShl(ctx->gallivm.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				/* OR the viewport index into the layer bits
				 * already stored in Z. */
				v = LLVMBuildOr(ctx->gallivm.builder, v,
						bitcast(bld_base, TGSI_TYPE_UNSIGNED,
							pos_args[1].out[2]), "");
				pos_args[1].out[2] = bitcast(bld_base, TGSI_TYPE_FLOAT, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			/* Pre-GFX9: viewport index goes in its own channel (W). */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}
}
2472
/**
 * Forward all outputs from the vertex shader to the TES. This is only used
 * for the fixed function TCS.
 *
 * For each input named in ff_tcs_inputs_to_copy, the vec4 written by the
 * VS to LDS is loaded and stored to the TCS->TES offchip buffer.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS address of this invocation's vertex:
	 * base of the current input patch + invocation_id * vertex stride. */
	lds_vertex_stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
					 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	/* ff_tcs_inputs_to_copy is a 64-bit mask of input indices to copy. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
						    LLVMConstInt(ctx->i32, 4 * i, 0),
						    "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
						get_rel_patch_id(ctx),
						invocation_id,
						LLVMConstInt(ctx->i32, i, 0));

		/* Load the whole vec4 from LDS (~0 = all channels). */
		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
					      lds_ptr);

		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
2515
/* Write the tessellation factors of the current patch to the tess factor
 * ring buffer and, if the TES reads them, also to the offchip buffer.
 *
 * Only invocation 0 performs the stores, because tess levels are
 * per-patch rather than per-vertex. The factors are read back from LDS
 * since any invocation may have written them.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations have written their outputs to LDS
	 * before invocation 0 reads the tess levels back. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_inner_index * 4, 0), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_outer_index * 4, 0), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		outer[0] = out[1] = lds_load(bld_base, TGSI_TYPE_SIGNED, 0, lds_outer);
		outer[1] = out[0] = lds_load(bld_base, TGSI_TYPE_SIGNED, 1, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch writes the control word. */
	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->b.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		/* Subsequent stores skip past the control word. */
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = lp_build_gather_values(gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
						LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
2678
2679 static LLVMValueRef
2680 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2681 unsigned param, unsigned return_index)
2682 {
2683 return LLVMBuildInsertValue(ctx->gallivm.builder, ret,
2684 LLVMGetParam(ctx->main_fn, param),
2685 return_index, "");
2686 }
2687
2688 static LLVMValueRef
2689 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2690 unsigned param, unsigned return_index)
2691 {
2692 LLVMBuilderRef builder = ctx->gallivm.builder;
2693 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
2694
2695 return LLVMBuildInsertValue(builder, ret,
2696 LLVMBuildBitCast(builder, p, ctx->f32, ""),
2697 return_index, "");
2698 }
2699
2700 static LLVMValueRef
2701 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
2702 unsigned param, unsigned return_index)
2703 {
2704 LLVMBuilderRef builder = ctx->gallivm.builder;
2705 LLVMValueRef ptr, lo, hi;
2706
2707 ptr = LLVMGetParam(ctx->main_fn, param);
2708 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
2709 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
2710 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
2711 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
2712 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
2713 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
2714 }
2715
/* This only writes the tessellation factor levels.
 *
 * Copies the fixed-function TCS inputs, then builds the epilog return
 * value: the SGPR inputs the TCS epilog part needs, followed by three
 * VGPRs (rel_patch_id, invocation_id, tf_lds_offset) bitcast to float.
 * The SGPR slot layout differs between GFX9 (merged shaders) and GFX6-8.
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	/* Return epilog parameters from this function. */
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->b.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		/* VGPRs follow directly after the last SGPR slot. */
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
	invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
	tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	ctx->return_value = ret;
}
2769
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * Builds the LS part's return value: SGPR inputs the merged TCS part
 * expects (pointers split into 2 x i32, user SGPRs at offset 8), then
 * the patch-id and rel-ids VGPRs bitcast to float.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* User SGPRs start at slot 8. */
	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	/* Descriptor pointers, each passed as two i32s. */
	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs come after all user SGPRs. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_patch_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_rel_ids, vgpr++);
	ctx->return_value = ret;
}
2807
/* Pass GS inputs from ES to GS on GFX9.
 *
 * Builds the ES part's return value: SGPR inputs the merged GS part
 * expects, followed by the five vertex-offset/id VGPRs bitcast to float.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);

	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Descriptor pointers, each passed as two i32s; user SGPRs start
	 * at slot 8. */
	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* VGPRs come after all user SGPRs. */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
2832
/* Epilog of the vertex shader when it runs as the LS stage: store all
 * vertex outputs to LDS so the next stage (TCS) can read them, and on
 * GFX9 (merged LS-HS) also fill in the merged-shader return value. */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords, taken from bits [31:24] of the
	 * VS state SGPR. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	/* Base dword address of this vertex's output block in LDS. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output occupies 4 dwords at its unique I/O slot. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
2886
/* Epilog of the VS/TES shader when it runs as the ES stage: write all
 * outputs to the ESGS ring — a memory buffer before GFX9, LDS on GFX9 —
 * and on GFX9 (merged ES-GS) also fill in the merged-shader return value. */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->b.chip_class >= GFX9 && info->num_outputs) {
		/* GFX9: LDS base = thread id * ESGS item size (in dwords). */
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		lds_base = LLVMBuildMul(gallivm->builder, ac_get_thread_id(&ctx->ac),
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		int param;

		/* Writes to these in VS-as-ES are ignored; see the note in
		 * si_llvm_emit_ls_epilogue. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->b.chip_class >= GFX9) {
				lds_store(bld_base, param * 4 + chan, lds_base, out_val);
				continue;
			}

			/* Pre-GFX9: store to the ESGS ring buffer in memory. */
			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
2937
2938 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2939 {
2940 if (ctx->screen->b.chip_class >= GFX9)
2941 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
2942 else
2943 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
2944 }
2945
2946 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2947 {
2948 struct si_shader_context *ctx = si_shader_context(bld_base);
2949
2950 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2951 si_get_gs_wave_id(ctx));
2952 }
2953
/* Epilog of the last pre-rasterization vertex stage (VS or TES as a HW VS):
 * optionally clamp vertex colors, gather all outputs, emit streamout, add
 * the PrimitiveID export if requested, and export everything. */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);

	/* +1 reserves room for the optional PrimitiveID export below. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, gallivm, cond);
			}

			/* Clamp all 4 channels of the color output in place. */
			for (j = 0; j < 4; j++) {
				addr = ctx->outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Load all output values and their per-channel vertex streams. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->outputs[i][j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
					       get_primitive_id(bld_base, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
3038
/* Pixel shader exports collected by si_export_mrt_color/si_export_mrt_z
 * and emitted together by si_emit_ps_exports. */
struct si_ps_exports {
	unsigned num; /* number of valid entries in args[] */
	struct ac_export_args args[10];
};
3043
3044 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
3045 bool writes_samplemask)
3046 {
3047 if (writes_z) {
3048 /* Z needs 32 bits. */
3049 if (writes_samplemask)
3050 return V_028710_SPI_SHADER_32_ABGR;
3051 else if (writes_stencil)
3052 return V_028710_SPI_SHADER_32_GR;
3053 else
3054 return V_028710_SPI_SHADER_32_R;
3055 } else if (writes_stencil || writes_samplemask) {
3056 /* Both stencil and sample mask need only 16 bits. */
3057 return V_028710_SPI_SHADER_UINT16_ABGR;
3058 } else {
3059 return V_028710_SPI_SHADER_ZERO;
3060 }
3061 }
3062
/* Build the MRTZ export (depth, stencil and/or sample mask) and append it
 * to *exp. At least one of depth/stencil/samplemask must be non-NULL. */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args;
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args.valid_mask = 1; /* whether the EXEC mask is valid */
	args.done = 1; /* DONE bit */

	/* Specify the target we are exporting */
	args.target = V_008DFC_SQ_EXP_MRTZ;

	args.compr = 0; /* COMP flag */
	args.out[0] = base->undef; /* R, depth */
	args.out[1] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args.out[2] = base->undef; /* B, sample mask */
	args.out[3] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* 16-bit compressed export: two values per channel pair. */
		assert(!depth);
		args.compr = 1; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(ctx->gallivm.builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args.out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args.out[1] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit export: one value per enabled channel. */
		if (depth) {
			args.out[0] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args.out[1] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args.out[2] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND and HAINAN) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND &&
	    ctx->screen->b.family != CHIP_HAINAN)
		mask |= 0x1;

	/* Specify which components to enable */
	args.enabled_channels = mask;

	memcpy(&exp->args[exp->num++], &args, sizeof(args));
}
3133
/* Apply the PS epilog color transforms (clamping, alpha-to-one, alpha test,
 * line/polygon smoothing) to one output color and append its MRT export(s)
 * to *exp. "is_last" marks the final export, which carries the DONE bit. */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test: only applied to color output 0. */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3200
3201 static void si_emit_ps_exports(struct si_shader_context *ctx,
3202 struct si_ps_exports *exp)
3203 {
3204 for (unsigned i = 0; i < exp->num; i++)
3205 ac_build_export(&ctx->ac, &exp->args[i]);
3206 }
3207
3208 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3209 {
3210 struct si_shader_context *ctx = si_shader_context(bld_base);
3211 struct lp_build_context *base = &bld_base->base;
3212 struct ac_export_args args;
3213
3214 args.enabled_channels = 0x0; /* enabled channels */
3215 args.valid_mask = 1; /* whether the EXEC mask is valid */
3216 args.done = 1; /* DONE bit */
3217 args.target = V_008DFC_SQ_EXP_NULL;
3218 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3219 args.out[0] = base->undef; /* R */
3220 args.out[1] = base->undef; /* G */
3221 args.out[2] = base->undef; /* B */
3222 args.out[3] = base->undef; /* A */
3223
3224 ac_build_export(&ctx->ac, &args);
3225 }
3226
3227 /**
3228 * Return PS outputs in this order:
3229 *
3230 * v[0:3] = color0.xyzw
3231 * v[4:7] = color1.xyzw
3232 * ...
3233 * vN+0 = Depth
3234 * vN+1 = Stencil
3235 * vN+2 = SampleMask
3236 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3237 *
3238 * The alpha-ref SGPR is returned via its original location.
3239 */
3240 static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
3241 {
3242 struct si_shader_context *ctx = si_shader_context(bld_base);
3243 struct si_shader *shader = ctx->shader;
3244 struct tgsi_shader_info *info = &shader->selector->info;
3245 LLVMBuilderRef builder = ctx->gallivm.builder;
3246 unsigned i, j, first_vgpr, vgpr;
3247
3248 LLVMValueRef color[8][4] = {};
3249 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
3250 LLVMValueRef ret;
3251
3252 if (ctx->postponed_kill)
3253 ac_build_kill(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));
3254
3255 /* Read the output values. */
3256 for (i = 0; i < info->num_outputs; i++) {
3257 unsigned semantic_name = info->output_semantic_name[i];
3258 unsigned semantic_index = info->output_semantic_index[i];
3259
3260 switch (semantic_name) {
3261 case TGSI_SEMANTIC_COLOR:
3262 assert(semantic_index < 8);
3263 for (j = 0; j < 4; j++) {
3264 LLVMValueRef ptr = ctx->outputs[i][j];
3265 LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
3266 color[semantic_index][j] = result;
3267 }
3268 break;
3269 case TGSI_SEMANTIC_POSITION:
3270 depth = LLVMBuildLoad(builder,
3271 ctx->outputs[i][2], "");
3272 break;
3273 case TGSI_SEMANTIC_STENCIL:
3274 stencil = LLVMBuildLoad(builder,
3275 ctx->outputs[i][1], "");
3276 break;
3277 case TGSI_SEMANTIC_SAMPLEMASK:
3278 samplemask = LLVMBuildLoad(builder,
3279 ctx->outputs[i][0], "");
3280 break;
3281 default:
3282 fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
3283 semantic_name);
3284 }
3285 }
3286
3287 /* Fill the return structure. */
3288 ret = ctx->return_value;
3289
3290 /* Set SGPRs. */
3291 ret = LLVMBuildInsertValue(builder, ret,
3292 bitcast(bld_base, TGSI_TYPE_SIGNED,
3293 LLVMGetParam(ctx->main_fn,
3294 SI_PARAM_ALPHA_REF)),
3295 SI_SGPR_ALPHA_REF, "");
3296
3297 /* Set VGPRs */
3298 first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
3299 for (i = 0; i < ARRAY_SIZE(color); i++) {
3300 if (!color[i][0])
3301 continue;
3302
3303 for (j = 0; j < 4; j++)
3304 ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
3305 }
3306 if (depth)
3307 ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
3308 if (stencil)
3309 ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
3310 if (samplemask)
3311 ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");
3312
3313 /* Add the input sample mask for smoothing at the end. */
3314 if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
3315 vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
3316 ret = LLVMBuildInsertValue(builder, ret,
3317 LLVMGetParam(ctx->main_fn,
3318 SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");
3319
3320 ctx->return_value = ret;
3321 }
3322
/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 */
static void emit_optimization_barrier(struct si_shader_context *ctx,
				      LLVMValueRef *pvgpr)
{
	/* Each barrier gets a unique asm string ("; <n>") via this counter.
	 * NOTE(review): presumably this keeps LLVM from treating distinct
	 * barriers as identical/mergeable — confirm. */
	static int counter = 0;

	LLVMBuilderRef builder = ctx->gallivm.builder;
	char code[16];

	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		/* Pure barrier: void inline asm with side effects. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		/* Pass the first dword of *pvgpr through the asm ("=v,0"
		 * ties output to input) so the value is data-dependent on
		 * the barrier. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = llvm_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}
3363
3364 void si_emit_waitcnt(struct si_shader_context *ctx, unsigned simm16)
3365 {
3366 struct gallivm_state *gallivm = &ctx->gallivm;
3367 LLVMBuilderRef builder = gallivm->builder;
3368 LLVMValueRef args[1] = {
3369 LLVMConstInt(ctx->i32, simm16, 0)
3370 };
3371 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3372 ctx->voidt, args, 1, 0);
3373 }
3374
/* TGSI MEMBAR: translate the barrier flags into an s_waitcnt. The waitcnt
 * starts as NOOP_WAITCNT (wait on nothing) and counter bits are cleared
 * (ANDed out) for each counter that must drain before continuing. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	/* Workgroup barrier: wait on both memory (VM) and LDS (LGKM). */
	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	/* Buffer/image access goes through VM. */
	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	/* Shared (LDS) access uses the LGKM counter. */
	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	if (waitcnt != NOOP_WAITCNT)
		si_emit_waitcnt(ctx, waitcnt);
}
3399
3400 static void clock_emit(
3401 const struct lp_build_tgsi_action *action,
3402 struct lp_build_tgsi_context *bld_base,
3403 struct lp_build_emit_data *emit_data)
3404 {
3405 struct si_shader_context *ctx = si_shader_context(bld_base);
3406 struct gallivm_state *gallivm = &ctx->gallivm;
3407 LLVMValueRef tmp;
3408
3409 tmp = lp_build_intrinsic(gallivm->builder, "llvm.readcyclecounter",
3410 ctx->i64, NULL, 0, 0);
3411 tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->v2i32, "");
3412
3413 emit_data->output[0] =
3414 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_0, "");
3415 emit_data->output[1] =
3416 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_1, "");
3417 }
3418
3419 LLVMTypeRef si_const_array(LLVMTypeRef elem_type, int num_elements)
3420 {
3421 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
3422 CONST_ADDR_SPACE);
3423 }
3424
3425 static void si_llvm_emit_ddxy(
3426 const struct lp_build_tgsi_action *action,
3427 struct lp_build_tgsi_context *bld_base,
3428 struct lp_build_emit_data *emit_data)
3429 {
3430 struct si_shader_context *ctx = si_shader_context(bld_base);
3431 struct gallivm_state *gallivm = &ctx->gallivm;
3432 unsigned opcode = emit_data->info->opcode;
3433 LLVMValueRef val;
3434 int idx;
3435 unsigned mask;
3436
3437 if (opcode == TGSI_OPCODE_DDX_FINE)
3438 mask = AC_TID_MASK_LEFT;
3439 else if (opcode == TGSI_OPCODE_DDY_FINE)
3440 mask = AC_TID_MASK_TOP;
3441 else
3442 mask = AC_TID_MASK_TOP_LEFT;
3443
3444 /* for DDX we want to next X pixel, DDY next Y pixel. */
3445 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3446
3447 val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");
3448 val = ac_build_ddxy(&ctx->ac, ctx->screen->has_ds_bpermute,
3449 mask, idx, ctx->lds, val);
3450 emit_data->output[emit_data->chan] = val;
3451 }
3452
3453 /*
3454 * this takes an I,J coordinate pair,
3455 * and works out the X and Y derivatives.
3456 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3457 */
3458 static LLVMValueRef si_llvm_emit_ddxy_interp(
3459 struct lp_build_tgsi_context *bld_base,
3460 LLVMValueRef interp_ij)
3461 {
3462 struct si_shader_context *ctx = si_shader_context(bld_base);
3463 struct gallivm_state *gallivm = &ctx->gallivm;
3464 LLVMValueRef result[4], a;
3465 unsigned i;
3466
3467 for (i = 0; i < 2; i++) {
3468 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
3469 LLVMConstInt(ctx->i32, i, 0), "");
3470 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3471 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3472 }
3473
3474 return lp_build_gather_values(gallivm, result, 4);
3475 }
3476
/* Fetch the extra operands of INTERP_OFFSET / INTERP_SAMPLE into
 * args[0..1]: either the XY pixel offset, or the position of the
 * requested sample relative to the pixel center. */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
					     ctx->i32, "");
		sample_position = load_sample_position(ctx, sample_id);

		emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_0, "");

		/* Sample positions are in [0,1]; subtract 0.5 to make them
		 * offsets relative to the pixel center. */
		emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
3520
/* Emit TGSI INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET: interpolate a
 * PS input (possibly an indirectly-indexed input array) at the requested
 * location, writing one value per destination channel. */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *input = &inst->Src[0];
	int input_base, input_array_size;
	int chan;
	int i;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef array_idx;
	int interp_param_idx;
	unsigned interp;
	unsigned location;

	assert(input->Register.File == TGSI_FILE_INPUT);

	/* Determine the range of inputs that may be addressed, and the
	 * dynamic index into that range. */
	if (input->Register.Indirect) {
		unsigned array_id = input->Indirect.ArrayID;

		if (array_id) {
			input_base = info->input_array_first[array_id];
			input_array_size = info->input_array_last[array_id] - input_base + 1;
		} else {
			input_base = inst->Src[0].Register.Index;
			input_array_size = info->num_inputs - input_base;
		}

		array_idx = get_indirect_index(ctx, &input->Indirect,
					       input->Register.Index - input_base);
	} else {
		input_base = inst->Src[0].Register.Index;
		input_array_size = 1;
		array_idx = ctx->i32_0;
	}

	interp = shader->selector->info.input_interpolate[input_base];

	/* OFFSET and SAMPLE interpolate relative to the pixel center;
	 * INTERP_CENTROID uses the centroid location. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(gallivm, ij_out, 2);
	}

	if (interp_param) {
		interp_param = LLVMBuildBitCast(gallivm->builder,
			interp_param, LLVMVectorType(ctx->f32, 2), "");
	}

	/* Interpolate every possibly-addressed input into a vector, then
	 * select the element picked by array_idx. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan;
		LLVMValueRef gather = LLVMGetUndef(LLVMVectorType(ctx->f32, input_array_size));
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = LLVMConstInt(ctx->i32, schan, 0);

		for (unsigned i = 0; i < input_array_size; ++i) {
			LLVMValueRef attr_number = LLVMConstInt(ctx->i32, input_base + i, false);
			LLVMValueRef v;

			if (interp_param) {
				/* NOTE(review): this bitcast repeats the one
				 * above the loop, and the LLVMValueRef "i"
				 * below shadows the loop counter within this
				 * if-block — both harmless but confusing. */
				interp_param = LLVMBuildBitCast(gallivm->builder,
					interp_param, LLVMVectorType(ctx->f32, 2), "");
				LLVMValueRef i = LLVMBuildExtractElement(
					gallivm->builder, interp_param, ctx->i32_0, "");
				LLVMValueRef j = LLVMBuildExtractElement(
					gallivm->builder, interp_param, ctx->i32_1, "");
				v = ac_build_fs_interp(&ctx->ac,
						       llvm_chan, attr_number, params,
						       i, j);
			} else {
				v = ac_build_fs_interp_mov(&ctx->ac,
							   LLVMConstInt(ctx->i32, 2, 0), /* P0 */
							   llvm_chan, attr_number, params);
			}

			gather = LLVMBuildInsertElement(gallivm->builder,
				gather, v, LLVMConstInt(ctx->i32, i, false), "");
		}

		emit_data->output[chan] = LLVMBuildExtractElement(
			gallivm->builder, gather, array_idx, "");
	}
}
3657
/* Return a 64-bit mask (one bit per lane) of the lanes where "value" is
 * non-zero, using the llvm.amdgcn.icmp ballot intrinsic with an NE-zero
 * comparison. */
static LLVMValueRef si_emit_ballot(struct si_shader_context *ctx,
				   LLVMValueRef value)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &args[0]);

	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(gallivm->builder, args[0], ctx->i32, "");

	return lp_build_intrinsic(gallivm->builder,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  LP_FUNC_ATTR_NOUNWIND |
				  LP_FUNC_ATTR_READNONE |
				  LP_FUNC_ATTR_CONVERGENT);
}
3683
3684 static void vote_all_emit(
3685 const struct lp_build_tgsi_action *action,
3686 struct lp_build_tgsi_context *bld_base,
3687 struct lp_build_emit_data *emit_data)
3688 {
3689 struct si_shader_context *ctx = si_shader_context(bld_base);
3690 struct gallivm_state *gallivm = &ctx->gallivm;
3691 LLVMValueRef active_set, vote_set;
3692 LLVMValueRef tmp;
3693
3694 active_set = si_emit_ballot(ctx, ctx->i32_1);
3695 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3696
3697 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3698 emit_data->output[emit_data->chan] =
3699 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3700 }
3701
3702 static void vote_any_emit(
3703 const struct lp_build_tgsi_action *action,
3704 struct lp_build_tgsi_context *bld_base,
3705 struct lp_build_emit_data *emit_data)
3706 {
3707 struct si_shader_context *ctx = si_shader_context(bld_base);
3708 struct gallivm_state *gallivm = &ctx->gallivm;
3709 LLVMValueRef vote_set;
3710 LLVMValueRef tmp;
3711
3712 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3713
3714 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
3715 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3716 emit_data->output[emit_data->chan] =
3717 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3718 }
3719
3720 static void vote_eq_emit(
3721 const struct lp_build_tgsi_action *action,
3722 struct lp_build_tgsi_context *bld_base,
3723 struct lp_build_emit_data *emit_data)
3724 {
3725 struct si_shader_context *ctx = si_shader_context(bld_base);
3726 struct gallivm_state *gallivm = &ctx->gallivm;
3727 LLVMValueRef active_set, vote_set;
3728 LLVMValueRef all, none, tmp;
3729
3730 active_set = si_emit_ballot(ctx, ctx->i32_1);
3731 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3732
3733 all = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3734 none = LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
3735 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3736 tmp = LLVMBuildOr(gallivm->builder, all, none, "");
3737 emit_data->output[emit_data->chan] =
3738 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3739 }
3740
3741 static void ballot_emit(
3742 const struct lp_build_tgsi_action *action,
3743 struct lp_build_tgsi_context *bld_base,
3744 struct lp_build_emit_data *emit_data)
3745 {
3746 struct si_shader_context *ctx = si_shader_context(bld_base);
3747 LLVMBuilderRef builder = ctx->gallivm.builder;
3748 LLVMValueRef tmp;
3749
3750 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
3751 tmp = si_emit_ballot(ctx, tmp);
3752 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
3753
3754 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
3755 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
3756 }
3757
3758 static void read_invoc_fetch_args(
3759 struct lp_build_tgsi_context *bld_base,
3760 struct lp_build_emit_data *emit_data)
3761 {
3762 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
3763 0, emit_data->src_chan);
3764
3765 /* Always read the source invocation (= lane) from the X channel. */
3766 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
3767 1, TGSI_CHAN_X);
3768 emit_data->arg_count = 2;
3769 }
3770
3771 static void read_lane_emit(
3772 const struct lp_build_tgsi_action *action,
3773 struct lp_build_tgsi_context *bld_base,
3774 struct lp_build_emit_data *emit_data)
3775 {
3776 struct si_shader_context *ctx = si_shader_context(bld_base);
3777 LLVMBuilderRef builder = ctx->gallivm.builder;
3778
3779 /* We currently have no other way to prevent LLVM from lifting the icmp
3780 * calls to a dominating basic block.
3781 */
3782 emit_optimization_barrier(ctx, &emit_data->args[0]);
3783
3784 for (unsigned i = 0; i < emit_data->arg_count; ++i) {
3785 emit_data->args[i] = LLVMBuildBitCast(builder, emit_data->args[i],
3786 ctx->i32, "");
3787 }
3788
3789 emit_data->output[emit_data->chan] =
3790 ac_build_intrinsic(&ctx->ac, action->intr_name,
3791 ctx->i32, emit_data->args, emit_data->arg_count,
3792 AC_FUNC_ATTR_READNONE |
3793 AC_FUNC_ATTR_CONVERGENT);
3794 }
3795
3796 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3797 struct lp_build_emit_data *emit_data)
3798 {
3799 struct si_shader_context *ctx = si_shader_context(bld_base);
3800 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3801 LLVMValueRef imm;
3802 unsigned stream;
3803
3804 assert(src0.File == TGSI_FILE_IMMEDIATE);
3805
3806 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
3807 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
3808 return stream;
3809 }
3810
/* Emit one vertex from the geometry shader: store all written outputs of
 * the requested stream to the GSVS ring, bump the per-stream vertex
 * counter, and send the EMIT message to the GS hardware.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	unsigned chan, offset;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		/* kill is +1.0 when emission is allowed, -1.0 otherwise;
		 * ac_build_kill terminates the thread on a negative value. */
		kill = lp_build_select(&bld_base->base, can_emit,
				       LLVMConstReal(ctx->f32, 1.0f),
				       LLVMConstReal(ctx->f32, -1.0f));

		ac_build_kill(&ctx->ac, kill);
	} else {
		/* Shader has memory writes: just branch around the stores. */
		lp_build_if(&if_state, gallivm, can_emit);
	}

	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];

		for (chan = 0; chan < 4; chan++) {
			/* Skip channels that are not written or that belong
			 * to a different stream. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			/* Component-major layout: each component occupies
			 * gs_max_out_vertices slots; index by vertex within
			 * the slot and scale to a byte offset (x4). */
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Advance this stream's vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
3900
3901 /* Cut one primitive from the geometry shader */
3902 static void si_llvm_emit_primitive(
3903 const struct lp_build_tgsi_action *action,
3904 struct lp_build_tgsi_context *bld_base,
3905 struct lp_build_emit_data *emit_data)
3906 {
3907 struct si_shader_context *ctx = si_shader_context(bld_base);
3908 unsigned stream;
3909
3910 /* Signal primitive cut */
3911 stream = si_llvm_get_stream(bld_base, emit_data);
3912 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
3913 si_get_gs_wave_id(ctx));
3914 }
3915
3916 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
3917 struct lp_build_tgsi_context *bld_base,
3918 struct lp_build_emit_data *emit_data)
3919 {
3920 struct si_shader_context *ctx = si_shader_context(bld_base);
3921 struct gallivm_state *gallivm = &ctx->gallivm;
3922
3923 /* SI only (thanks to a hw bug workaround):
3924 * The real barrier instruction isn’t needed, because an entire patch
3925 * always fits into a single wave.
3926 */
3927 if (ctx->screen->b.chip_class == SI &&
3928 ctx->type == PIPE_SHADER_TESS_CTRL) {
3929 si_emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
3930 return;
3931 }
3932
3933 lp_build_intrinsic(gallivm->builder,
3934 "llvm.amdgcn.s.barrier",
3935 ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
3936 }
3937
/* Shared TGSI action for the interpolation opcodes: interp_fetch_args
 * gathers the operands and build_interp_intrinsic emits the code
 * (both defined earlier in this file). */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3942
/* Create the main LLVM function for a shader part and decorate its
 * arguments: parameters up to and including last_sgpr get the SGPR-style
 * attributes (inreg, or byval/noalias/dereferenceable for pointers), and
 * target-dependent function attributes are applied.
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr, unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    params, num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 *
		 * NOTE(review): the i + 1 below presumably reflects 1-based
		 * attribute indices in this helper (0 = return value) —
		 * confirm against lp_add_function_attr.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_BYVAL);
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);
	}

	/* 0 means "don't constrain the workgroup size". */
	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
3997
3998 static void declare_streamout_params(struct si_shader_context *ctx,
3999 struct pipe_stream_output_info *so,
4000 LLVMTypeRef *params, LLVMTypeRef i32,
4001 unsigned *num_params)
4002 {
4003 int i;
4004
4005 /* Streamout SGPRs. */
4006 if (so->num_outputs) {
4007 if (ctx->type != PIPE_SHADER_TESS_EVAL)
4008 params[ctx->param_streamout_config = (*num_params)++] = i32;
4009 else
4010 ctx->param_streamout_config = *num_params - 1;
4011
4012 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
4013 }
4014 /* A streamout buffer offset is loaded if the stride is non-zero. */
4015 for (i = 0; i < 4; i++) {
4016 if (!so->stride[i])
4017 continue;
4018
4019 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
4020 }
4021 }
4022
4023 static unsigned llvm_get_type_size(LLVMTypeRef type)
4024 {
4025 LLVMTypeKind kind = LLVMGetTypeKind(type);
4026
4027 switch (kind) {
4028 case LLVMIntegerTypeKind:
4029 return LLVMGetIntTypeWidth(type) / 8;
4030 case LLVMFloatTypeKind:
4031 return 4;
4032 case LLVMPointerTypeKind:
4033 return 8;
4034 case LLVMVectorTypeKind:
4035 return LLVMGetVectorSize(type) *
4036 llvm_get_type_size(LLVMGetElementType(type));
4037 case LLVMArrayTypeKind:
4038 return LLVMGetArrayLength(type) *
4039 llvm_get_type_size(LLVMGetElementType(type));
4040 default:
4041 assert(0);
4042 return 0;
4043 }
4044 }
4045
4046 static void declare_lds_as_pointer(struct si_shader_context *ctx)
4047 {
4048 struct gallivm_state *gallivm = &ctx->gallivm;
4049
4050 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
4051 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, ctx->i32_0,
4052 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
4053 "lds");
4054 }
4055
4056 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4057 {
4058 switch (shader->selector->type) {
4059 case PIPE_SHADER_TESS_CTRL:
4060 /* Return this so that LLVM doesn't remove s_barrier
4061 * instructions on chips where we use s_barrier. */
4062 return shader->selector->screen->b.chip_class >= CIK ? 128 : 64;
4063
4064 case PIPE_SHADER_GEOMETRY:
4065 return shader->selector->screen->b.chip_class >= GFX9 ? 128 : 64;
4066
4067 case PIPE_SHADER_COMPUTE:
4068 break; /* see below */
4069
4070 default:
4071 return 0;
4072 }
4073
4074 const unsigned *properties = shader->selector->info.properties;
4075 unsigned max_work_group_size =
4076 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4077 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4078 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4079
4080 if (!max_work_group_size) {
4081 /* This is a variable group size compute shader,
4082 * compile it for the maximum possible group size.
4083 */
4084 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4085 }
4086 return max_work_group_size;
4087 }
4088
4089 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4090 LLVMTypeRef *params,
4091 unsigned *num_params,
4092 bool assign_params)
4093 {
4094 params[(*num_params)++] = si_const_array(ctx->v4i32,
4095 SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS);
4096 params[(*num_params)++] = si_const_array(ctx->v8i32,
4097 SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2);
4098
4099 if (assign_params) {
4100 ctx->param_const_and_shader_buffers = *num_params - 2;
4101 ctx->param_samplers_and_images = *num_params - 1;
4102 }
4103 }
4104
4105 static void declare_default_desc_pointers(struct si_shader_context *ctx,
4106 LLVMTypeRef *params,
4107 unsigned *num_params)
4108 {
4109 params[ctx->param_rw_buffers = (*num_params)++] =
4110 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
4111 declare_per_stage_desc_pointers(ctx, params, num_params, true);
4112 }
4113
4114 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
4115 LLVMTypeRef *params,
4116 unsigned *num_params)
4117 {
4118 params[ctx->param_vertex_buffers = (*num_params)++] =
4119 si_const_array(ctx->v4i32, SI_NUM_VERTEX_BUFFERS);
4120 params[ctx->param_base_vertex = (*num_params)++] = ctx->i32;
4121 params[ctx->param_start_instance = (*num_params)++] = ctx->i32;
4122 params[ctx->param_draw_id = (*num_params)++] = ctx->i32;
4123 params[ctx->param_vs_state_bits = (*num_params)++] = ctx->i32;
4124 }
4125
4126 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
4127 LLVMTypeRef *params, unsigned *num_params,
4128 unsigned *num_prolog_vgprs)
4129 {
4130 struct si_shader *shader = ctx->shader;
4131
4132 params[ctx->param_vertex_id = (*num_params)++] = ctx->i32;
4133 if (shader->key.as_ls) {
4134 params[ctx->param_rel_auto_id = (*num_params)++] = ctx->i32;
4135 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4136 } else {
4137 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4138 params[ctx->param_vs_prim_id = (*num_params)++] = ctx->i32;
4139 }
4140 params[(*num_params)++] = ctx->i32; /* unused */
4141
4142 if (!shader->is_gs_copy_shader) {
4143 /* Vertex load indices. */
4144 ctx->param_vertex_index0 = (*num_params);
4145 for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
4146 params[(*num_params)++] = ctx->i32;
4147 *num_prolog_vgprs += shader->selector->info.num_inputs;
4148 }
4149 }
4150
4151 static void declare_tes_input_vgprs(struct si_shader_context *ctx,
4152 LLVMTypeRef *params, unsigned *num_params)
4153 {
4154 params[ctx->param_tes_u = (*num_params)++] = ctx->f32;
4155 params[ctx->param_tes_v = (*num_params)++] = ctx->f32;
4156 params[ctx->param_tes_rel_patch_id = (*num_params)++] = ctx->i32;
4157 params[ctx->param_tes_patch_id = (*num_params)++] = ctx->i32;
4158 }
4159
enum {
	/* Convenient merged shader definitions. */
	/* Pseudo stage types beyond PIPE_SHADER_TYPES, used by
	 * create_function() on GFX9 where LS+HS and ES+GS are compiled as
	 * single merged shaders. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4165
/* Declare the main shader function: its SGPR and VGPR input parameters and
 * its return values, per shader stage (including the GFX9 merged stages).
 * The parameter order here must match what the hardware and the shader
 * prolog/epilog parts expect. Also counts input SGPRs/VGPRs and sets up
 * LDS where the stage needs it.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[100]; /* just make it large enough */
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_sgpr, num_params = 0, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;

	/* Set MERGED shaders. */
	if (ctx->screen->b.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_default_desc_pointers(ctx, params, &num_params);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		if (shader->key.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			/* The GS copy shader only keeps rw_buffers of the
			 * default descriptor pointers. */
			if (shader->is_gs_copy_shader)
				num_params = ctx->param_rw_buffers + 1;

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		declare_vs_input_vgprs(ctx, params, &num_params,
				       &num_prolog_vgprs);
		break;

	case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 3; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		/* VS (LS) user SGPRs first; only assigned when compiling the
		 * VS half of the merged shader. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */

		/* TCS user SGPRs; assigned for the TCS half. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_TESS_CTRL);
		last_sgpr = num_params - 1;

		/* VGPRs (first TCS, then VS) */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_GS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		/* ES (VS or TES) user SGPRs first. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						(ctx->type == PIPE_SHADER_VERTEX ||
						 ctx->type == PIPE_SHADER_TESS_EVAL));
		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_specific_input_sgprs(ctx, params, &num_params);
		} else {
			/* TESS_EVAL (and also GEOMETRY):
			 * Declare as many input SGPRs as the VS has. */
			params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
			params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[ctx->param_vs_state_bits = num_params++] = ctx->i32; /* unused */
		}

		/* GS user SGPRs; assigned for the GS half. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_GEOMETRY);
		last_sgpr = num_params - 1;

		/* VGPRs (first GS, then VS/TES) */
		params[ctx->param_gs_vtx01_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx23_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx45_offset = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx, params, &num_params);
		}

		if (ctx->type == PIPE_SHADER_VERTEX ||
		    ctx->type == PIPE_SHADER_TESS_EVAL) {
			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;

		if (shader->key.as_es) {
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		declare_tes_input_vgprs(ctx, params, &num_params);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_wave_id = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_gs_vtx0_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx1_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx2_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx3_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx4_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx5_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		break;

	case PIPE_SHADER_FRAGMENT:
		/* PS uses fixed SI_PARAM_* indices rather than running
		 * num_params counters. */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		shader->info.face_vgpr_index = 20;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			assert(num_params + num_color_elements <= ARRAY_SIZE(params));
			for (i = 0; i < num_color_elements; i++)
				params[num_params++] = ctx->f32;

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_default_desc_pointers(ctx, params, &num_params);
		/* Grid/block sizes and block ids are only declared when the
		 * shader actually uses them. */
		if (shader->selector->info.uses_grid_size)
			params[ctx->param_grid_size = num_params++] = v3i32;
		if (shader->selector->info.uses_block_size)
			params[ctx->param_block_size = num_params++] = v3i32;

		for (i = 0; i < 3; i++) {
			ctx->param_block_id[i] = -1;
			if (shader->selector->info.uses_block_id[i])
				params[ctx->param_block_id[i] = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		params[ctx->param_thread_id = num_params++] = v3i32;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, "main", returns, num_returns, params,
			   num_params, last_sgpr,
			   si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    ctx->separate_prolog) {
		si_llvm_add_attribute(ctx->main_fn,
				      "InitialPSInputAddr",
				      S_0286D0_PERSP_SAMPLE_ENA(1) |
				      S_0286D0_PERSP_CENTER_ENA(1) |
				      S_0286D0_PERSP_CENTROID_ENA(1) |
				      S_0286D0_LINEAR_SAMPLE_ENA(1) |
				      S_0286D0_LINEAR_CENTER_ENA(1) |
				      S_0286D0_LINEAR_CENTROID_ENA(1) |
				      S_0286D0_FRONT_FACE_ENA(1) |
				      S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers: params up to last_sgpr are SGPRs, the rest
	 * are VGPRs; sizes are in dwords. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	for (; i < num_params; ++i)
		shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Prolog-provided VGPRs are not inputs of the main part. */
	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	/* Without ds_bpermute, derivatives and interpolation opcodes go
	 * through a small LDS scratch area. */
	if (!ctx->screen->has_ds_bpermute &&
	    bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if (shader->key.as_ls ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (ctx->screen->b.chip_class >= GFX9 &&
	     (shader->key.as_es ||
	      ctx->type == PIPE_SHADER_GEOMETRY)))
		declare_lds_as_pointer(ctx);
}
4520
/**
 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
 * for later use.
 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;

	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	/* ESGS ring descriptor: only on <= VI (GFX9 has the ESGS ring
	 * buffer in LDS and doesn't need one). */
	if (ctx->screen->b.chip_class <= VI &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							  : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The copy shader only reads stream 0's ring. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 * v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 * t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Build one patched descriptor per stream with outputs. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* Add the per-stream offset to the 64-bit base
			 * address (descriptor words 0-1, viewed as i64). */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			/* Word 1: OR in the stride and enable swizzling. */
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			/* Word 2: num_records. */
			ring = LLVMBuildInsertElement(builder, ring,
						      LLVMConstInt(ctx->i32, num_records, 0),
						      LLVMConstInt(ctx->i32, 2, 0), "");
			/* Word 3: format, element size, index stride. */
			ring = LLVMBuildInsertElement(builder, ring,
						      LLVMConstInt(ctx->i32,
								   S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
								   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
								   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
								   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
								   S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
								   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
								   S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
								   S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
								   S_008F0C_ADD_TID_ENABLE(1),
								   0),
						      LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}
}
4620
4621 static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
4622 LLVMValueRef param_rw_buffers,
4623 unsigned param_pos_fixed_pt)
4624 {
4625 struct gallivm_state *gallivm = &ctx->gallivm;
4626 LLVMBuilderRef builder = gallivm->builder;
4627 LLVMValueRef slot, desc, offset, row, bit, address[2];
4628
4629 /* Use the fixed-point gl_FragCoord input.
4630 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
4631 * per coordinate to get the repeating effect.
4632 */
4633 address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
4634 address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);
4635
4636 /* Load the buffer descriptor. */
4637 slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
4638 desc = ac_build_indexed_load_const(&ctx->ac, param_rw_buffers, slot);
4639
4640 /* The stipple pattern is 32x32, each row has 32 bits. */
4641 offset = LLVMBuildMul(builder, address[1],
4642 LLVMConstInt(ctx->i32, 4, 0), "");
4643 row = buffer_load_const(ctx, desc, offset);
4644 row = LLVMBuildBitCast(builder, row, ctx->i32, "");
4645 bit = LLVMBuildLShr(builder, row, address[0], "");
4646 bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
4647
4648 /* The intrinsic kills the thread if arg < 0. */
4649 bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
4650 LLVMConstReal(ctx->f32, -1), "");
4651 ac_build_kill(&ctx->ac, bit);
4652 }
4653
4654 void si_shader_binary_read_config(struct ac_shader_binary *binary,
4655 struct si_shader_config *conf,
4656 unsigned symbol_offset)
4657 {
4658 unsigned i;
4659 const unsigned char *config =
4660 ac_shader_binary_config_start(binary, symbol_offset);
4661 bool really_needs_scratch = false;
4662
4663 /* LLVM adds SGPR spills to the scratch size.
4664 * Find out if we really need the scratch buffer.
4665 */
4666 for (i = 0; i < binary->reloc_count; i++) {
4667 const struct ac_shader_reloc *reloc = &binary->relocs[i];
4668
4669 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
4670 !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4671 really_needs_scratch = true;
4672 break;
4673 }
4674 }
4675
4676 /* XXX: We may be able to emit some of these values directly rather than
4677 * extracting fields to be emitted later.
4678 */
4679
4680 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
4681 unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
4682 unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
4683 switch (reg) {
4684 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
4685 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
4686 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
4687 case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
4688 case R_00B848_COMPUTE_PGM_RSRC1:
4689 conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
4690 conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
4691 conf->float_mode = G_00B028_FLOAT_MODE(value);
4692 conf->rsrc1 = value;
4693 break;
4694 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
4695 conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
4696 break;
4697 case R_00B84C_COMPUTE_PGM_RSRC2:
4698 conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
4699 conf->rsrc2 = value;
4700 break;
4701 case R_0286CC_SPI_PS_INPUT_ENA:
4702 conf->spi_ps_input_ena = value;
4703 break;
4704 case R_0286D0_SPI_PS_INPUT_ADDR:
4705 conf->spi_ps_input_addr = value;
4706 break;
4707 case R_0286E8_SPI_TMPRING_SIZE:
4708 case R_00B860_COMPUTE_TMPRING_SIZE:
4709 /* WAVESIZE is in units of 256 dwords. */
4710 if (really_needs_scratch)
4711 conf->scratch_bytes_per_wave =
4712 G_00B860_WAVESIZE(value) * 256 * 4;
4713 break;
4714 case 0x4: /* SPILLED_SGPRS */
4715 conf->spilled_sgprs = value;
4716 break;
4717 case 0x8: /* SPILLED_VGPRS */
4718 conf->spilled_vgprs = value;
4719 break;
4720 default:
4721 {
4722 static bool printed;
4723
4724 if (!printed) {
4725 fprintf(stderr, "Warning: LLVM emitted unknown "
4726 "config register: 0x%x\n", reg);
4727 printed = true;
4728 }
4729 }
4730 break;
4731 }
4732 }
4733
4734 if (!conf->spi_ps_input_addr)
4735 conf->spi_ps_input_addr = conf->spi_ps_input_ena;
4736 }
4737
4738 void si_shader_apply_scratch_relocs(struct si_shader *shader,
4739 uint64_t scratch_va)
4740 {
4741 unsigned i;
4742 uint32_t scratch_rsrc_dword0 = scratch_va;
4743 uint32_t scratch_rsrc_dword1 =
4744 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
4745
4746 /* Enable scratch coalescing. */
4747 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
4748
4749 for (i = 0 ; i < shader->binary.reloc_count; i++) {
4750 const struct ac_shader_reloc *reloc =
4751 &shader->binary.relocs[i];
4752 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
4753 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4754 &scratch_rsrc_dword0, 4);
4755 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4756 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4757 &scratch_rsrc_dword1, 4);
4758 }
4759 }
4760 }
4761
4762 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
4763 {
4764 unsigned size = shader->binary.code_size;
4765
4766 if (shader->prolog)
4767 size += shader->prolog->binary.code_size;
4768 if (shader->previous_stage)
4769 size += shader->previous_stage->binary.code_size;
4770 if (shader->prolog2)
4771 size += shader->prolog2->binary.code_size;
4772 if (shader->epilog)
4773 size += shader->epilog->binary.code_size;
4774 return size;
4775 }
4776
4777 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
4778 {
4779 const struct ac_shader_binary *prolog =
4780 shader->prolog ? &shader->prolog->binary : NULL;
4781 const struct ac_shader_binary *previous_stage =
4782 shader->previous_stage ? &shader->previous_stage->binary : NULL;
4783 const struct ac_shader_binary *prolog2 =
4784 shader->prolog2 ? &shader->prolog2->binary : NULL;
4785 const struct ac_shader_binary *epilog =
4786 shader->epilog ? &shader->epilog->binary : NULL;
4787 const struct ac_shader_binary *mainb = &shader->binary;
4788 unsigned bo_size = si_get_shader_binary_size(shader) +
4789 (!epilog ? mainb->rodata_size : 0);
4790 unsigned char *ptr;
4791
4792 assert(!prolog || !prolog->rodata_size);
4793 assert(!previous_stage || !previous_stage->rodata_size);
4794 assert(!prolog2 || !prolog2->rodata_size);
4795 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
4796 !mainb->rodata_size);
4797 assert(!epilog || !epilog->rodata_size);
4798
4799 r600_resource_reference(&shader->bo, NULL);
4800 shader->bo = (struct r600_resource*)
4801 pipe_buffer_create(&sscreen->b.b, 0,
4802 PIPE_USAGE_IMMUTABLE,
4803 align(bo_size, SI_CPDMA_ALIGNMENT));
4804 if (!shader->bo)
4805 return -ENOMEM;
4806
4807 /* Upload. */
4808 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
4809 PIPE_TRANSFER_READ_WRITE |
4810 PIPE_TRANSFER_UNSYNCHRONIZED);
4811
4812 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
4813 * endian-independent. */
4814 if (prolog) {
4815 memcpy(ptr, prolog->code, prolog->code_size);
4816 ptr += prolog->code_size;
4817 }
4818 if (previous_stage) {
4819 memcpy(ptr, previous_stage->code, previous_stage->code_size);
4820 ptr += previous_stage->code_size;
4821 }
4822 if (prolog2) {
4823 memcpy(ptr, prolog2->code, prolog2->code_size);
4824 ptr += prolog2->code_size;
4825 }
4826
4827 memcpy(ptr, mainb->code, mainb->code_size);
4828 ptr += mainb->code_size;
4829
4830 if (epilog)
4831 memcpy(ptr, epilog->code, epilog->code_size);
4832 else if (mainb->rodata_size > 0)
4833 memcpy(ptr, mainb->rodata, mainb->rodata_size);
4834
4835 sscreen->b.ws->buffer_unmap(shader->bo->buf);
4836 return 0;
4837 }
4838
4839 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
4840 struct pipe_debug_callback *debug,
4841 const char *name, FILE *file)
4842 {
4843 char *line, *p;
4844 unsigned i, count;
4845
4846 if (binary->disasm_string) {
4847 fprintf(file, "Shader %s disassembly:\n", name);
4848 fprintf(file, "%s", binary->disasm_string);
4849
4850 if (debug && debug->debug_message) {
4851 /* Very long debug messages are cut off, so send the
4852 * disassembly one line at a time. This causes more
4853 * overhead, but on the plus side it simplifies
4854 * parsing of resulting logs.
4855 */
4856 pipe_debug_message(debug, SHADER_INFO,
4857 "Shader Disassembly Begin");
4858
4859 line = binary->disasm_string;
4860 while (*line) {
4861 p = util_strchrnul(line, '\n');
4862 count = p - line;
4863
4864 if (count) {
4865 pipe_debug_message(debug, SHADER_INFO,
4866 "%.*s", count, line);
4867 }
4868
4869 if (!*p)
4870 break;
4871 line = p + 1;
4872 }
4873
4874 pipe_debug_message(debug, SHADER_INFO,
4875 "Shader Disassembly End");
4876 }
4877 } else {
4878 fprintf(file, "Shader %s binary:\n", name);
4879 for (i = 0; i < binary->code_size; i += 4) {
4880 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
4881 binary->code[i + 3], binary->code[i + 2],
4882 binary->code[i + 1], binary->code[i]);
4883 }
4884 }
4885 }
4886
/**
 * Print the shader's register/LDS/scratch usage and an estimated
 * waves-per-SIMD occupancy, both to \p file and to the debug callback.
 *
 * \param processor          PIPE_SHADER_* stage the stats refer to
 * \param check_debug_option if true, only print when shader dumping is
 *                           enabled for this stage
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
			         const struct si_shader *shader,
			         struct pipe_debug_callback *debug,
			         unsigned processor,
			         FILE *file,
			         bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
	unsigned code_size = si_get_shader_binary_size(shader);
	/* LDS allocation granularity: 512 bytes on CIK+, 256 before. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	/* Start from the per-SIMD wave maximum and clamp it below. */
	unsigned max_simd_waves = 10;

	/* Compute LDS usage for PS. */
	switch (processor) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			/* Spread the workgroup's LDS over its 64-lane waves. */
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* SGPR budget per SIMD: 800 on VI+, 512 before. */
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	/* Always send the one-line summary to the debug callback. */
	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
4979
4980 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
4981 {
4982 switch (processor) {
4983 case PIPE_SHADER_VERTEX:
4984 if (shader->key.as_es)
4985 return "Vertex Shader as ES";
4986 else if (shader->key.as_ls)
4987 return "Vertex Shader as LS";
4988 else
4989 return "Vertex Shader as VS";
4990 case PIPE_SHADER_TESS_CTRL:
4991 return "Tessellation Control Shader";
4992 case PIPE_SHADER_TESS_EVAL:
4993 if (shader->key.as_es)
4994 return "Tessellation Evaluation Shader as ES";
4995 else
4996 return "Tessellation Evaluation Shader as VS";
4997 case PIPE_SHADER_GEOMETRY:
4998 if (shader->is_gs_copy_shader)
4999 return "GS Copy Shader as VS";
5000 else
5001 return "Geometry Shader";
5002 case PIPE_SHADER_FRAGMENT:
5003 return "Pixel Shader";
5004 case PIPE_SHADER_COMPUTE:
5005 return "Compute Shader";
5006 default:
5007 return "Unknown Shader";
5008 }
5009 }
5010
/**
 * Dump a shader for debugging: its key, recorded LLVM IR (if any), the
 * disassembly of every part, and the resource-usage statistics.
 *
 * \param processor          PIPE_SHADER_* stage
 * \param file               stream for human-readable output
 * \param check_debug_option if true, honor the per-stage shader-dump debug
 *                           flags; if false, dump unconditionally
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor))
		si_dump_shader_key(processor, shader, file);

	/* llvm_ir_string is only set when IR recording was enabled. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (r600_can_dump_shader(&sscreen->b, processor) &&
	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the same order they are uploaded:
		 * prolog, previous stage, prolog2, main, epilog.
		 */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, debug, processor, file,
			     check_debug_option);
}
5051
/**
 * Run the LLVM backend on \p mod, store the resulting code in \p binary
 * and parse its config section into \p conf.
 *
 * \param name  shader name used in debug output
 * \return 0 on success, non-zero on compile failure, -EINVAL when a shader
 *         that must stay concatenable carries rodata
 */
static int si_compile_llvm(struct si_screen *sscreen,
			   struct ac_shader_binary *binary,
			   struct si_shader_config *conf,
			   LLVMTargetMachineRef tm,
			   LLVMModuleRef mod,
			   struct pipe_debug_callback *debug,
			   unsigned processor,
			   const char *name)
{
	int r = 0;
	unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);

	if (r600_can_dump_shader(&sscreen->b, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			ac_dump_module(mod);
			fprintf(stderr, "\n");
		}
	}

	/* Keep a copy of the IR so si_shader_dump can print it later. */
	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* si_replace_shader may substitute a pre-built binary (debug
	 * feature); otherwise compile the module normally.
	 */
	if (!si_replace_shader(count, binary)) {
		r = si_llvm_compile(mod, binary, tm, debug);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The config data has been consumed above; release it now. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;

	/* Some shaders can't have rodata because their binaries can be
	 * concatenated.
	 */
	if (binary->rodata_size &&
	    (processor == PIPE_SHADER_VERTEX ||
	     processor == PIPE_SHADER_TESS_CTRL ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_FRAGMENT)) {
		fprintf(stderr, "radeonsi: The shader can't have rodata.");
		return -EINVAL;
	}

	return r;
}
5121
5122 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5123 {
5124 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5125 LLVMBuildRetVoid(ctx->gallivm.builder);
5126 else
5127 LLVMBuildRet(ctx->gallivm.builder, ret);
5128 }
5129
/* Generate code for the hardware VS shader stage to go with a geometry shader */
/**
 * Build and compile the GS copy shader: a VS-stage shader that reads the
 * GS outputs back from the GSVS ring, runs streamout for all streams, and
 * exports stream 0 as regular VS outputs.
 *
 * \return the compiled shader (caller owns it), or NULL on failure.
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	struct gallivm_state *gallivm = &ctx.gallivm;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}


	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	/* The copy shader runs on the HW VS stage. */
	ctx.type = PIPE_SHADER_VERTEX;

	builder = gallivm->builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Per-vertex dword offset into the GSVS ring. */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, LLVMGetParam(ctx.main_fn,
						    ctx.param_vertex_id), 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* 2 bits per channel encode which stream it belongs to. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* Switch on the stream ID; each active stream gets its own block. */
	end_bb = LLVMAppendBasicBlockInContext(gallivm->context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams > 0 are only useful for streamout. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(gallivm->context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels of other streams stay undefined. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 is exported as VS outputs. */
		if (stream == 0)
			si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(gallivm->builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5280
5281 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5282 const struct si_vs_prolog_bits *prolog,
5283 const char *prefix, FILE *f)
5284 {
5285 fprintf(f, " %s.instance_divisors = {", prefix);
5286 for (int i = 0; i < ARRAY_SIZE(prolog->instance_divisors); i++) {
5287 fprintf(f, !i ? "%u" : ", %u",
5288 prolog->instance_divisors[i]);
5289 }
5290 fprintf(f, "}\n");
5291
5292 fprintf(f, " mono.vs.fix_fetch = {");
5293 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5294 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5295 fprintf(f, "}\n");
5296 }
5297
/**
 * Print the shader key (the state the shader variant was compiled for)
 * to \p f, one field per line, grouped per shader stage.
 */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  as_ls = %u\n", key->as_ls);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the TCS is merged with an LS (VS) part. */
		if (shader->selector->screen->b.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, "  part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, "  mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The copy shader has no interesting key fields. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the GS is merged with an ES (VS) part. */
		if (shader->selector->screen->b.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, "  part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, "  part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, "  part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, "  part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, "  part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, "  part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, "  part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, "  part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, "  part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, "  part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, "  part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, "  part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, "  part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, "  part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Optimization fields only apply when the shader runs as HW VS. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, "  opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, "  opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5377
5378 static void si_init_shader_ctx(struct si_shader_context *ctx,
5379 struct si_screen *sscreen,
5380 LLVMTargetMachineRef tm)
5381 {
5382 struct lp_build_tgsi_context *bld_base;
5383
5384 si_llvm_context_init(ctx, sscreen, tm);
5385
5386 bld_base = &ctx->bld_base;
5387 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
5388
5389 bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
5390 bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
5391 bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;
5392
5393 bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;
5394
5395 bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;
5396
5397 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
5398 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
5399 bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
5400 bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;
5401
5402 bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
5403 bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
5404 bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
5405 bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
5406 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
5407 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
5408 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
5409 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
5410 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;
5411
5412 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
5413 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
5414 bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
5415 }
5416
5417 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5418 {
5419 struct si_shader *shader = ctx->shader;
5420 struct tgsi_shader_info *info = &shader->selector->info;
5421
5422 if ((ctx->type != PIPE_SHADER_VERTEX &&
5423 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5424 shader->key.as_ls ||
5425 shader->key.as_es)
5426 return;
5427
5428 ac_optimize_vs_outputs(&ctx->ac,
5429 ctx->main_fn,
5430 shader->info.vs_output_param_offset,
5431 info->num_outputs,
5432 &shader->info.nr_param_exports);
5433 }
5434
5435 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5436 {
5437 ctx->shader->config.private_mem_vgprs = 0;
5438
5439 /* Process all LLVM instructions. */
5440 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5441 while (bb) {
5442 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5443
5444 while (next) {
5445 LLVMValueRef inst = next;
5446 next = LLVMGetNextInstruction(next);
5447
5448 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5449 continue;
5450
5451 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5452 /* No idea why LLVM aligns allocas to 4 elements. */
5453 unsigned alignment = LLVMGetAlignment(inst);
5454 unsigned dw_size = align(llvm_get_type_size(type) / 4, alignment);
5455 ctx->shader->config.private_mem_vgprs += dw_size;
5456 }
5457 bb = LLVMGetNextBasicBlock(bb);
5458 }
5459 }
5460
5461 static void si_init_exec_full_mask(struct si_shader_context *ctx)
5462 {
5463 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
5464 lp_build_intrinsic(ctx->gallivm.builder,
5465 "llvm.amdgcn.init.exec", ctx->voidt,
5466 &full_mask, 1, LP_FUNC_ATTR_CONVERGENT);
5467 }
5468
5469 static void si_init_exec_from_input(struct si_shader_context *ctx,
5470 unsigned param, unsigned bitoffset)
5471 {
5472 LLVMValueRef args[] = {
5473 LLVMGetParam(ctx->main_fn, param),
5474 LLVMConstInt(ctx->i32, bitoffset, 0),
5475 };
5476 lp_build_intrinsic(ctx->gallivm.builder,
5477 "llvm.amdgcn.init.exec.from.input",
5478 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5479 }
5480
/**
 * Translate the shader's TGSI tokens into LLVM IR in the context's module.
 *
 * \param is_monolithic  true for monolithic (all-parts-in-one) compiles;
 *                       affects GFX9 merged-shader EXEC/barrier setup
 * \return true on success, false on an unsupported stage or TGSI
 *         translation failure
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	/* Select the per-stage input-fetch and epilogue callbacks. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC. If the prolog is present, set EXEC there instead.
	 * - Add a barrier before the second shader.
	 *
	 * The same thing for monolithic shaders is done in
	 * si_build_wrapper_function.
	 */
	if (ctx->screen->b.chip_class >= GFX9 && !is_monolithic) {
		if (sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !sel->vs_needs_prolog))) {
			/* First-half shader: thread count in bits 0.. */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			/* Second-half shader: thread count at bit 8, plus a
			 * barrier so the first half finishes before it runs.
			 */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 8);
			si_llvm_emit_barrier(NULL, bld_base, NULL);
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* Allocate the emitted-vertex counters (one per stream). */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (ctx->type == PIPE_SHADER_FRAGMENT && sel->info.uses_kill &&
	    ctx->screen->b.debug_flags & DBG_FS_CORRECT_DERIVS_AFTER_KILL) {
		/* This is initialized to 0.0 = not kill. */
		ctx->postponed_kill = lp_build_alloca(&ctx->gallivm, ctx->f32, "");
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
5576
5577 /**
5578 * Compute the VS prolog key, which contains all the information needed to
5579 * build the VS prolog function, and set shader->info bits where needed.
5580 *
5581 * \param info Shader info of the vertex shader.
5582 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
5583 * \param prolog_key Key of the VS prolog
5584 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
5585 * \param key Output shader part key.
5586 */
5587 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
5588 unsigned num_input_sgprs,
5589 const struct si_vs_prolog_bits *prolog_key,
5590 struct si_shader *shader_out,
5591 union si_shader_part_key *key)
5592 {
5593 memset(key, 0, sizeof(*key));
5594 key->vs_prolog.states = *prolog_key;
5595 key->vs_prolog.num_input_sgprs = num_input_sgprs;
5596 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
5597 key->vs_prolog.as_ls = shader_out->key.as_ls;
5598
5599 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
5600 key->vs_prolog.as_ls = 1;
5601 key->vs_prolog.num_merged_next_stage_vgprs = 2;
5602 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
5603 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5604 }
5605
5606 /* Set the instanceID flag. */
5607 for (unsigned i = 0; i < info->num_inputs; i++)
5608 if (key->vs_prolog.states.instance_divisors[i])
5609 shader_out->info.uses_instanceid = true;
5610 }
5611
5612 /**
5613 * Compute the PS prolog key, which contains all the information needed to
5614 * build the PS prolog function, and set related bits in shader->config.
5615 */
5616 static void si_get_ps_prolog_key(struct si_shader *shader,
5617 union si_shader_part_key *key,
5618 bool separate_prolog)
5619 {
5620 struct tgsi_shader_info *info = &shader->selector->info;
5621
5622 memset(key, 0, sizeof(*key));
5623 key->ps_prolog.states = shader->key.part.ps.prolog;
5624 key->ps_prolog.colors_read = info->colors_read;
5625 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5626 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5627 key->ps_prolog.wqm = info->uses_derivatives &&
5628 (key->ps_prolog.colors_read ||
5629 key->ps_prolog.states.force_persp_sample_interp ||
5630 key->ps_prolog.states.force_linear_sample_interp ||
5631 key->ps_prolog.states.force_persp_center_interp ||
5632 key->ps_prolog.states.force_linear_center_interp ||
5633 key->ps_prolog.states.bc_optimize_for_persp ||
5634 key->ps_prolog.states.bc_optimize_for_linear);
5635
5636 if (info->colors_read) {
5637 unsigned *color = shader->selector->color_attr_index;
5638
5639 if (shader->key.part.ps.prolog.color_two_side) {
5640 /* BCOLORs are stored after the last input. */
5641 key->ps_prolog.num_interp_inputs = info->num_inputs;
5642 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5643 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5644 }
5645
5646 for (unsigned i = 0; i < 2; i++) {
5647 unsigned interp = info->input_interpolate[color[i]];
5648 unsigned location = info->input_interpolate_loc[color[i]];
5649
5650 if (!(info->colors_read & (0xf << i*4)))
5651 continue;
5652
5653 key->ps_prolog.color_attr_index[i] = color[i];
5654
5655 if (shader->key.part.ps.prolog.flatshade_colors &&
5656 interp == TGSI_INTERPOLATE_COLOR)
5657 interp = TGSI_INTERPOLATE_CONSTANT;
5658
5659 switch (interp) {
5660 case TGSI_INTERPOLATE_CONSTANT:
5661 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5662 break;
5663 case TGSI_INTERPOLATE_PERSPECTIVE:
5664 case TGSI_INTERPOLATE_COLOR:
5665 /* Force the interpolation location for colors here. */
5666 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5667 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5668 if (shader->key.part.ps.prolog.force_persp_center_interp)
5669 location = TGSI_INTERPOLATE_LOC_CENTER;
5670
5671 switch (location) {
5672 case TGSI_INTERPOLATE_LOC_SAMPLE:
5673 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5674 shader->config.spi_ps_input_ena |=
5675 S_0286CC_PERSP_SAMPLE_ENA(1);
5676 break;
5677 case TGSI_INTERPOLATE_LOC_CENTER:
5678 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5679 shader->config.spi_ps_input_ena |=
5680 S_0286CC_PERSP_CENTER_ENA(1);
5681 break;
5682 case TGSI_INTERPOLATE_LOC_CENTROID:
5683 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5684 shader->config.spi_ps_input_ena |=
5685 S_0286CC_PERSP_CENTROID_ENA(1);
5686 break;
5687 default:
5688 assert(0);
5689 }
5690 break;
5691 case TGSI_INTERPOLATE_LINEAR:
5692 /* Force the interpolation location for colors here. */
5693 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5694 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5695 if (shader->key.part.ps.prolog.force_linear_center_interp)
5696 location = TGSI_INTERPOLATE_LOC_CENTER;
5697
5698 /* The VGPR assignment for non-monolithic shaders
5699 * works because InitialPSInputAddr is set on the
5700 * main shader and PERSP_PULL_MODEL is never used.
5701 */
5702 switch (location) {
5703 case TGSI_INTERPOLATE_LOC_SAMPLE:
5704 key->ps_prolog.color_interp_vgpr_index[i] =
5705 separate_prolog ? 6 : 9;
5706 shader->config.spi_ps_input_ena |=
5707 S_0286CC_LINEAR_SAMPLE_ENA(1);
5708 break;
5709 case TGSI_INTERPOLATE_LOC_CENTER:
5710 key->ps_prolog.color_interp_vgpr_index[i] =
5711 separate_prolog ? 8 : 11;
5712 shader->config.spi_ps_input_ena |=
5713 S_0286CC_LINEAR_CENTER_ENA(1);
5714 break;
5715 case TGSI_INTERPOLATE_LOC_CENTROID:
5716 key->ps_prolog.color_interp_vgpr_index[i] =
5717 separate_prolog ? 10 : 13;
5718 shader->config.spi_ps_input_ena |=
5719 S_0286CC_LINEAR_CENTROID_ENA(1);
5720 break;
5721 default:
5722 assert(0);
5723 }
5724 break;
5725 default:
5726 assert(0);
5727 }
5728 }
5729 }
5730 }
5731
5732 /**
5733 * Check whether a PS prolog is required based on the key.
5734 */
5735 static bool si_need_ps_prolog(const union si_shader_part_key *key)
5736 {
5737 return key->ps_prolog.colors_read ||
5738 key->ps_prolog.states.force_persp_sample_interp ||
5739 key->ps_prolog.states.force_linear_sample_interp ||
5740 key->ps_prolog.states.force_persp_center_interp ||
5741 key->ps_prolog.states.force_linear_center_interp ||
5742 key->ps_prolog.states.bc_optimize_for_persp ||
5743 key->ps_prolog.states.bc_optimize_for_linear ||
5744 key->ps_prolog.states.poly_stipple;
5745 }
5746
5747 /**
5748 * Compute the PS epilog key, which contains all the information needed to
5749 * build the PS epilog function.
5750 */
5751 static void si_get_ps_epilog_key(struct si_shader *shader,
5752 union si_shader_part_key *key)
5753 {
5754 struct tgsi_shader_info *info = &shader->selector->info;
5755 memset(key, 0, sizeof(*key));
5756 key->ps_epilog.colors_written = info->colors_written;
5757 key->ps_epilog.writes_z = info->writes_z;
5758 key->ps_epilog.writes_stencil = info->writes_stencil;
5759 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5760 key->ps_epilog.states = shader->key.part.ps.epilog;
5761 }
5762
5763 /**
5764 * Build the GS prolog function. Rotate the input vertices for triangle strips
5765 * with adjacency.
5766 */
5767 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
5768 union si_shader_part_key *key)
5769 {
5770 unsigned num_sgprs, num_vgprs;
5771 struct gallivm_state *gallivm = &ctx->gallivm;
5772 LLVMBuilderRef builder = gallivm->builder;
5773 LLVMTypeRef params[48]; /* 40 SGPRs (maximum) + some VGPRs */
5774 LLVMTypeRef returns[48];
5775 LLVMValueRef func, ret;
5776
5777 if (ctx->screen->b.chip_class >= GFX9) {
5778 num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
5779 num_vgprs = 5; /* ES inputs are not needed by GS */
5780 } else {
5781 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
5782 num_vgprs = 8;
5783 }
5784
5785 for (unsigned i = 0; i < num_sgprs; ++i) {
5786 params[i] = ctx->i32;
5787 returns[i] = ctx->i32;
5788 }
5789
5790 for (unsigned i = 0; i < num_vgprs; ++i) {
5791 params[num_sgprs + i] = ctx->i32;
5792 returns[num_sgprs + i] = ctx->f32;
5793 }
5794
5795 /* Create the function. */
5796 si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
5797 params, num_sgprs + num_vgprs, num_sgprs - 1, 0);
5798 func = ctx->main_fn;
5799
5800 /* Set the full EXEC mask for the prolog, because we are only fiddling
5801 * with registers here. The main shader part will set the correct EXEC
5802 * mask.
5803 */
5804 if (ctx->screen->b.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
5805 si_init_exec_full_mask(ctx);
5806
5807 /* Copy inputs to outputs. This should be no-op, as the registers match,
5808 * but it will prevent the compiler from overwriting them unintentionally.
5809 */
5810 ret = ctx->return_value;
5811 for (unsigned i = 0; i < num_sgprs; i++) {
5812 LLVMValueRef p = LLVMGetParam(func, i);
5813 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
5814 }
5815 for (unsigned i = 0; i < num_vgprs; i++) {
5816 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
5817 p = LLVMBuildBitCast(builder, p, ctx->f32, "");
5818 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
5819 }
5820
5821 if (key->gs_prolog.states.tri_strip_adj_fix) {
5822 /* Remap the input vertices for every other primitive. */
5823 const unsigned gfx6_vtx_params[6] = {
5824 num_sgprs,
5825 num_sgprs + 1,
5826 num_sgprs + 3,
5827 num_sgprs + 4,
5828 num_sgprs + 5,
5829 num_sgprs + 6
5830 };
5831 const unsigned gfx9_vtx_params[3] = {
5832 num_sgprs,
5833 num_sgprs + 1,
5834 num_sgprs + 4,
5835 };
5836 LLVMValueRef vtx_in[6], vtx_out[6];
5837 LLVMValueRef prim_id, rotate;
5838
5839 if (ctx->screen->b.chip_class >= GFX9) {
5840 for (unsigned i = 0; i < 3; i++) {
5841 vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
5842 vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
5843 }
5844 } else {
5845 for (unsigned i = 0; i < 6; i++)
5846 vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
5847 }
5848
5849 prim_id = LLVMGetParam(func, num_sgprs + 2);
5850 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
5851
5852 for (unsigned i = 0; i < 6; ++i) {
5853 LLVMValueRef base, rotated;
5854 base = vtx_in[i];
5855 rotated = vtx_in[(i + 4) % 6];
5856 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
5857 }
5858
5859 if (ctx->screen->b.chip_class >= GFX9) {
5860 for (unsigned i = 0; i < 3; i++) {
5861 LLVMValueRef hi, out;
5862
5863 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
5864 LLVMConstInt(ctx->i32, 16, 0), "");
5865 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
5866 out = LLVMBuildBitCast(builder, out, ctx->f32, "");
5867 ret = LLVMBuildInsertValue(builder, ret, out,
5868 gfx9_vtx_params[i], "");
5869 }
5870 } else {
5871 for (unsigned i = 0; i < 6; i++) {
5872 LLVMValueRef out;
5873
5874 out = LLVMBuildBitCast(builder, vtx_out[i], ctx->f32, "");
5875 ret = LLVMBuildInsertValue(builder, ret, out,
5876 gfx6_vtx_params[i], "");
5877 }
5878 }
5879 }
5880
5881 LLVMBuildRet(builder, ret);
5882 }
5883
5884 /**
5885 * Given a list of shader part functions, build a wrapper function that
5886 * runs them in sequence to form a monolithic shader.
5887 */
5888 static void si_build_wrapper_function(struct si_shader_context *ctx,
5889 LLVMValueRef *parts,
5890 unsigned num_parts,
5891 unsigned main_part,
5892 unsigned next_shader_first_part)
5893 {
5894 struct gallivm_state *gallivm = &ctx->gallivm;
5895 LLVMBuilderRef builder = ctx->gallivm.builder;
5896 /* PS epilog has one arg per color component */
5897 LLVMTypeRef param_types[48];
5898 LLVMValueRef initial[48], out[48];
5899 LLVMTypeRef function_type;
5900 unsigned num_params;
5901 unsigned num_out, initial_num_out;
5902 MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
5903 MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
5904 unsigned num_sgprs, num_vgprs;
5905 unsigned last_sgpr_param;
5906 unsigned gprs;
5907 struct lp_build_if_state if_state;
5908
5909 for (unsigned i = 0; i < num_parts; ++i) {
5910 lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
5911 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
5912 }
5913
5914 /* The parameters of the wrapper function correspond to those of the
5915 * first part in terms of SGPRs and VGPRs, but we use the types of the
5916 * main part to get the right types. This is relevant for the
5917 * dereferenceable attribute on descriptor table pointers.
5918 */
5919 num_sgprs = 0;
5920 num_vgprs = 0;
5921
5922 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
5923 num_params = LLVMCountParamTypes(function_type);
5924
5925 for (unsigned i = 0; i < num_params; ++i) {
5926 LLVMValueRef param = LLVMGetParam(parts[0], i);
5927
5928 if (ac_is_sgpr_param(param)) {
5929 assert(num_vgprs == 0);
5930 num_sgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5931 } else {
5932 num_vgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5933 }
5934 }
5935 assert(num_vgprs + num_sgprs <= ARRAY_SIZE(param_types));
5936
5937 num_params = 0;
5938 last_sgpr_param = 0;
5939 gprs = 0;
5940 while (gprs < num_sgprs + num_vgprs) {
5941 LLVMValueRef param = LLVMGetParam(parts[main_part], num_params);
5942 unsigned size;
5943
5944 param_types[num_params] = LLVMTypeOf(param);
5945 if (gprs < num_sgprs)
5946 last_sgpr_param = num_params;
5947 size = llvm_get_type_size(param_types[num_params]) / 4;
5948 num_params++;
5949
5950 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
5951 assert(gprs + size <= num_sgprs + num_vgprs &&
5952 (gprs >= num_sgprs || gprs + size <= num_sgprs));
5953
5954 gprs += size;
5955 }
5956
5957 si_create_function(ctx, "wrapper", NULL, 0, param_types, num_params,
5958 last_sgpr_param,
5959 si_get_max_workgroup_size(ctx->shader));
5960
5961 if (is_merged_shader(ctx->shader))
5962 si_init_exec_full_mask(ctx);
5963
5964 /* Record the arguments of the function as if they were an output of
5965 * a previous part.
5966 */
5967 num_out = 0;
5968 num_out_sgpr = 0;
5969
5970 for (unsigned i = 0; i < num_params; ++i) {
5971 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
5972 LLVMTypeRef param_type = LLVMTypeOf(param);
5973 LLVMTypeRef out_type = i <= last_sgpr_param ? ctx->i32 : ctx->f32;
5974 unsigned size = llvm_get_type_size(param_type) / 4;
5975
5976 if (size == 1) {
5977 if (param_type != out_type)
5978 param = LLVMBuildBitCast(builder, param, out_type, "");
5979 out[num_out++] = param;
5980 } else {
5981 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
5982
5983 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
5984 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
5985 param_type = ctx->i64;
5986 }
5987
5988 if (param_type != vector_type)
5989 param = LLVMBuildBitCast(builder, param, vector_type, "");
5990
5991 for (unsigned j = 0; j < size; ++j)
5992 out[num_out++] = LLVMBuildExtractElement(
5993 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
5994 }
5995
5996 if (i <= last_sgpr_param)
5997 num_out_sgpr = num_out;
5998 }
5999
6000 memcpy(initial, out, sizeof(out));
6001 initial_num_out = num_out;
6002 initial_num_out_sgpr = num_out_sgpr;
6003
6004 /* Now chain the parts. */
6005 for (unsigned part = 0; part < num_parts; ++part) {
6006 LLVMValueRef in[48];
6007 LLVMValueRef ret;
6008 LLVMTypeRef ret_type;
6009 unsigned out_idx = 0;
6010
6011 num_params = LLVMCountParams(parts[part]);
6012 assert(num_params <= ARRAY_SIZE(param_types));
6013
6014 /* Merged shaders are executed conditionally depending
6015 * on the number of enabled threads passed in the input SGPRs. */
6016 if (is_merged_shader(ctx->shader) &&
6017 (part == 0 || part == next_shader_first_part)) {
6018 LLVMValueRef ena, count = initial[3];
6019
6020 /* The thread count for the 2nd shader is at bit-offset 8. */
6021 if (part == next_shader_first_part) {
6022 count = LLVMBuildLShr(builder, count,
6023 LLVMConstInt(ctx->i32, 8, 0), "");
6024 }
6025 count = LLVMBuildAnd(builder, count,
6026 LLVMConstInt(ctx->i32, 0x7f, 0), "");
6027 ena = LLVMBuildICmp(builder, LLVMIntULT,
6028 ac_get_thread_id(&ctx->ac), count, "");
6029 lp_build_if(&if_state, &ctx->gallivm, ena);
6030 }
6031
6032 /* Derive arguments for the next part from outputs of the
6033 * previous one.
6034 */
6035 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
6036 LLVMValueRef param;
6037 LLVMTypeRef param_type;
6038 bool is_sgpr;
6039 unsigned param_size;
6040 LLVMValueRef arg = NULL;
6041
6042 param = LLVMGetParam(parts[part], param_idx);
6043 param_type = LLVMTypeOf(param);
6044 param_size = llvm_get_type_size(param_type) / 4;
6045 is_sgpr = ac_is_sgpr_param(param);
6046
6047 if (is_sgpr) {
6048 #if HAVE_LLVM < 0x0400
6049 LLVMRemoveAttribute(param, LLVMByValAttribute);
6050 #else
6051 unsigned kind_id = LLVMGetEnumAttributeKindForName("byval", 5);
6052 LLVMRemoveEnumAttributeAtIndex(parts[part], param_idx + 1, kind_id);
6053 #endif
6054 lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
6055 }
6056
6057 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
6058 assert(is_sgpr || out_idx >= num_out_sgpr);
6059
6060 if (param_size == 1)
6061 arg = out[out_idx];
6062 else
6063 arg = lp_build_gather_values(gallivm, &out[out_idx], param_size);
6064
6065 if (LLVMTypeOf(arg) != param_type) {
6066 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6067 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
6068 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6069 } else {
6070 arg = LLVMBuildBitCast(builder, arg, param_type, "");
6071 }
6072 }
6073
6074 in[param_idx] = arg;
6075 out_idx += param_size;
6076 }
6077
6078 ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
6079
6080 if (is_merged_shader(ctx->shader) &&
6081 (part + 1 == next_shader_first_part ||
6082 part + 1 == num_parts)) {
6083 lp_build_endif(&if_state);
6084
6085 if (part + 1 == next_shader_first_part) {
6086 /* A barrier is required between 2 merged shaders. */
6087 si_llvm_emit_barrier(NULL, &ctx->bld_base, NULL);
6088
6089 /* The second half of the merged shader should use
6090 * the inputs from the toplevel (wrapper) function,
6091 * not the return value from the last call.
6092 *
6093 * That's because the last call was executed condi-
6094 * tionally, so we can't consume it in the main
6095 * block.
6096 */
6097 memcpy(out, initial, sizeof(initial));
6098 num_out = initial_num_out;
6099 num_out_sgpr = initial_num_out_sgpr;
6100 }
6101 continue;
6102 }
6103
6104 /* Extract the returned GPRs. */
6105 ret_type = LLVMTypeOf(ret);
6106 num_out = 0;
6107 num_out_sgpr = 0;
6108
6109 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
6110 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
6111
6112 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
6113
6114 for (unsigned i = 0; i < ret_size; ++i) {
6115 LLVMValueRef val =
6116 LLVMBuildExtractValue(builder, ret, i, "");
6117
6118 out[num_out++] = val;
6119
6120 if (LLVMTypeOf(val) == ctx->i32) {
6121 assert(num_out_sgpr + 1 == num_out);
6122 num_out_sgpr = num_out;
6123 }
6124 }
6125 }
6126 }
6127
6128 LLVMBuildRetVoid(builder);
6129 }
6130
/**
 * Compile a TGSI shader into machine code.
 *
 * Builds the main part via si_compile_tgsi_main. For monolithic shaders it
 * also builds the needed prologs/epilogs (and, on GFX9, the main part of
 * the merged previous stage) and links everything together with
 * si_build_wrapper_function. Finally, the LLVM module is optimized and
 * compiled to bytecode.
 *
 * \return 0 on success, non-zero on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
	    !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	ctx.load_system_value = declare_system_value;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS: optionally prepend the VS prolog. */
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->b.chip_class >= GFX9) {
			/* GFX9 merged LS-HS:
			 * parts = { [VS prolog,] VS-as-LS main, TCS main,
			 *           TCS epilog } */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (ls->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !ls->vs_needs_prolog,
						  4 - !ls->vs_needs_prolog, 0,
						  ls->vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9 TCS: main part + epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->b.chip_class >= GFX9) {
			/* GFX9 merged ES-GS:
			 * parts = { [ES prolog,] ES main, GS prolog, GS main } */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				/* NOTE(review): this reuses the TCS LS-prolog
				 * key bits for the ES prolog — verify this is
				 * intentional. */
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9 GS: prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS: [prolog,] main part, epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    r600_can_dump_shader(&sscreen->b, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs.
	 * The checks below follow the SPI_PS_INPUT_ADDR bit order, which
	 * determines the VGPR layout (and thus face_vgpr_index). */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;

		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	return 0;
}
6439
6440 /**
6441 * Create, compile and return a shader part (prolog or epilog).
6442 *
6443 * \param sscreen screen
6444 * \param list list of shader parts of the same category
6445 * \param type shader type
6446 * \param key shader part key
6447 * \param prolog whether the part being requested is a prolog
6448 * \param tm LLVM target machine
6449 * \param debug debug callback
6450 * \param build the callback responsible for building the main function
6451 * \return non-NULL on success
6452 */
6453 static struct si_shader_part *
6454 si_get_shader_part(struct si_screen *sscreen,
6455 struct si_shader_part **list,
6456 enum pipe_shader_type type,
6457 bool prolog,
6458 union si_shader_part_key *key,
6459 LLVMTargetMachineRef tm,
6460 struct pipe_debug_callback *debug,
6461 void (*build)(struct si_shader_context *,
6462 union si_shader_part_key *),
6463 const char *name)
6464 {
6465 struct si_shader_part *result;
6466
6467 mtx_lock(&sscreen->shader_parts_mutex);
6468
6469 /* Find existing. */
6470 for (result = *list; result; result = result->next) {
6471 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6472 mtx_unlock(&sscreen->shader_parts_mutex);
6473 return result;
6474 }
6475 }
6476
6477 /* Compile a new one. */
6478 result = CALLOC_STRUCT(si_shader_part);
6479 result->key = *key;
6480
6481 struct si_shader shader = {};
6482 struct si_shader_context ctx;
6483 struct gallivm_state *gallivm = &ctx.gallivm;
6484
6485 si_init_shader_ctx(&ctx, sscreen, tm);
6486 ctx.shader = &shader;
6487 ctx.type = type;
6488
6489 switch (type) {
6490 case PIPE_SHADER_VERTEX:
6491 break;
6492 case PIPE_SHADER_TESS_CTRL:
6493 assert(!prolog);
6494 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6495 break;
6496 case PIPE_SHADER_GEOMETRY:
6497 assert(prolog);
6498 break;
6499 case PIPE_SHADER_FRAGMENT:
6500 if (prolog)
6501 shader.key.part.ps.prolog = key->ps_prolog.states;
6502 else
6503 shader.key.part.ps.epilog = key->ps_epilog.states;
6504 break;
6505 default:
6506 unreachable("bad shader part");
6507 }
6508
6509 build(&ctx, key);
6510
6511 /* Compile. */
6512 si_llvm_optimize_module(&ctx);
6513
6514 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
6515 gallivm->module, debug, ctx.type, name)) {
6516 FREE(result);
6517 result = NULL;
6518 goto out;
6519 }
6520
6521 result->next = *list;
6522 *list = result;
6523
6524 out:
6525 si_llvm_dispose(&ctx);
6526 mtx_unlock(&sscreen->shader_parts_mutex);
6527 return result;
6528 }
6529
6530 /**
6531 * Build the vertex shader prolog function.
6532 *
6533 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6534 * All inputs are returned unmodified. The vertex load indices are
6535 * stored after them, which will be used by the API VS for fetching inputs.
6536 *
6537 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6538 * input_v0,
6539 * input_v1,
6540 * input_v2,
6541 * input_v3,
6542 * (VertexID + BaseVertex),
6543 * (InstanceID + StartInstance),
6544 * (InstanceID / 2 + StartInstance)
6545 */
6546 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
6547 union si_shader_part_key *key)
6548 {
6549 struct gallivm_state *gallivm = &ctx->gallivm;
6550 LLVMTypeRef *params, *returns;
6551 LLVMValueRef ret, func;
6552 int last_sgpr, num_params, num_returns, i;
6553 unsigned first_vs_vgpr = key->vs_prolog.num_input_sgprs +
6554 key->vs_prolog.num_merged_next_stage_vgprs;
6555 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
6556 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
6557 num_input_vgprs;
6558 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
6559
6560 ctx->param_vertex_id = first_vs_vgpr;
6561 ctx->param_instance_id = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
6562
6563 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6564 params = alloca(num_all_input_regs * sizeof(LLVMTypeRef));
6565 returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
6566 sizeof(LLVMTypeRef));
6567 num_params = 0;
6568 num_returns = 0;
6569
6570 /* Declare input and output SGPRs. */
6571 num_params = 0;
6572 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6573 params[num_params++] = ctx->i32;
6574 returns[num_returns++] = ctx->i32;
6575 }
6576 last_sgpr = num_params - 1;
6577
6578 /* Preloaded VGPRs (outputs must be floats) */
6579 for (i = 0; i < num_input_vgprs; i++) {
6580 params[num_params++] = ctx->i32;
6581 returns[num_returns++] = ctx->f32;
6582 }
6583
6584 /* Vertex load indices. */
6585 for (i = 0; i <= key->vs_prolog.last_input; i++)
6586 returns[num_returns++] = ctx->f32;
6587
6588 /* Create the function. */
6589 si_create_function(ctx, "vs_prolog", returns, num_returns, params,
6590 num_params, last_sgpr, 0);
6591 func = ctx->main_fn;
6592
6593 if (key->vs_prolog.num_merged_next_stage_vgprs &&
6594 !key->vs_prolog.is_monolithic)
6595 si_init_exec_from_input(ctx, 3, 0);
6596
6597 /* Copy inputs to outputs. This should be no-op, as the registers match,
6598 * but it will prevent the compiler from overwriting them unintentionally.
6599 */
6600 ret = ctx->return_value;
6601 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6602 LLVMValueRef p = LLVMGetParam(func, i);
6603 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6604 }
6605 for (; i < num_params; i++) {
6606 LLVMValueRef p = LLVMGetParam(func, i);
6607 p = LLVMBuildBitCast(gallivm->builder, p, ctx->f32, "");
6608 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6609 }
6610
6611 /* Compute vertex load indices from instance divisors. */
6612 for (i = 0; i <= key->vs_prolog.last_input; i++) {
6613 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
6614 LLVMValueRef index;
6615
6616 if (divisor) {
6617 /* InstanceID / Divisor + StartInstance */
6618 index = get_instance_index_for_fetch(ctx,
6619 user_sgpr_base +
6620 SI_SGPR_START_INSTANCE,
6621 divisor);
6622 } else {
6623 /* VertexID + BaseVertex */
6624 index = LLVMBuildAdd(gallivm->builder,
6625 LLVMGetParam(func, ctx->param_vertex_id),
6626 LLVMGetParam(func, user_sgpr_base +
6627 SI_SGPR_BASE_VERTEX), "");
6628 }
6629
6630 index = LLVMBuildBitCast(gallivm->builder, index, ctx->f32, "");
6631 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
6632 num_params++, "");
6633 }
6634
6635 si_llvm_build_ret(ctx, ret);
6636 }
6637
6638 static bool si_get_vs_prolog(struct si_screen *sscreen,
6639 LLVMTargetMachineRef tm,
6640 struct si_shader *shader,
6641 struct pipe_debug_callback *debug,
6642 struct si_shader *main_part,
6643 const struct si_vs_prolog_bits *key)
6644 {
6645 struct si_shader_selector *vs = main_part->selector;
6646
6647 /* The prolog is a no-op if there are no inputs. */
6648 if (!vs->vs_needs_prolog)
6649 return true;
6650
6651 /* Get the prolog. */
6652 union si_shader_part_key prolog_key;
6653 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
6654 key, shader, &prolog_key);
6655
6656 shader->prolog =
6657 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6658 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
6659 debug, si_build_vs_prolog_function,
6660 "Vertex Shader Prolog");
6661 return shader->prolog != NULL;
6662 }
6663
6664 /**
6665 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6666 */
6667 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
6668 LLVMTargetMachineRef tm,
6669 struct si_shader *shader,
6670 struct pipe_debug_callback *debug)
6671 {
6672 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
6673 &shader->key.part.vs.prolog);
6674 }
6675
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMTypeRef params[32];
	LLVMValueRef func;
	int last_sgpr, num_params = 0;

	/* Declare input SGPRs. The positions (and thus the order) mirror the
	 * main TCS function's SGPR layout for the given chip class, so the
	 * unnamed entries are placeholders; only the ctx->param_* slots are
	 * consumed by si_write_tess_factors below. */
	if (ctx->screen->b.chip_class >= GFX9) {
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* wave info */
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
	} else {
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
	}
	last_sgpr = num_params - 1;

	/* Input VGPRs follow the SGPRs. */
	params[num_params++] = ctx->i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx->i32; /* invocation ID within the patch */
	params[num_params++] = ctx->i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, params, num_params, last_sgpr,
			   ctx->screen->b.chip_class >= CIK ? 128 : 64);
	declare_lds_as_pointer(ctx);
	func = ctx->main_fn;

	/* Write the tess factors using the three input VGPRs declared above. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	/* The epilog produces no return values; tess factors go to memory. */
	LLVMBuildRetVoid(gallivm->builder);
}
6742
6743 /**
6744 * Select and compile (or reuse) TCS parts (epilog).
6745 */
6746 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6747 LLVMTargetMachineRef tm,
6748 struct si_shader *shader,
6749 struct pipe_debug_callback *debug)
6750 {
6751 if (sscreen->b.chip_class >= GFX9) {
6752 struct si_shader *ls_main_part =
6753 shader->key.part.tcs.ls->main_shader_part_ls;
6754
6755 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
6756 &shader->key.part.tcs.ls_prolog))
6757 return false;
6758
6759 shader->previous_stage = ls_main_part;
6760 }
6761
6762 /* Get the epilog. */
6763 union si_shader_part_key epilog_key;
6764 memset(&epilog_key, 0, sizeof(epilog_key));
6765 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
6766
6767 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6768 PIPE_SHADER_TESS_CTRL, false,
6769 &epilog_key, tm, debug,
6770 si_build_tcs_epilog_function,
6771 "Tessellation Control Shader Epilog");
6772 return shader->epilog != NULL;
6773 }
6774
6775 /**
6776 * Select and compile (or reuse) GS parts (prolog).
6777 */
6778 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
6779 LLVMTargetMachineRef tm,
6780 struct si_shader *shader,
6781 struct pipe_debug_callback *debug)
6782 {
6783 if (sscreen->b.chip_class >= GFX9) {
6784 struct si_shader *es_main_part =
6785 shader->key.part.gs.es->main_shader_part_es;
6786
6787 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
6788 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
6789 &shader->key.part.gs.vs_prolog))
6790 return false;
6791
6792 shader->previous_stage = es_main_part;
6793 }
6794
6795 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
6796 return true;
6797
6798 union si_shader_part_key prolog_key;
6799 memset(&prolog_key, 0, sizeof(prolog_key));
6800 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
6801
6802 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
6803 PIPE_SHADER_GEOMETRY, true,
6804 &prolog_key, tm, debug,
6805 si_build_gs_prolog_function,
6806 "Geometry Shader Prolog");
6807 return shader->prolog2 != NULL;
6808 }
6809
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overriden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx->i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx->f32;

	/* Declare outputs (same as inputs + add colors if needed) */
	/* NOTE: the return types are appended to the same "params" array past
	 * num_params; the allocation above reserves the extra 8 slots. */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", params, num_returns, params,
			   num_params, last_sgpr, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx->i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx->i1, "");

		/* The VGPR offsets below (base + N) index the fixed PS input
		 * VGPR layout: PERSP_SAMPLE at +0, PERSP_CENTER at +2,
		 * PERSP_CENTROID at +4, LINEAR_SAMPLE at +6, LINEAR_CENTER
		 * at +8, LINEAR_CENTROID at +10 (each is an i,j pair). */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors. Up to 2 color inputs (COLOR0/COLOR1), 4 channels
	 * each; interpolated channels are appended after the pass-through
	 * outputs (starting at num_params). */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx->i32, "");
		}

		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7053
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	/* Max: SGPRs + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params = 0, i;
	struct si_ps_exports exp = {};

	/* Declare input SGPRs. */
	params[ctx->param_rw_buffers = num_params++] = ctx->i64;
	params[ctx->param_const_and_shader_buffers = num_params++] = ctx->i64;
	params[ctx->param_samplers_and_images = num_params++] = ctx->i64;
	assert(num_params == SI_PARAM_ALPHA_REF);
	params[SI_PARAM_ALPHA_REF] = ctx->f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: one per written color channel, plus optional
	 * Z / stencil / sample mask. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Ensure the sample mask VGPR location is always declared, even when
	 * fewer channels are written. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, params, num_params,
			   last_sgpr, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export. The last export must be tagged "done"
	 * (handled by si_export_mrt_color), unless a Z/stencil/mask export
	 * follows the colors. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Emit one MRT export per written color output, consuming 4 VGPRs
	 * (RGBA) each. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		/* The hw requires at least one export; emit a null export
		 * when nothing else was exported. */
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
}
7154
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also patches up SPI_PS_INPUT_ENA so the register state matches what the
 * selected prolog/epilog actually consume, and enforces hardware
 * requirements on the enabled interpolation weights.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always required (it does the exports). */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced-interpolation mode replaces the other enable bits of
	 * the same kind (persp/linear) with the single mode the prolog
	 * overrides them to. */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7251
7252 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7253 unsigned *lds_size)
7254 {
7255 /* SPI barrier management bug:
7256 * Make sure we have at least 4k of LDS in use to avoid the bug.
7257 * It applies to workgroup sizes of more than one wavefront.
7258 */
7259 if (sscreen->b.family == CHIP_BONAIRE ||
7260 sscreen->b.family == CHIP_KABINI ||
7261 sscreen->b.family == CHIP_MULLINS)
7262 *lds_size = MAX2(*lds_size, 8);
7263 }
7264
7265 static void si_fix_resource_usage(struct si_screen *sscreen,
7266 struct si_shader *shader)
7267 {
7268 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7269
7270 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7271
7272 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7273 si_get_max_workgroup_size(shader) > 64) {
7274 si_multiwave_lds_size_workaround(sscreen,
7275 &shader->config.lds_size);
7276 }
7277 }
7278
7279 int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
7280 struct si_shader *shader,
7281 struct pipe_debug_callback *debug)
7282 {
7283 struct si_shader_selector *sel = shader->selector;
7284 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
7285 int r;
7286
7287 /* LS, ES, VS are compiled on demand if the main part hasn't been
7288 * compiled for that stage.
7289 *
7290 * Vertex shaders are compiled on demand when a vertex fetch
7291 * workaround must be applied.
7292 */
7293 if (shader->is_monolithic) {
7294 /* Monolithic shader (compiled as a whole, has many variants,
7295 * may take a long time to compile).
7296 */
7297 r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
7298 if (r)
7299 return r;
7300 } else {
7301 /* The shader consists of 2-3 parts:
7302 *
7303 * - the middle part is the user shader, it has 1 variant only
7304 * and it was compiled during the creation of the shader
7305 * selector
7306 * - the prolog part is inserted at the beginning
7307 * - the epilog part is inserted at the end
7308 *
7309 * The prolog and epilog have many (but simple) variants.
7310 */
7311
7312 /* Copy the compiled TGSI shader data over. */
7313 shader->is_binary_shared = true;
7314 shader->binary = mainp->binary;
7315 shader->config = mainp->config;
7316 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
7317 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
7318 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
7319 memcpy(shader->info.vs_output_param_offset,
7320 mainp->info.vs_output_param_offset,
7321 sizeof(mainp->info.vs_output_param_offset));
7322 shader->info.uses_instanceid = mainp->info.uses_instanceid;
7323 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
7324 shader->info.nr_param_exports = mainp->info.nr_param_exports;
7325
7326 /* Select prologs and/or epilogs. */
7327 switch (sel->type) {
7328 case PIPE_SHADER_VERTEX:
7329 if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
7330 return -1;
7331 break;
7332 case PIPE_SHADER_TESS_CTRL:
7333 if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
7334 return -1;
7335 break;
7336 case PIPE_SHADER_TESS_EVAL:
7337 break;
7338 case PIPE_SHADER_GEOMETRY:
7339 if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
7340 return -1;
7341 break;
7342 case PIPE_SHADER_FRAGMENT:
7343 if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
7344 return -1;
7345
7346 /* Make sure we have at least as many VGPRs as there
7347 * are allocated inputs.
7348 */
7349 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
7350 shader->info.num_input_vgprs);
7351 break;
7352 }
7353
7354 /* Update SGPR and VGPR counts. */
7355 if (shader->prolog) {
7356 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
7357 shader->prolog->config.num_sgprs);
7358 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
7359 shader->prolog->config.num_vgprs);
7360 }
7361 if (shader->previous_stage) {
7362 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
7363 shader->previous_stage->config.num_sgprs);
7364 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
7365 shader->previous_stage->config.num_vgprs);
7366 shader->config.spilled_sgprs =
7367 MAX2(shader->config.spilled_sgprs,
7368 shader->previous_stage->config.spilled_sgprs);
7369 shader->config.spilled_vgprs =
7370 MAX2(shader->config.spilled_vgprs,
7371 shader->previous_stage->config.spilled_vgprs);
7372 shader->config.private_mem_vgprs =
7373 MAX2(shader->config.private_mem_vgprs,
7374 shader->previous_stage->config.private_mem_vgprs);
7375 shader->config.scratch_bytes_per_wave =
7376 MAX2(shader->config.scratch_bytes_per_wave,
7377 shader->previous_stage->config.scratch_bytes_per_wave);
7378 shader->info.uses_instanceid |=
7379 shader->previous_stage->info.uses_instanceid;
7380 }
7381 if (shader->prolog2) {
7382 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
7383 shader->prolog2->config.num_sgprs);
7384 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
7385 shader->prolog2->config.num_vgprs);
7386 }
7387 if (shader->epilog) {
7388 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
7389 shader->epilog->config.num_sgprs);
7390 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
7391 shader->epilog->config.num_vgprs);
7392 }
7393 }
7394
7395 si_fix_resource_usage(sscreen, shader);
7396 si_shader_dump(sscreen, shader, debug, sel->info.processor,
7397 stderr, true);
7398
7399 /* Upload. */
7400 r = si_shader_binary_upload(sscreen, shader);
7401 if (r) {
7402 fprintf(stderr, "LLVM failed to upload shader\n");
7403 return r;
7404 }
7405
7406 return 0;
7407 }
7408
7409 void si_shader_destroy(struct si_shader *shader)
7410 {
7411 if (shader->scratch_bo)
7412 r600_resource_reference(&shader->scratch_bo, NULL);
7413
7414 r600_resource_reference(&shader->bo, NULL);
7415
7416 if (!shader->is_binary_shared)
7417 radeon_shader_binary_clean(&shader->binary);
7418
7419 free(shader->shader_log);
7420 }