radeonsi: remove 8 bytes from si_shader_key
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "util/u_memory.h"
37 #include "util/u_string.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_util.h"
40 #include "tgsi/tgsi_dump.h"
41
42 #include "ac_binary.h"
43 #include "ac_llvm_util.h"
44 #include "ac_exp_param.h"
45 #include "si_shader_internal.h"
46 #include "si_pipe.h"
47 #include "sid.h"
48
49
/* Symbol names for the two scratch-buffer resource-descriptor dwords.
 * NOTE(review): presumably patched into the compiled binary at upload
 * time by the code that resolves these symbols — confirm against
 * si_shader_binary_upload. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
55
/* One shader output register: its four component values plus the TGSI
 * semantic that identifies it. */
struct si_shader_output_values
{
	LLVMValueRef values[4];		/* one LLVM value per component (x,y,z,w) */
	unsigned semantic_name;		/* TGSI_SEMANTIC_* */
	unsigned semantic_index;	/* index for array-like semantics (e.g. GENERIC[i]) */
	ubyte vertex_stream[4];		/* per-component GS vertex stream index */
};
63
/* Forward declarations for functions defined later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f);

static unsigned llvm_get_type_size(LLVMTypeRef type);

/* Builders for the prolog/epilog shader parts of each stage. */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);

/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* LLVM address-space numbers used by the AMDGPU backend. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};
95
96 static bool is_merged_shader(struct si_shader *shader)
97 {
98 if (shader->selector->screen->b.chip_class <= VI)
99 return false;
100
101 return shader->key.as_ls ||
102 shader->key.as_es ||
103 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
104 shader->selector->type == PIPE_SHADER_GEOMETRY;
105 }
106
107 /**
108 * Returns a unique index for a per-patch semantic name and index. The index
109 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
110 * can be calculated.
111 */
112 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
113 {
114 switch (semantic_name) {
115 case TGSI_SEMANTIC_TESSOUTER:
116 return 0;
117 case TGSI_SEMANTIC_TESSINNER:
118 return 1;
119 case TGSI_SEMANTIC_PATCH:
120 assert(index < 30);
121 return 2 + index;
122
123 default:
124 assert(!"invalid semantic name");
125 return 0;
126 }
127 }
128
129 /**
130 * Returns a unique index for a semantic name and index. The index must be
131 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
132 * calculated.
133 */
134 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
135 {
136 switch (semantic_name) {
137 case TGSI_SEMANTIC_POSITION:
138 return 0;
139 case TGSI_SEMANTIC_GENERIC:
140 /* Since some shader stages use the the highest used IO index
141 * to determine the size to allocate for inputs/outputs
142 * (in LDS, tess and GS rings). GENERIC should be placed right
143 * after POSITION to make that size as small as possible.
144 */
145 if (index < SI_MAX_IO_GENERIC)
146 return 1 + index;
147
148 assert(!"invalid generic index");
149 return 0;
150 case TGSI_SEMANTIC_PSIZE:
151 return SI_MAX_IO_GENERIC + 1;
152 case TGSI_SEMANTIC_CLIPDIST:
153 assert(index <= 1);
154 return SI_MAX_IO_GENERIC + 2 + index;
155 case TGSI_SEMANTIC_FOG:
156 return SI_MAX_IO_GENERIC + 4;
157 case TGSI_SEMANTIC_LAYER:
158 return SI_MAX_IO_GENERIC + 5;
159 case TGSI_SEMANTIC_VIEWPORT_INDEX:
160 return SI_MAX_IO_GENERIC + 6;
161 case TGSI_SEMANTIC_PRIMID:
162 return SI_MAX_IO_GENERIC + 7;
163 case TGSI_SEMANTIC_COLOR: /* these alias */
164 case TGSI_SEMANTIC_BCOLOR:
165 assert(index < 2);
166 return SI_MAX_IO_GENERIC + 8 + index;
167 case TGSI_SEMANTIC_TEXCOORD:
168 assert(index < 8);
169 assert(SI_MAX_IO_GENERIC + 10 + index < 64);
170 return SI_MAX_IO_GENERIC + 10 + index;
171 default:
172 assert(!"invalid semantic name");
173 return 0;
174 }
175 }
176
177 /**
178 * Get the value of a shader input parameter and extract a bitfield.
179 */
180 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
181 unsigned param, unsigned rshift,
182 unsigned bitwidth)
183 {
184 struct gallivm_state *gallivm = &ctx->gallivm;
185 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
186 param);
187
188 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
189 value = bitcast(&ctx->bld_base,
190 TGSI_TYPE_UNSIGNED, value);
191
192 if (rshift)
193 value = LLVMBuildLShr(gallivm->builder, value,
194 LLVMConstInt(ctx->i32, rshift, 0), "");
195
196 if (rshift + bitwidth < 32) {
197 unsigned mask = (1 << bitwidth) - 1;
198 value = LLVMBuildAnd(gallivm->builder, value,
199 LLVMConstInt(ctx->i32, mask, 0), "");
200 }
201
202 return value;
203 }
204
205 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
206 {
207 switch (ctx->type) {
208 case PIPE_SHADER_TESS_CTRL:
209 return unpack_param(ctx, ctx->param_tcs_rel_ids, 0, 8);
210
211 case PIPE_SHADER_TESS_EVAL:
212 return LLVMGetParam(ctx->main_fn,
213 ctx->param_tes_rel_patch_id);
214
215 default:
216 assert(0);
217 return NULL;
218 }
219 }
220
221 /* Tessellation shaders pass outputs to the next shader using LDS.
222 *
223 * LS outputs = TCS inputs
224 * TCS outputs = TES inputs
225 *
226 * The LDS layout is:
227 * - TCS inputs for patch 0
228 * - TCS inputs for patch 1
229 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
230 * - ...
231 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
232 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
233 * - TCS outputs for patch 1
234 * - Per-patch TCS outputs for patch 1
235 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
236 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
237 * - ...
238 *
239 * All three shaders VS(LS), TCS, TES share the same LDS space.
240 */
241
/* Per-patch stride of the TCS inputs in LDS: 13-bit field at bit 8 of
 * the VS state bits. */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
247
/* Per-patch stride of the TCS outputs in LDS: 13-bit field at bit 0 of
 * the TCS output LDS layout word. */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
}
253
254 static LLVMValueRef
255 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
256 {
257 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
258 unpack_param(ctx,
259 ctx->param_tcs_out_lds_offsets,
260 0, 16),
261 4);
262 }
263
264 static LLVMValueRef
265 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
266 {
267 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
268 unpack_param(ctx,
269 ctx->param_tcs_out_lds_offsets,
270 16, 16),
271 4);
272 }
273
274 static LLVMValueRef
275 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
276 {
277 struct gallivm_state *gallivm = &ctx->gallivm;
278 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
279 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
280
281 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
282 }
283
284 static LLVMValueRef
285 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
286 {
287 struct gallivm_state *gallivm = &ctx->gallivm;
288 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
289 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
290 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
291
292 return LLVMBuildAdd(gallivm->builder, patch0_offset,
293 LLVMBuildMul(gallivm->builder, patch_stride,
294 rel_patch_id, ""),
295 "");
296 }
297
298 static LLVMValueRef
299 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
300 {
301 struct gallivm_state *gallivm = &ctx->gallivm;
302 LLVMValueRef patch0_patch_data_offset =
303 get_tcs_out_patch0_patch_data_offset(ctx);
304 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
305 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
306
307 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
308 LLVMBuildMul(gallivm->builder, patch_stride,
309 rel_patch_id, ""),
310 "");
311 }
312
313 static LLVMValueRef get_instance_index_for_fetch(
314 struct si_shader_context *ctx,
315 unsigned param_start_instance, unsigned divisor)
316 {
317 struct gallivm_state *gallivm = &ctx->gallivm;
318
319 LLVMValueRef result = LLVMGetParam(ctx->main_fn,
320 ctx->param_instance_id);
321
322 /* The division must be done before START_INSTANCE is added. */
323 if (divisor > 1)
324 result = LLVMBuildUDiv(gallivm->builder, result,
325 LLVMConstInt(ctx->i32, divisor, 0), "");
326
327 return LLVMBuildAdd(gallivm->builder, result,
328 LLVMGetParam(ctx->main_fn, param_start_instance), "");
329 }
330
331 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
332 * to float. */
333 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
334 LLVMValueRef vec4,
335 unsigned double_index)
336 {
337 LLVMBuilderRef builder = ctx->gallivm.builder;
338 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->gallivm.context);
339 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
340 LLVMVectorType(f64, 2), "");
341 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
342 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
343 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
344 }
345
/**
 * Declare a vertex shader input: load it from the bound vertex buffer and
 * expand it into four float components in out[0..3], applying the
 * "fix fetch" workarounds for vertex formats the hardware cannot load
 * natively (the workaround code is selected by the shader key).
 *
 * \param input_index  index of this input among the VS inputs
 * \param decl         TGSI input declaration (only the index is used here)
 * \param out          receives one LLVM value per component (x,y,z,w)
 */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list (vertex buffer resource descriptor). */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);

	/* Each input has its own vertex-index parameter. */
	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		/* Common case: one buffer_load_format covers the input. */
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       true);
	}

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input[0], llvm_chan, "");
	}

	/* Post-process the loaded components according to the workaround. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(gallivm->builder, tmp, ctx->i32, "");
		else
			tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->i32, "");

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(gallivm->builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(gallivm->builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
			/* Clamp values below -1.0 to -1.0. */
			clamp = LLVMBuildFCmp(gallivm->builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(gallivm->builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		/* Normalize 32-bit unsigned integers to [0, 1]. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		/* Normalize signed integers; FIXED divides by 2^16. */
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000;
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		/* Convert unsigned integers to float without normalizing. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		/* Convert signed integers to float without normalizing. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		/* One 4-dword load holds two doubles: convert them and pad zw. */
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* Three 2-dword loads, one double each. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two 4-dword loads, two doubles each. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* Three single-component loads; take .x of each. */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(gallivm->builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			/* Integer formats: alpha is integer 1, bitcast to float. */
			out[3] = LLVMBuildBitCast(gallivm->builder, ctx->i32_1,
						  ctx->f32, "");
		}
		break;
	}
}
551
552 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
553 unsigned swizzle)
554 {
555 struct si_shader_context *ctx = si_shader_context(bld_base);
556
557 if (swizzle > 0)
558 return ctx->i32_0;
559
560 switch (ctx->type) {
561 case PIPE_SHADER_VERTEX:
562 return LLVMGetParam(ctx->main_fn,
563 ctx->param_vs_prim_id);
564 case PIPE_SHADER_TESS_CTRL:
565 return LLVMGetParam(ctx->main_fn,
566 ctx->param_tcs_patch_id);
567 case PIPE_SHADER_TESS_EVAL:
568 return LLVMGetParam(ctx->main_fn,
569 ctx->param_tes_patch_id);
570 case PIPE_SHADER_GEOMETRY:
571 return LLVMGetParam(ctx->main_fn,
572 ctx->param_gs_prim_id);
573 default:
574 assert(0);
575 return ctx->i32_0;
576 }
577 }
578
579 /**
580 * Return the value of tgsi_ind_register for indexing.
581 * This is the indirect index with the constant offset added to it.
582 */
583 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
584 const struct tgsi_ind_register *ind,
585 int rel_index)
586 {
587 struct gallivm_state *gallivm = &ctx->gallivm;
588 LLVMValueRef result;
589
590 result = ctx->addrs[ind->Index][ind->Swizzle];
591 result = LLVMBuildLoad(gallivm->builder, result, "");
592 result = LLVMBuildAdd(gallivm->builder, result,
593 LLVMConstInt(ctx->i32, rel_index, 0), "");
594 return result;
595 }
596
/**
 * Like get_indirect_index, but restricts the return value to a (possibly
 * undefined) value inside [0..num).
 */
LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
					   const struct tgsi_ind_register *ind,
					   int rel_index, unsigned num)
{
	LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);

	/* Clamping is delegated to si_llvm_bound_index; out-of-range inputs
	 * yield an unspecified in-range value rather than trapping. */
	return si_llvm_bound_index(ctx, result, num);
}
609
610
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst               destination register; used only when \p src is NULL
 * \param src               source register; takes precedence over \p dst
 * \param vertex_dw_stride  dword stride between vertices of a 2-D register
 *                          (array of vertices); may be NULL for 1-D registers
 * \param base_addr         starting dword address the offsets are added to
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* For declared arrays, the semantic lookup must use the
		 * array's first register, not the addressed one. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each register is 4 dwords wide. */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");

		/* 2-D registers use the per-vertex index space; 1-D registers
		 * use the per-patch one. */
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[first], index[first]) :
			si_shader_io_get_unique_index_patch(name[first], index[first]);
	} else {
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]) :
			si_shader_io_get_unique_index_patch(name[reg.Register.Index],
							    index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
700
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
/**
 * Compute a byte address into the offchip buffer described above.
 *
 * \param vertex_index  vertex within the patch, or NULL to address a
 *                      per-patch attribute (the per-patch section)
 * \param param_index   attribute index (from si_shader_io_get_unique_index*)
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Both counts are packed into tcs_offchip_layout (6 bits each). */
	vertices_per_patch = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: element index is patch * verts_per_patch + vertex,
		 * and attributes are strided by the total vertex count. */
		base_addr = LLVMBuildMul(gallivm->builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: element index is the patch id, attributes are
		 * strided by the patch count. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
				 LLVMBuildMul(gallivm->builder, param_index,
					      param_stride, ""), "");

	/* Each attribute is a vec4 = 16 bytes. */
	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Skip past the per-vertex section to the per-patch one. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
762
/**
 * Compute the offchip-buffer address for a TGSI register (TCS output /
 * TES input). Exactly one of \p dst and \p src should describe the
 * register; \p src takes precedence when both are given.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
					struct si_shader_context *ctx,
					const struct tgsi_full_dst_register *dst,
					const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* 2-D registers address a specific vertex within the patch. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
							  reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* For declared arrays, base the semantic lookup on the
		 * array's first register. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
						 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = ctx->i32_0;
	}

	/* 2-D registers use the per-vertex index space; 1-D ones the
	 * per-patch space. */
	param_index_base = reg.Register.Dimension ?
		si_shader_io_get_unique_index(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
				   LLVMConstInt(ctx->i32, param_index_base, 0),
				   "");

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}
826
/**
 * Load a value of the given TGSI type from a buffer.
 *
 * \param swizzle        component to load (0..3), or ~0 to load a full vec4
 * \param buffer         buffer resource descriptor
 * \param offset         voffset passed to the buffer load
 * \param base           soffset passed to the buffer load
 * \param can_speculate  whether the load may be speculated (hoisted)
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
				enum tgsi_opcode_type type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value, value2;
	LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
	LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);

	/* Whole vec4 requested: one 4-dword load, bitcast to the target type. */
	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
	}

	/* 32-bit scalar: load the vec4 and extract the requested component. */
	if (!tgsi_type_is_64bit(type)) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
		return LLVMBuildExtractElement(gallivm->builder, value,
					LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit scalar: load the two consecutive dwords separately and
	 * combine them. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
862
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value;

	/* vec4 request: recurse per channel, then gather into one vector. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
	/* 64-bit values occupy two consecutive dwords. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       ctx->i32_1);
		value2 = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
903
904 /**
905 * Store to LDS.
906 *
907 * \param swizzle offset (typically 0..3)
908 * \param dw_addr address in dwords
909 * \param value value to store
910 */
911 static void lds_store(struct lp_build_tgsi_context *bld_base,
912 unsigned dw_offset_imm, LLVMValueRef dw_addr,
913 LLVMValueRef value)
914 {
915 struct si_shader_context *ctx = si_shader_context(bld_base);
916 struct gallivm_state *gallivm = &ctx->gallivm;
917
918 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
919 LLVMConstInt(ctx->i32, dw_offset_imm, 0));
920
921 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
922 ac_build_indexed_store(&ctx->ac, ctx->lds,
923 dw_addr, value);
924 }
925
/**
 * Build a v4i32 buffer resource descriptor from shader parameter \p param,
 * which holds the buffer address in 64KB units (it is zero-extended to
 * 64 bits and shifted left by 16 to form the full address).
 *
 * The upper two dwords are constants: 0xffffffff (NOTE(review): presumably
 * num_records covering the maximal range — confirm against the buffer
 * descriptor layout) and a dword3 selecting X/Y/Z/W pass-through with a
 * 32-bit float format.
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
					   unsigned param)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as <2 x i64> then reinterpret as <4 x i32>. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
949
950 static LLVMValueRef fetch_input_tcs(
951 struct lp_build_tgsi_context *bld_base,
952 const struct tgsi_full_src_register *reg,
953 enum tgsi_opcode_type type, unsigned swizzle)
954 {
955 struct si_shader_context *ctx = si_shader_context(bld_base);
956 LLVMValueRef dw_addr, stride;
957
958 stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
959 dw_addr = get_tcs_in_current_patch_offset(ctx);
960 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
961
962 return lds_load(bld_base, type, swizzle, dw_addr);
963 }
964
965 static LLVMValueRef fetch_output_tcs(
966 struct lp_build_tgsi_context *bld_base,
967 const struct tgsi_full_src_register *reg,
968 enum tgsi_opcode_type type, unsigned swizzle)
969 {
970 struct si_shader_context *ctx = si_shader_context(bld_base);
971 LLVMValueRef dw_addr, stride;
972
973 if (reg->Register.Dimension) {
974 stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
975 dw_addr = get_tcs_out_current_patch_offset(ctx);
976 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
977 } else {
978 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
979 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
980 }
981
982 return lds_load(bld_base, type, swizzle, dw_addr);
983 }
984
985 static LLVMValueRef fetch_input_tes(
986 struct lp_build_tgsi_context *bld_base,
987 const struct tgsi_full_src_register *reg,
988 enum tgsi_opcode_type type, unsigned swizzle)
989 {
990 struct si_shader_context *ctx = si_shader_context(bld_base);
991 LLVMValueRef buffer, base, addr;
992
993 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
994
995 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
996 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
997
998 return buffer_load(bld_base, type, swizzle, buffer, base, addr, true);
999 }
1000
/* TGSI store callback for TCS outputs.
 *
 * Outputs are written both to LDS (so the TCS and its epilog can read them
 * back) and to the off-chip buffer read by the TES. The LDS store is
 * skipped when nothing reads the output from LDS — except tess factors,
 * which are always written to LDS for the TCS epilog and are never stored
 * to the off-chip buffer here.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output: vertex stride is tcs_out_lds_layout[20:13]. */
		stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				skip_lds_store = false;
				is_tess_factor = true;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: store each enabled dword separately. */
		if (inst->Dst[0].Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}
	}

	/* Full writemask: emit a single vec4 store instead. */
	if (inst->Dst[0].Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1081
/* TGSI fetch callback for GS inputs (per-vertex ES outputs).
 *
 * On GFX9 the ESGS ring lives in LDS and the six vertex offsets arrive
 * packed two per 32-bit parameter; on older chips the data is loaded from
 * the ESGS ring buffer in memory.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	if (!reg->Register.Dimension)
		return NULL;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->b.chip_class >= GFX9) {
		unsigned index = reg->Dimension.Index;

		/* The vertex offsets are packed two per parameter:
		 * low 16 bits = even vertex, high 16 bits = odd vertex. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		vtx_offset = LLVMBuildAdd(gallivm->builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Fetch all four channels recursively and gather them. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	unsigned vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += ctx->param_gs_vtx0_offset;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += ctx->param_gs_vtx2_offset - 2;
	}
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	/* NOTE(review): each input dword appears to occupy a 256-byte stride
	 * slot in the ring — confirm against the ESGS ring layout. */
	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit types: load the next dword as well and combine. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1176
1177 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1178 {
1179 switch (interpolate) {
1180 case TGSI_INTERPOLATE_CONSTANT:
1181 return 0;
1182
1183 case TGSI_INTERPOLATE_LINEAR:
1184 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1185 return SI_PARAM_LINEAR_SAMPLE;
1186 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1187 return SI_PARAM_LINEAR_CENTROID;
1188 else
1189 return SI_PARAM_LINEAR_CENTER;
1190 break;
1191 case TGSI_INTERPOLATE_COLOR:
1192 case TGSI_INTERPOLATE_PERSPECTIVE:
1193 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1194 return SI_PARAM_PERSP_SAMPLE;
1195 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1196 return SI_PARAM_PERSP_CENTROID;
1197 else
1198 return SI_PARAM_PERSP_CENTER;
1199 break;
1200 default:
1201 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1202 return -1;
1203 }
1204 }
1205
1206 /**
1207 * Interpolate a fragment shader input.
1208 *
1209 * @param ctx context
1210 * @param input_index index of the input in hardware
1211 * @param semantic_name TGSI_SEMANTIC_*
1212 * @param semantic_index semantic index
1213 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1214 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1215 * @param interp_param interpolation weights (i,j)
1216 * @param prim_mask SI_PARAM_PRIM_MASK
1217 * @param face SI_PARAM_FRONT_FACE
1218 * @param result the return value (4 components)
1219 */
1220 static void interp_fs_input(struct si_shader_context *ctx,
1221 unsigned input_index,
1222 unsigned semantic_name,
1223 unsigned semantic_index,
1224 unsigned num_interp_inputs,
1225 unsigned colors_read_mask,
1226 LLVMValueRef interp_param,
1227 LLVMValueRef prim_mask,
1228 LLVMValueRef face,
1229 LLVMValueRef result[4])
1230 {
1231 struct gallivm_state *gallivm = &ctx->gallivm;
1232 LLVMValueRef attr_number;
1233 LLVMValueRef i, j;
1234
1235 unsigned chan;
1236
1237 /* fs.constant returns the param from the middle vertex, so it's not
1238 * really useful for flat shading. It's meant to be used for custom
1239 * interpolation (but the intrinsic can't fetch from the other two
1240 * vertices).
1241 *
1242 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1243 * to do the right thing. The only reason we use fs.constant is that
1244 * fs.interp cannot be used on integers, because they can be equal
1245 * to NaN.
1246 *
1247 * When interp is false we will use fs.constant or for newer llvm,
1248 * amdgcn.interp.mov.
1249 */
1250 bool interp = interp_param != NULL;
1251
1252 attr_number = LLVMConstInt(ctx->i32, input_index, 0);
1253
1254 if (interp) {
1255 interp_param = LLVMBuildBitCast(gallivm->builder, interp_param,
1256 LLVMVectorType(ctx->f32, 2), "");
1257
1258 i = LLVMBuildExtractElement(gallivm->builder, interp_param,
1259 ctx->i32_0, "");
1260 j = LLVMBuildExtractElement(gallivm->builder, interp_param,
1261 ctx->i32_1, "");
1262 }
1263
1264 if (semantic_name == TGSI_SEMANTIC_COLOR &&
1265 ctx->shader->key.part.ps.prolog.color_two_side) {
1266 LLVMValueRef is_face_positive;
1267 LLVMValueRef back_attr_number;
1268
1269 /* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
1270 * otherwise it's at offset "num_inputs".
1271 */
1272 unsigned back_attr_offset = num_interp_inputs;
1273 if (semantic_index == 1 && colors_read_mask & 0xf)
1274 back_attr_offset += 1;
1275
1276 back_attr_number = LLVMConstInt(ctx->i32, back_attr_offset, 0);
1277
1278 is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
1279 face, ctx->i32_0, "");
1280
1281 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1282 LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
1283 LLVMValueRef front, back;
1284
1285 if (interp) {
1286 front = ac_build_fs_interp(&ctx->ac, llvm_chan,
1287 attr_number, prim_mask,
1288 i, j);
1289 back = ac_build_fs_interp(&ctx->ac, llvm_chan,
1290 back_attr_number, prim_mask,
1291 i, j);
1292 } else {
1293 front = ac_build_fs_interp_mov(&ctx->ac,
1294 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1295 llvm_chan, attr_number, prim_mask);
1296 back = ac_build_fs_interp_mov(&ctx->ac,
1297 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1298 llvm_chan, back_attr_number, prim_mask);
1299 }
1300
1301 result[chan] = LLVMBuildSelect(gallivm->builder,
1302 is_face_positive,
1303 front,
1304 back,
1305 "");
1306 }
1307 } else if (semantic_name == TGSI_SEMANTIC_FOG) {
1308 if (interp) {
1309 result[0] = ac_build_fs_interp(&ctx->ac, ctx->i32_0,
1310 attr_number, prim_mask, i, j);
1311 } else {
1312 result[0] = ac_build_fs_interp_mov(&ctx->ac, ctx->i32_0,
1313 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1314 attr_number, prim_mask);
1315 }
1316 result[1] =
1317 result[2] = LLVMConstReal(ctx->f32, 0.0f);
1318 result[3] = LLVMConstReal(ctx->f32, 1.0f);
1319 } else {
1320 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1321 LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
1322
1323 if (interp) {
1324 result[chan] = ac_build_fs_interp(&ctx->ac,
1325 llvm_chan, attr_number, prim_mask, i, j);
1326 } else {
1327 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
1328 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1329 llvm_chan, attr_number, prim_mask);
1330 }
1331 }
1332 }
1333 }
1334
/* Declare a fragment shader input and write its four channel values to out[].
 *
 * Colors are not interpolated here when the prolog handles them: their
 * values are taken straight from input VGPRs following POS_FIXED_PT.
 */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &ctx->bld_base.base;
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (i * 4);
		/* COLOR1 channels come after all read COLOR0 channels. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	/* interp_param_idx == 0 means flat shading: no interp weights. */
	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1377
/* The sample ID is in bits [11:8] of the ANCILLARY input. */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1382
1383
/**
 * Load a dword from a constant buffer.
 *
 * \param resource  v4i32 buffer resource descriptor
 * \param offset    byte offset into the buffer (callers pass dword_index * 4)
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1394
1395 static LLVMValueRef load_sample_position(struct si_shader_context *ctx, LLVMValueRef sample_id)
1396 {
1397 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1398 struct gallivm_state *gallivm = &ctx->gallivm;
1399 LLVMBuilderRef builder = gallivm->builder;
1400 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1401 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1402 LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
1403
1404 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1405 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1406 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1407
1408 LLVMValueRef pos[4] = {
1409 buffer_load_const(ctx, resource, offset0),
1410 buffer_load_const(ctx, resource, offset1),
1411 LLVMConstReal(ctx->f32, 0),
1412 LLVMConstReal(ctx->f32, 0)
1413 };
1414
1415 return lp_build_gather_values(gallivm, pos, 4);
1416 }
1417
/* Declare a TGSI system value: compute it once up front and cache the
 * result in ctx->system_values[index] for later fetches. */
static void declare_system_value(struct si_shader_context *ctx,
				 unsigned index,
				 const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *bld = &ctx->bld_base.base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(ctx->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VertexID = hardware vertex_id + base_vertex. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_base_vertex), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of vs_state_bits says whether the draw is indexed. */
		indexed = LLVMBuildLShr(gallivm->builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(gallivm->builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(gallivm->builder, indexed,
					LLVMGetParam(ctx->main_fn, ctx->param_base_vertex),
					ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_start_instance);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_draw_id);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(ctx->main_fn,
					     ctx->param_gs_instance_id);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Position with .w replaced by 1/w. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position = fractional part of the pixel position. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
			LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess factors are read back from the off-chip buffer. */
		LLVMValueRef buffer, base, addr;
		int param = si_shader_io_get_unique_index_patch(decl->Semantic.Name, 0);

		buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
		addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
						  LLVMConstInt(ctx->i32, param, 0));

		value = buffer_load(&ctx->bld_base, TGSI_TYPE_FLOAT,
				    ~0, buffer, base, addr, true);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from a driver-internal const
		 * buffer: outer at dword offset 0, inner at dword offset 4. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&ctx->bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* A fixed block size is known at compile time; otherwise
		 * the size is passed in as a parameter. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Dimensions without a block-ID parameter read as 0. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* Helper invocation = NOT ps.live, sign-extended to i32. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* eq_mask = 1 << thread_id, returned as v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* Build the GE/GT mask by shifting, then invert for LE/LT. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
1698
1699 static void declare_compute_memory(struct si_shader_context *ctx,
1700 const struct tgsi_full_declaration *decl)
1701 {
1702 struct si_shader_selector *sel = ctx->shader->selector;
1703 struct gallivm_state *gallivm = &ctx->gallivm;
1704
1705 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1706 LLVMValueRef var;
1707
1708 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1709 assert(decl->Range.First == decl->Range.Last);
1710 assert(!ctx->shared_memory);
1711
1712 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1713 LLVMArrayType(ctx->i8, sel->local_size),
1714 "compute_lds",
1715 LOCAL_ADDR_SPACE);
1716 LLVMSetAlignment(var, 4);
1717
1718 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1719 }
1720
1721 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1722 {
1723 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1724 ctx->param_const_and_shader_buffers);
1725
1726 return ac_build_indexed_load_const(&ctx->ac, list_ptr,
1727 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
1728 }
1729
/* TGSI fetch callback for constants (CONST file).
 *
 * Handles indirect constant-buffer indices, indirect register indices,
 * 64-bit types (loaded as two dwords), and LP_CHAN_ALL (all four channels
 * gathered into a vector).
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	/* All channels requested: fetch each one recursively and gather. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		/* Indirect buffer index: clamp it and look up the descriptor
		 * in the combined list, where const buffers are placed after
		 * the SI_NUM_SHADER_BUFFERS shader buffers. */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_indexed_load_const(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	if (reg->Register.Indirect) {
		/* Indirect register index: byte address =
		 * addr_reg * 16 + constant offset. */
		addr = ctx->addrs[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    LLVMConstInt(ctx->i32, idx * 4, 0));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit types: load the adjacent dword and combine. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1794
1795 /* Upper 16 bits must be zero. */
1796 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
1797 LLVMValueRef val[2])
1798 {
1799 return LLVMBuildOr(ctx->gallivm.builder, val[0],
1800 LLVMBuildShl(ctx->gallivm.builder, val[1],
1801 LLVMConstInt(ctx->i32, 16, 0),
1802 ""), "");
1803 }
1804
1805 /* Upper 16 bits are ignored and will be dropped. */
1806 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
1807 LLVMValueRef val[2])
1808 {
1809 LLVMValueRef v[2] = {
1810 LLVMBuildAnd(ctx->gallivm.builder, val[0],
1811 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
1812 val[1],
1813 };
1814 return si_llvm_pack_two_int16(ctx, v);
1815 }
1816
1817 /* Initialize arguments for the shader export intrinsic */
1818 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1819 LLVMValueRef *values,
1820 unsigned target,
1821 struct ac_export_args *args)
1822 {
1823 struct si_shader_context *ctx = si_shader_context(bld_base);
1824 struct lp_build_context *base = &bld_base->base;
1825 LLVMBuilderRef builder = ctx->gallivm.builder;
1826 LLVMValueRef val[4];
1827 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1828 unsigned chan;
1829 bool is_int8, is_int10;
1830
1831 /* Default is 0xf. Adjusted below depending on the format. */
1832 args->enabled_channels = 0xf; /* writemask */
1833
1834 /* Specify whether the EXEC mask represents the valid mask */
1835 args->valid_mask = 0;
1836
1837 /* Specify whether this is the last export */
1838 args->done = 0;
1839
1840 /* Specify the target we are exporting */
1841 args->target = target;
1842
1843 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1844 const struct si_shader_key *key = &ctx->shader->key;
1845 unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
1846 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1847
1848 assert(cbuf >= 0 && cbuf < 8);
1849 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1850 is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
1851 is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
1852 }
1853
1854 args->compr = false;
1855 args->out[0] = base->undef;
1856 args->out[1] = base->undef;
1857 args->out[2] = base->undef;
1858 args->out[3] = base->undef;
1859
1860 switch (spi_shader_col_format) {
1861 case V_028714_SPI_SHADER_ZERO:
1862 args->enabled_channels = 0; /* writemask */
1863 args->target = V_008DFC_SQ_EXP_NULL;
1864 break;
1865
1866 case V_028714_SPI_SHADER_32_R:
1867 args->enabled_channels = 1; /* writemask */
1868 args->out[0] = values[0];
1869 break;
1870
1871 case V_028714_SPI_SHADER_32_GR:
1872 args->enabled_channels = 0x3; /* writemask */
1873 args->out[0] = values[0];
1874 args->out[1] = values[1];
1875 break;
1876
1877 case V_028714_SPI_SHADER_32_AR:
1878 args->enabled_channels = 0x9; /* writemask */
1879 args->out[0] = values[0];
1880 args->out[3] = values[3];
1881 break;
1882
1883 case V_028714_SPI_SHADER_FP16_ABGR:
1884 args->compr = 1; /* COMPR flag */
1885
1886 for (chan = 0; chan < 2; chan++) {
1887 LLVMValueRef pack_args[2] = {
1888 values[2 * chan],
1889 values[2 * chan + 1]
1890 };
1891 LLVMValueRef packed;
1892
1893 packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
1894 args->out[chan] =
1895 LLVMBuildBitCast(ctx->gallivm.builder,
1896 packed, ctx->f32, "");
1897 }
1898 break;
1899
1900 case V_028714_SPI_SHADER_UNORM16_ABGR:
1901 for (chan = 0; chan < 4; chan++) {
1902 val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
1903 val[chan] = LLVMBuildFMul(builder, val[chan],
1904 LLVMConstReal(ctx->f32, 65535), "");
1905 val[chan] = LLVMBuildFAdd(builder, val[chan],
1906 LLVMConstReal(ctx->f32, 0.5), "");
1907 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1908 ctx->i32, "");
1909 }
1910
1911 args->compr = 1; /* COMPR flag */
1912 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1913 si_llvm_pack_two_int16(ctx, val));
1914 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1915 si_llvm_pack_two_int16(ctx, val+2));
1916 break;
1917
1918 case V_028714_SPI_SHADER_SNORM16_ABGR:
1919 for (chan = 0; chan < 4; chan++) {
1920 /* Clamp between [-1, 1]. */
1921 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1922 values[chan],
1923 LLVMConstReal(ctx->f32, 1));
1924 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1925 val[chan],
1926 LLVMConstReal(ctx->f32, -1));
1927 /* Convert to a signed integer in [-32767, 32767]. */
1928 val[chan] = LLVMBuildFMul(builder, val[chan],
1929 LLVMConstReal(ctx->f32, 32767), "");
1930 /* If positive, add 0.5, else add -0.5. */
1931 val[chan] = LLVMBuildFAdd(builder, val[chan],
1932 LLVMBuildSelect(builder,
1933 LLVMBuildFCmp(builder, LLVMRealOGE,
1934 val[chan], base->zero, ""),
1935 LLVMConstReal(ctx->f32, 0.5),
1936 LLVMConstReal(ctx->f32, -0.5), ""), "");
1937 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1938 }
1939
1940 args->compr = 1; /* COMPR flag */
1941 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1942 si_llvm_pack_two_int32_as_int16(ctx, val));
1943 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1944 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1945 break;
1946
1947 case V_028714_SPI_SHADER_UINT16_ABGR: {
1948 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1949 is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
1950 LLVMValueRef max_alpha =
1951 !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1952
1953 /* Clamp. */
1954 for (chan = 0; chan < 4; chan++) {
1955 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1956 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1957 val[chan],
1958 chan == 3 ? max_alpha : max_rgb);
1959 }
1960
1961 args->compr = 1; /* COMPR flag */
1962 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1963 si_llvm_pack_two_int16(ctx, val));
1964 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1965 si_llvm_pack_two_int16(ctx, val+2));
1966 break;
1967 }
1968
1969 case V_028714_SPI_SHADER_SINT16_ABGR: {
1970 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1971 is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
1972 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1973 is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
1974 LLVMValueRef max_alpha =
1975 !is_int10 ? max_rgb : ctx->i32_1;
1976 LLVMValueRef min_alpha =
1977 !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1978
1979 /* Clamp. */
1980 for (chan = 0; chan < 4; chan++) {
1981 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1982 val[chan] = lp_build_emit_llvm_binary(bld_base,
1983 TGSI_OPCODE_IMIN,
1984 val[chan], chan == 3 ? max_alpha : max_rgb);
1985 val[chan] = lp_build_emit_llvm_binary(bld_base,
1986 TGSI_OPCODE_IMAX,
1987 val[chan], chan == 3 ? min_alpha : min_rgb);
1988 }
1989
1990 args->compr = 1; /* COMPR flag */
1991 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1992 si_llvm_pack_two_int32_as_int16(ctx, val));
1993 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1994 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1995 break;
1996 }
1997
1998 case V_028714_SPI_SHADER_32_ABGR:
1999 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2000 break;
2001 }
2002 }
2003
2004 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2005 LLVMValueRef alpha)
2006 {
2007 struct si_shader_context *ctx = si_shader_context(bld_base);
2008
2009 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2010 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2011 SI_PARAM_ALPHA_REF);
2012
2013 LLVMValueRef alpha_pass =
2014 lp_build_cmp(&bld_base->base,
2015 ctx->shader->key.part.ps.epilog.alpha_func,
2016 alpha, alpha_ref);
2017 LLVMValueRef arg =
2018 lp_build_select(&bld_base->base,
2019 alpha_pass,
2020 LLVMConstReal(ctx->f32, 1.0f),
2021 LLVMConstReal(ctx->f32, -1.0f));
2022
2023 ac_build_kill(&ctx->ac, arg);
2024 } else {
2025 ac_build_kill(&ctx->ac, NULL);
2026 }
2027 }
2028
2029 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2030 LLVMValueRef alpha,
2031 unsigned samplemask_param)
2032 {
2033 struct si_shader_context *ctx = si_shader_context(bld_base);
2034 struct gallivm_state *gallivm = &ctx->gallivm;
2035 LLVMValueRef coverage;
2036
2037 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2038 coverage = LLVMGetParam(ctx->main_fn,
2039 samplemask_param);
2040 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2041
2042 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2043 ctx->i32,
2044 &coverage, 1, LP_FUNC_ATTR_READNONE);
2045
2046 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2047 ctx->f32, "");
2048
2049 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2050 LLVMConstReal(ctx->f32,
2051 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2052
2053 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2054 }
2055
2056 static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
2057 struct ac_export_args *pos, LLVMValueRef *out_elts)
2058 {
2059 struct si_shader_context *ctx = si_shader_context(bld_base);
2060 struct lp_build_context *base = &bld_base->base;
2061 unsigned reg_index;
2062 unsigned chan;
2063 unsigned const_chan;
2064 LLVMValueRef base_elt;
2065 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
2066 LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
2067 SI_VS_CONST_CLIP_PLANES, 0);
2068 LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);
2069
2070 for (reg_index = 0; reg_index < 2; reg_index ++) {
2071 struct ac_export_args *args = &pos[2 + reg_index];
2072
2073 args->out[0] =
2074 args->out[1] =
2075 args->out[2] =
2076 args->out[3] = LLVMConstReal(ctx->f32, 0.0f);
2077
2078 /* Compute dot products of position and user clip plane vectors */
2079 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
2080 for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
2081 LLVMValueRef addr =
2082 LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
2083 const_chan) * 4, 0);
2084 base_elt = buffer_load_const(ctx, const_resource,
2085 addr);
2086 args->out[chan] =
2087 lp_build_add(base, args->out[chan],
2088 lp_build_mul(base, base_elt,
2089 out_elts[const_chan]));
2090 }
2091 }
2092
2093 args->enabled_channels = 0xf;
2094 args->valid_mask = 0;
2095 args->done = 0;
2096 args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
2097 args->compr = 0;
2098 }
2099 }
2100
2101 static void si_dump_streamout(struct pipe_stream_output_info *so)
2102 {
2103 unsigned i;
2104
2105 if (so->num_outputs)
2106 fprintf(stderr, "STREAMOUT\n");
2107
2108 for (i = 0; i < so->num_outputs; i++) {
2109 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2110 so->output[i].start_component;
2111 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2112 i, so->output[i].output_buffer,
2113 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2114 so->output[i].register_index,
2115 mask & 1 ? "x" : "",
2116 mask & 2 ? "y" : "",
2117 mask & 4 ? "z" : "",
2118 mask & 8 ? "w" : "");
2119 }
2120 }
2121
2122 static void emit_streamout_output(struct si_shader_context *ctx,
2123 LLVMValueRef const *so_buffers,
2124 LLVMValueRef const *so_write_offsets,
2125 struct pipe_stream_output *stream_out,
2126 struct si_shader_output_values *shader_out)
2127 {
2128 struct gallivm_state *gallivm = &ctx->gallivm;
2129 LLVMBuilderRef builder = gallivm->builder;
2130 unsigned buf_idx = stream_out->output_buffer;
2131 unsigned start = stream_out->start_component;
2132 unsigned num_comps = stream_out->num_components;
2133 LLVMValueRef out[4];
2134
2135 assert(num_comps && num_comps <= 4);
2136 if (!num_comps || num_comps > 4)
2137 return;
2138
2139 /* Load the output as int. */
2140 for (int j = 0; j < num_comps; j++) {
2141 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
2142
2143 out[j] = LLVMBuildBitCast(builder,
2144 shader_out->values[start + j],
2145 ctx->i32, "");
2146 }
2147
2148 /* Pack the output. */
2149 LLVMValueRef vdata = NULL;
2150
2151 switch (num_comps) {
2152 case 1: /* as i32 */
2153 vdata = out[0];
2154 break;
2155 case 2: /* as v2i32 */
2156 case 3: /* as v4i32 (aligned to 4) */
2157 case 4: /* as v4i32 */
2158 vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
2159 for (int j = 0; j < num_comps; j++) {
2160 vdata = LLVMBuildInsertElement(builder, vdata, out[j],
2161 LLVMConstInt(ctx->i32, j, 0), "");
2162 }
2163 break;
2164 }
2165
2166 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
2167 vdata, num_comps,
2168 so_write_offsets[buf_idx],
2169 ctx->i32_0,
2170 stream_out->dst_offset * 4, 1, 1, true, false);
2171 }
2172
2173 /**
2174 * Write streamout data to buffers for vertex stream @p stream (different
2175 * vertex streams can occur for GS copy shaders).
2176 */
2177 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
2178 struct si_shader_output_values *outputs,
2179 unsigned noutput, unsigned stream)
2180 {
2181 struct si_shader_selector *sel = ctx->shader->selector;
2182 struct pipe_stream_output_info *so = &sel->so;
2183 struct gallivm_state *gallivm = &ctx->gallivm;
2184 LLVMBuilderRef builder = gallivm->builder;
2185 int i;
2186 struct lp_build_if_state if_ctx;
2187
2188 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
2189 LLVMValueRef so_vtx_count =
2190 unpack_param(ctx, ctx->param_streamout_config, 16, 7);
2191
2192 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
2193
2194 /* can_emit = tid < so_vtx_count; */
2195 LLVMValueRef can_emit =
2196 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
2197
2198 /* Emit the streamout code conditionally. This actually avoids
2199 * out-of-bounds buffer access. The hw tells us via the SGPR
2200 * (so_vtx_count) which threads are allowed to emit streamout data. */
2201 lp_build_if(&if_ctx, gallivm, can_emit);
2202 {
2203 /* The buffer offset is computed as follows:
2204 * ByteOffset = streamout_offset[buffer_id]*4 +
2205 * (streamout_write_index + thread_id)*stride[buffer_id] +
2206 * attrib_offset
2207 */
2208
2209 LLVMValueRef so_write_index =
2210 LLVMGetParam(ctx->main_fn,
2211 ctx->param_streamout_write_index);
2212
2213 /* Compute (streamout_write_index + thread_id). */
2214 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
2215
2216 /* Load the descriptor and compute the write offset for each
2217 * enabled buffer. */
2218 LLVMValueRef so_write_offset[4] = {};
2219 LLVMValueRef so_buffers[4];
2220 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
2221 ctx->param_rw_buffers);
2222
2223 for (i = 0; i < 4; i++) {
2224 if (!so->stride[i])
2225 continue;
2226
2227 LLVMValueRef offset = LLVMConstInt(ctx->i32,
2228 SI_VS_STREAMOUT_BUF0 + i, 0);
2229
2230 so_buffers[i] = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
2231
2232 LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
2233 ctx->param_streamout_offset[i]);
2234 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
2235
2236 so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
2237 LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
2238 so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
2239 }
2240
2241 /* Write streamout data. */
2242 for (i = 0; i < so->num_outputs; i++) {
2243 unsigned reg = so->output[i].register_index;
2244
2245 if (reg >= noutput)
2246 continue;
2247
2248 if (stream != so->output[i].stream)
2249 continue;
2250
2251 emit_streamout_output(ctx, so_buffers, so_write_offset,
2252 &so->output[i], &outputs[reg]);
2253 }
2254 }
2255 lp_build_endif(&if_ctx);
2256 }
2257
2258
/* Generate export instructions for hardware VS shader stage.
 *
 * Walks all shader outputs, routes each one to a position export
 * (V_008DFC_SQ_EXP_POS..+3) or a parameter export (V_008DFC_SQ_EXP_PARAM+n),
 * collects the "misc" values (point size, edge flag, layer, viewport index)
 * into the second position export, and finally emits the buffered position
 * exports with the last one marked "done".
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	/* Position exports are buffered in pos_args so they can be emitted
	 * last, in order, with the final one flagged as the last export. */
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].semantic_name;
		semantic_index = outputs[i].semantic_index;
		bool export_param = true;

		/* Decide whether this output's parameter export can be
		 * killed because the next stage never reads it. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION: /* ignore these */
		case TGSI_SEMANTIC_PSIZE:
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_EDGEFLAG:
			break;
		case TGSI_SEMANTIC_GENERIC:
			/* don't process indices the function can't handle */
			if (semantic_index >= SI_MAX_IO_GENERIC)
				break;
			/* fall through */
		default:
			if (shader->key.opt.hw_vs.kill_outputs &
			    (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
				export_param = false;
		}

		/* If no component belongs to vertex stream 0, the output is
		 * never rasterized, so its parameter export is dead too. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			export_param = false;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc vector (pos_args[1]) below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc vector (pos_args[1]) below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Save the value, then also export it as a generic
			 * parameter so the fragment shader can read it. */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			/* Same dual treatment as LAYER. */
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (shader->key.opt.hw_vs.clip_disable) {
				semantic_name = TGSI_SEMANTIC_GENERIC;
				goto handle_semantic;
			}
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (shader->key.opt.hw_vs.clip_disable)
				continue;
			/* Expanded into clip-distance exports pos[2]/pos[3]. */
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			if (!export_param)
				continue;
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, &args);

		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			/* Buffer position exports; they are emitted together
			 * at the end of this function. */
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		} else {
			ac_build_export(&ctx->ac, &args);
		}

		/* Clip distances are exported twice: once as POS and once
		 * as a parameter for the fragment shader to read. */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = base->zero; /* X */
		pos_args[0].out[1] = base->zero; /* Y */
		pos_args[0].out[2] = base->zero; /* Z */
		pos_args[0].out[3] = base->one; /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = base->zero; /* X */
		pos_args[1].out[1] = base->zero; /* Y */
		pos_args[1].out[2] = base->zero; /* Z */
		pos_args[1].out[3] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->gallivm.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = LLVMBuildBitCast(ctx->gallivm.builder,
							      edgeflag_value,
							      ctx->f32, "");
		}

		if (ctx->screen->b.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* Merge the viewport index into bits [19:16]
				 * of the Z channel (layer bits are [10:0]). */
				v = bitcast(bld_base, TGSI_TYPE_UNSIGNED, v);
				v = LLVMBuildShl(ctx->gallivm.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->gallivm.builder, v,
						bitcast(bld_base, TGSI_TYPE_UNSIGNED,
						        pos_args[1].out[2]), "");
				pos_args[1].out[2] = bitcast(bld_base, TGSI_TYPE_FLOAT, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			/* Pre-GFX9: layer in Z, viewport index in W. */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the buffered position exports so the last one can be
	 * flagged "done" below. */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}
}
2472
2473 /**
2474 * Forward all outputs from the vertex shader to the TES. This is only used
2475 * for the fixed function TCS.
2476 */
2477 static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
2478 {
2479 struct si_shader_context *ctx = si_shader_context(bld_base);
2480 struct gallivm_state *gallivm = &ctx->gallivm;
2481 LLVMValueRef invocation_id, buffer, buffer_offset;
2482 LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
2483 uint64_t inputs;
2484
2485 invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
2486 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
2487 buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
2488
2489 lds_vertex_stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
2490 lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
2491 lds_vertex_stride, "");
2492 lds_base = get_tcs_in_current_patch_offset(ctx);
2493 lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");
2494
2495 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
2496 while (inputs) {
2497 unsigned i = u_bit_scan64(&inputs);
2498
2499 LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
2500 LLVMConstInt(ctx->i32, 4 * i, 0),
2501 "");
2502
2503 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
2504 get_rel_patch_id(ctx),
2505 invocation_id,
2506 LLVMConstInt(ctx->i32, i, 0));
2507
2508 LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
2509 lds_ptr);
2510
2511 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
2512 buffer_offset, 0, 1, 0, true, false);
2513 }
2514 }
2515
/* Read the per-patch tessellation factors from LDS and write them to the
 * tess-factor ring buffer (and, if the TES reads them, also to the
 * off-chip TCS->TES buffer).
 *
 * Only invocation 0 of each patch performs the stores; all other
 * invocations are masked out by the outer conditional.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations have finished writing tess levels to
	 * LDS before invocation 0 reads them back. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

	/* Each patch slot is 4 dwords wide in LDS. */
	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_inner_index * 4, 0), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_outer_index * 4, 0), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		outer[0] = out[1] = lds_load(bld_base, TGSI_TYPE_SIGNED, 0, lds_outer);
		outer[1] = out[0] = lds_load(bld_base, TGSI_TYPE_SIGNED, 1, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	/* Quads need 6 dwords, so a second vec2 store is required. */
	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch of the threadgroup writes the control word. */
	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->b.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		/* Pad the vector to a power-of-two size for the gather. */
		outer_vec = lp_build_gather_values(gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
2678
2679 static LLVMValueRef
2680 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2681 unsigned param, unsigned return_index)
2682 {
2683 return LLVMBuildInsertValue(ctx->gallivm.builder, ret,
2684 LLVMGetParam(ctx->main_fn, param),
2685 return_index, "");
2686 }
2687
2688 static LLVMValueRef
2689 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2690 unsigned param, unsigned return_index)
2691 {
2692 LLVMBuilderRef builder = ctx->gallivm.builder;
2693 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
2694
2695 return LLVMBuildInsertValue(builder, ret,
2696 LLVMBuildBitCast(builder, p, ctx->f32, ""),
2697 return_index, "");
2698 }
2699
2700 static LLVMValueRef
2701 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
2702 unsigned param, unsigned return_index)
2703 {
2704 LLVMBuilderRef builder = ctx->gallivm.builder;
2705 LLVMValueRef ptr, lo, hi;
2706
2707 ptr = LLVMGetParam(ctx->main_fn, param);
2708 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
2709 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
2710 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
2711 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
2712 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
2713 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
2714 }
2715
/* This only writes the tessellation factor levels.
 *
 * TCS main part epilogue: copies the fixed-function TCS inputs, then packs
 * the SGPR/VGPR values the TCS epilog part expects into the function's
 * return value. The return-slot indices form the ABI between the main
 * shader part and the epilog and differ between GFX6-8 and GFX9 (on GFX9
 * the merged LS-HS shader reserves the first 8 slots for system SGPRs).
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	/* Invocation ID is packed in bits [12:8] of tcs_rel_ids. */
	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	/* Return epilog parameters from this function. */
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->b.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		/* VGPRs follow the last SGPR slot. */
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	/* VGPR return slots are float-typed, so bitcast the i32 values. */
	rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
	invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
	tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	ctx->return_value = ret;
}
2769
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * On GFX9 LS and HS are merged into one shader; the LS part returns its
 * SGPR/VGPR inputs so the TCS part receives them in the right registers.
 * Slots 0-7 hold system SGPRs; user SGPRs start at slot 8. The slot
 * indices here are the LS->TCS ABI and must match the TCS input layout.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs (slots 0-5). */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* User SGPRs, offset by the 8 reserved system slots. */
	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	/* Descriptor pointers; each occupies two consecutive i32 slots. */
	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs (bitcast to float for the float-typed VGPR return slots). */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_patch_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_rel_ids, vgpr++);
	ctx->return_value = ret;
}
2807
/* Pass GS inputs from ES to GS on GFX9.
 *
 * On GFX9 ES and GS are merged into one shader; the ES part returns its
 * SGPR/VGPR inputs so the GS part receives them in the right registers.
 * Slots 0-7 hold system SGPRs; user SGPRs start at slot 8. The slot
 * indices here are the ES->GS ABI and must match the GS input layout.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs (slots 0-3 and 5; slot 4 is left untouched here). */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);

	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Descriptor pointers; each occupies two consecutive i32 slots. */
	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* The 5 GS vertex-offset/ID VGPRs, starting at param_gs_vtx01_offset
	 * (bitcast to float for the float-typed VGPR return slots). */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
2832
/* Epilogue of the VS main part when compiled as LS (VS before tessellation):
 * store all written outputs to LDS so the TCS (HS) can read them, then on
 * GFX9 forward the merged-shader inputs to the TCS part. */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords, packed into bits [31:24] of
	 * the VS state SGPR. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output occupies 4 dwords at a slot determined by
		 * its semantic. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
2886
/* Epilogue of the VS/TES main part when compiled as ES (shader before GS):
 * write all outputs to the ESGS ring — a memory ring buffer before GFX9,
 * LDS on GFX9 — so the GS can read them, then on GFX9 forward the
 * merged-shader inputs to the GS part. */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	/* GFX9: compute this thread's base LDS slot from its thread ID
	 * and the per-vertex ESGS item size (bytes -> dwords). */
	if (ctx->screen->b.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		lds_base = LLVMBuildMul(gallivm->builder, ac_get_thread_id(&ctx->ac),
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		int param;

		/* Ignored here; see the ARB_shader_viewport_layer_array
		 * resolution quoted in si_llvm_emit_ls_epilogue. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->b.chip_class >= GFX9) {
				lds_store(bld_base, param * 4 + chan, lds_base, out_val);
				continue;
			}

			/* Pre-GFX9: buffer store to the ESGS ring, offset
			 * by the ES->GS scratch offset SGPR. */
			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
2937
2938 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2939 {
2940 if (ctx->screen->b.chip_class >= GFX9)
2941 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
2942 else
2943 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
2944 }
2945
2946 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2947 {
2948 struct si_shader_context *ctx = si_shader_context(bld_base);
2949
2950 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2951 si_get_gs_wave_id(ctx));
2952 }
2953
/* Epilogue of the VS/TES main part when it is the last vertex-processing
 * stage: optionally clamp vertex colors, gather all outputs, handle
 * streamout, optionally export PrimitiveID, and export everything for
 * the rasterizer. */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);

	/* +1 leaves room for the optional PrimitiveID output appended below. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, gallivm, cond);
			}

			/* Clamp all 4 channels to [0, 1] in place. */
			for (j = 0; j < 4; j++) {
				addr = ctx->outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Read back every output into the export list, along with its
	 * per-channel vertex stream (2 bits per channel). */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->outputs[i][j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
					       get_primitive_id(bld_base, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
3038
/* Queue of pending pixel-shader exports (color MRTs and depth/stencil/
 * samplemask), collected so they can all be emitted together. */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args[] */
	struct ac_export_args args[10];
};
3043
3044 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
3045 bool writes_samplemask)
3046 {
3047 if (writes_z) {
3048 /* Z needs 32 bits. */
3049 if (writes_samplemask)
3050 return V_028710_SPI_SHADER_32_ABGR;
3051 else if (writes_stencil)
3052 return V_028710_SPI_SHADER_32_GR;
3053 else
3054 return V_028710_SPI_SHADER_32_R;
3055 } else if (writes_stencil || writes_samplemask) {
3056 /* Both stencil and sample mask need only 16 bits. */
3057 return V_028710_SPI_SHADER_UINT16_ABGR;
3058 } else {
3059 return V_028710_SPI_SHADER_ZERO;
3060 }
3061 }
3062
/* Queue the MRTZ export carrying depth, stencil and/or sample mask.
 * The channel layout depends on the SPI_SHADER_Z_FORMAT chosen from
 * which values are present. At least one value must be non-NULL. */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args;
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args.valid_mask = 1; /* whether the EXEC mask is valid */
	args.done = 1; /* DONE bit */

	/* Specify the target we are exporting */
	args.target = V_008DFC_SQ_EXP_MRTZ;

	args.compr = 0; /* COMP flag */
	args.out[0] = base->undef; /* R, depth */
	args.out[1] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args.out[2] = base->undef; /* B, sample mask */
	args.out[3] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* Compressed 16-bit export: stencil in X, sample mask in Y. */
		assert(!depth);
		args.compr = 1; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(ctx->gallivm.builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args.out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			/* Compressed exports enable channels in pairs. */
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args.out[1] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit export: depth in R, stencil in G, mask in B. */
		if (depth) {
			args.out[0] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args.out[1] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args.out[2] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND and HAINAN) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND &&
	    ctx->screen->b.family != CHIP_HAINAN)
		mask |= 0x1;

	/* Specify which components to enable */
	args.enabled_channels = mask;

	memcpy(&exp->args[exp->num++], &args, sizeof(args));
}
3133
/* Queue the color export(s) for one PS color output, applying the
 * epilog key states (clamping, alpha-to-one, alpha test, smoothing).
 * If FS_COLOR0_WRITES_ALL_CBUFS is active (last_cbuf > 0), color0 is
 * broadcast to every bound color buffer. is_last marks the export that
 * gets the DONE bit. */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test: only applies to the first color output. */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3200
3201 static void si_emit_ps_exports(struct si_shader_context *ctx,
3202 struct si_ps_exports *exp)
3203 {
3204 for (unsigned i = 0; i < exp->num; i++)
3205 ac_build_export(&ctx->ac, &exp->args[i]);
3206 }
3207
3208 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3209 {
3210 struct si_shader_context *ctx = si_shader_context(bld_base);
3211 struct lp_build_context *base = &bld_base->base;
3212 struct ac_export_args args;
3213
3214 args.enabled_channels = 0x0; /* enabled channels */
3215 args.valid_mask = 1; /* whether the EXEC mask is valid */
3216 args.done = 1; /* DONE bit */
3217 args.target = V_008DFC_SQ_EXP_NULL;
3218 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3219 args.out[0] = base->undef; /* R */
3220 args.out[1] = base->undef; /* G */
3221 args.out[2] = base->undef; /* B */
3222 args.out[3] = base->undef; /* A */
3223
3224 ac_build_export(&ctx->ac, &args);
3225 }
3226
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->gallivm.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z channel of gl_FragDepth is used. */
			depth = LLVMBuildLoad(builder,
					      ctx->outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: colors first (only those actually written), then
	 * depth/stencil/samplemask in fixed order. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3319
/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 */
static void emit_optimization_barrier(struct si_shader_context *ctx,
				      LLVMValueRef *pvgpr)
{
	/* Each barrier gets a unique asm string (a numbered comment) so
	 * LLVM cannot merge distinct barriers. */
	static int counter = 0;

	LLVMBuilderRef builder = ctx->gallivm.builder;
	char code[16];

	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		/* Pure barrier: void asm call with side effects. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		/* Thread the first dword of *pvgpr through the asm
		 * ("=v,0" ties output to input) so the value appears to
		 * be produced at this point. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = llvm_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		/* View the value as <N x i32>, pass element 0 through the
		 * asm, reinsert it, and cast back to the original type. */
		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}
3360
3361 void si_emit_waitcnt(struct si_shader_context *ctx, unsigned simm16)
3362 {
3363 struct gallivm_state *gallivm = &ctx->gallivm;
3364 LLVMBuilderRef builder = gallivm->builder;
3365 LLVMValueRef args[1] = {
3366 LLVMConstInt(ctx->i32, simm16, 0)
3367 };
3368 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3369 ctx->voidt, args, 1, 0);
3370 }
3371
/* TGSI MEMBAR: emit an s_waitcnt that waits on the counters implied by
 * the barrier flags. The counter macros are masks with the relevant
 * count field zeroed, so ANDing them into NOOP_WAITCNT selects which
 * counters must drain to 0. If no flag applies, emit nothing. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	/* The flags operand is a compile-time immediate. */
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	/* Thread-group barriers wait on both memory (VM) and LDS (LGKM). */
	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	if (waitcnt != NOOP_WAITCNT)
		si_emit_waitcnt(ctx, waitcnt);
}
3396
3397 static void clock_emit(
3398 const struct lp_build_tgsi_action *action,
3399 struct lp_build_tgsi_context *bld_base,
3400 struct lp_build_emit_data *emit_data)
3401 {
3402 struct si_shader_context *ctx = si_shader_context(bld_base);
3403 struct gallivm_state *gallivm = &ctx->gallivm;
3404 LLVMValueRef tmp;
3405
3406 tmp = lp_build_intrinsic(gallivm->builder, "llvm.readcyclecounter",
3407 ctx->i64, NULL, 0, 0);
3408 tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->v2i32, "");
3409
3410 emit_data->output[0] =
3411 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_0, "");
3412 emit_data->output[1] =
3413 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_1, "");
3414 }
3415
3416 LLVMTypeRef si_const_array(LLVMTypeRef elem_type, int num_elements)
3417 {
3418 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
3419 CONST_ADDR_SPACE);
3420 }
3421
3422 static void si_llvm_emit_ddxy(
3423 const struct lp_build_tgsi_action *action,
3424 struct lp_build_tgsi_context *bld_base,
3425 struct lp_build_emit_data *emit_data)
3426 {
3427 struct si_shader_context *ctx = si_shader_context(bld_base);
3428 struct gallivm_state *gallivm = &ctx->gallivm;
3429 unsigned opcode = emit_data->info->opcode;
3430 LLVMValueRef val;
3431 int idx;
3432 unsigned mask;
3433
3434 if (opcode == TGSI_OPCODE_DDX_FINE)
3435 mask = AC_TID_MASK_LEFT;
3436 else if (opcode == TGSI_OPCODE_DDY_FINE)
3437 mask = AC_TID_MASK_TOP;
3438 else
3439 mask = AC_TID_MASK_TOP_LEFT;
3440
3441 /* for DDX we want to next X pixel, DDY next Y pixel. */
3442 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3443
3444 val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");
3445 val = ac_build_ddxy(&ctx->ac, ctx->screen->has_ds_bpermute,
3446 mask, idx, ctx->lds, val);
3447 emit_data->output[emit_data->chan] = val;
3448 }
3449
3450 /*
3451 * this takes an I,J coordinate pair,
3452 * and works out the X and Y derivatives.
3453 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3454 */
3455 static LLVMValueRef si_llvm_emit_ddxy_interp(
3456 struct lp_build_tgsi_context *bld_base,
3457 LLVMValueRef interp_ij)
3458 {
3459 struct si_shader_context *ctx = si_shader_context(bld_base);
3460 struct gallivm_state *gallivm = &ctx->gallivm;
3461 LLVMValueRef result[4], a;
3462 unsigned i;
3463
3464 for (i = 0; i < 2; i++) {
3465 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
3466 LLVMConstInt(ctx->i32, i, 0), "");
3467 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3468 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3469 }
3470
3471 return lp_build_gather_values(gallivm, result, 4);
3472 }
3473
/* Fetch the extra operands for INTERP_OFFSET / INTERP_SAMPLE: the
 * (x, y) position at which to interpolate, placed in args[0..1].
 * INTERP_CENTROID needs no extra operands and falls through. */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
					     ctx->i32, "");
		sample_position = load_sample_position(ctx, sample_id);

		emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_0, "");

		/* Sample positions are in [0, 1]; convert to an offset
		 * relative to the pixel center by subtracting 0.5. */
		emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
3517
/* Emit INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET: interpolate a PS
 * input at the requested location using the hardware interpolators
 * (v_interp_* via ac_build_fs_interp), adjusting the barycentric I/J by
 * the offset/sample derivatives where needed. */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET/SAMPLE start from the center barycentrics and adjust;
	 * CENTROID uses the centroid barycentrics directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* -1: error; 0: constant interpolation (no I/J needed);
	 * otherwise: index of the main function param holding I/J. */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(gallivm, ij_out, 2);
	}

	/* Interpolate each destination channel, honoring the source
	 * swizzle. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = LLVMConstInt(ctx->i32, schan, 0);

		if (interp_param) {
			interp_param = LLVMBuildBitCast(gallivm->builder,
							interp_param, LLVMVectorType(ctx->f32, 2), "");
			LLVMValueRef i = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_0, "");
			LLVMValueRef j = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_1, "");
			emit_data->output[chan] = ac_build_fs_interp(&ctx->ac,
								     llvm_chan, attr_number, params,
								     i, j);
		} else {
			/* Constant (flat) interpolation: read from P0. */
			emit_data->output[chan] = ac_build_fs_interp_mov(&ctx->ac,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				llvm_chan, attr_number, params);
		}
	}
}
3616
/* Return a 64-bit lane mask (one bit per invocation in the wave) of
 * lanes where "value" is non-zero, implemented with llvm.amdgcn.icmp
 * (value != 0). */
static LLVMValueRef si_emit_ballot(struct si_shader_context *ctx,
				   LLVMValueRef value)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)	/* comparison: != */
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &args[0]);

	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(gallivm->builder, args[0], ctx->i32, "");

	return lp_build_intrinsic(gallivm->builder,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  LP_FUNC_ATTR_NOUNWIND |
				  LP_FUNC_ATTR_READNONE |
				  LP_FUNC_ATTR_CONVERGENT);
}
3642
/* TGSI VOTE_ALL: true iff every active invocation's predicate is true,
 * i.e. the predicate ballot equals the ballot of all active lanes. */
static void vote_all_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef active_set, vote_set;
	LLVMValueRef tmp;

	/* Ballot of the constant "true" yields the active-lane mask. */
	active_set = si_emit_ballot(ctx, ctx->i32_1);
	vote_set = si_emit_ballot(ctx, emit_data->args[0]);

	tmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
	/* Sign-extend i1 -> i32 so "true" becomes ~0 (TGSI boolean). */
	emit_data->output[emit_data->chan] =
		LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
}
3660
3661 static void vote_any_emit(
3662 const struct lp_build_tgsi_action *action,
3663 struct lp_build_tgsi_context *bld_base,
3664 struct lp_build_emit_data *emit_data)
3665 {
3666 struct si_shader_context *ctx = si_shader_context(bld_base);
3667 struct gallivm_state *gallivm = &ctx->gallivm;
3668 LLVMValueRef vote_set;
3669 LLVMValueRef tmp;
3670
3671 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3672
3673 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
3674 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3675 emit_data->output[emit_data->chan] =
3676 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3677 }
3678
/* TGSI VOTE_EQ: true iff all active invocations agree on the predicate —
 * either everyone voted true (ballot == active set) or nobody did
 * (ballot == 0). */
static void vote_eq_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef active_set, vote_set;
	LLVMValueRef all, none, tmp;

	/* Ballot of the constant "true" yields the active-lane mask. */
	active_set = si_emit_ballot(ctx, ctx->i32_1);
	vote_set = si_emit_ballot(ctx, emit_data->args[0]);

	all = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
	none = LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
			     vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
	tmp = LLVMBuildOr(gallivm->builder, all, none, "");
	/* Sign-extend i1 -> i32 so "true" becomes ~0 (TGSI boolean). */
	emit_data->output[emit_data->chan] =
		LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
}
3699
/* TGSI BALLOT: return the 64-bit mask of lanes where the source is
 * non-zero, split into two i32 outputs (low half in output[0], high
 * half in output[1] via the v2i32 bitcast). */
static void ballot_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef tmp;

	tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
	tmp = si_emit_ballot(ctx, tmp);
	tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");

	emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
	emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
}
3716
3717 static void read_invoc_fetch_args(
3718 struct lp_build_tgsi_context *bld_base,
3719 struct lp_build_emit_data *emit_data)
3720 {
3721 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
3722 0, emit_data->src_chan);
3723
3724 /* Always read the source invocation (= lane) from the X channel. */
3725 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
3726 1, TGSI_CHAN_X);
3727 emit_data->arg_count = 2;
3728 }
3729
/* Emit READ_FIRST_INVOC / READ_INVOC: broadcast one lane's value using
 * the readlane/readfirstlane intrinsic named in action->intr_name. */
static void read_lane_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &emit_data->args[0]);

	/* The intrinsic operates on i32 operands. */
	for (unsigned i = 0; i < emit_data->arg_count; ++i) {
		emit_data->args[i] = LLVMBuildBitCast(builder, emit_data->args[i],
						      ctx->i32, "");
	}

	emit_data->output[emit_data->chan] =
		ac_build_intrinsic(&ctx->ac, action->intr_name,
				   ctx->i32, emit_data->args, emit_data->arg_count,
				   AC_FUNC_ATTR_READNONE |
				   AC_FUNC_ATTR_CONVERGENT);
}
3754
3755 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3756 struct lp_build_emit_data *emit_data)
3757 {
3758 struct si_shader_context *ctx = si_shader_context(bld_base);
3759 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3760 LLVMValueRef imm;
3761 unsigned stream;
3762
3763 assert(src0.File == TGSI_FILE_IMMEDIATE);
3764
3765 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
3766 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
3767 return stream;
3768 }
3769
/* Emit one vertex from the geometry shader.
 *
 * Stores the current values of all outputs belonging to the instruction's
 * stream into the GSVS ring buffer, bumps the per-stream vertex counter,
 * and signals the vertex emission to the hardware via sendmsg.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	unsigned chan, offset;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		/* 1.0 keeps the thread alive, -1.0 kills it (see ac_build_kill). */
		kill = lp_build_select(&bld_base->base, can_emit,
				       LLVMConstReal(ctx->f32, 1.0f),
				       LLVMConstReal(ctx->f32, -1.0f));

		ac_build_kill(&ctx->ac, kill);
	} else {
		lp_build_if(&if_state, gallivm, can_emit);
	}

	/* Store every enabled channel of every output of this stream.
	 * 'offset' counts emitted channels; each channel occupies a
	 * gs_max_out_vertices-sized slot in the ring. */
	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];

		for (chan = 0; chan < 4; chan++) {
			/* Skip unused channels and channels that belong to
			 * a different stream (2 bits per channel). */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
3859
3860 /* Cut one primitive from the geometry shader */
3861 static void si_llvm_emit_primitive(
3862 const struct lp_build_tgsi_action *action,
3863 struct lp_build_tgsi_context *bld_base,
3864 struct lp_build_emit_data *emit_data)
3865 {
3866 struct si_shader_context *ctx = si_shader_context(bld_base);
3867 unsigned stream;
3868
3869 /* Signal primitive cut */
3870 stream = si_llvm_get_stream(bld_base, emit_data);
3871 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
3872 si_get_gs_wave_id(ctx));
3873 }
3874
/* Emit a workgroup barrier (or an equivalent wait on SI TCS). */
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;

	/* SI only (thanks to a hw bug workaround):
	 * The real barrier instruction isn't needed, because an entire patch
	 * always fits into a single wave.
	 */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->type == PIPE_SHADER_TESS_CTRL) {
		/* NOTE(review): the '&' looks like a typo for '|', but it
		 * appears intentional: the waitcnt masks seem to encode
		 * "wait for this counter" as zeroed bit-fields, so ANDing
		 * combines waits — confirm against the LGKM_CNT/VM_CNT
		 * definitions before changing.
		 */
		si_emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
		return;
	}

	lp_build_intrinsic(gallivm->builder,
			   "llvm.amdgcn.s.barrier",
			   ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
}
3896
/* Shared TGSI action table entry for the interpolation opcodes:
 * interp_fetch_args and build_interp_intrinsic are defined elsewhere
 * in this file.
 */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3901
/* Create the main LLVM function for a shader part and apply common
 * parameter and function attributes.
 *
 * \param returns/num_returns   return value types (used by shader parts
 *                              that return values to an epilog)
 * \param params/num_params     input parameter types
 * \param last_sgpr             index of the last SGPR parameter; parameters
 *                              0..last_sgpr are treated as uniform
 * \param max_workgroup_size    if non-zero, passed to LLVM via a function
 *                              attribute (e.g. so barriers aren't removed)
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr, unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    params, num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_BYVAL);
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			/* Non-pointer SGPR arguments are passed in registers. */
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
3956
3957 static void declare_streamout_params(struct si_shader_context *ctx,
3958 struct pipe_stream_output_info *so,
3959 LLVMTypeRef *params, LLVMTypeRef i32,
3960 unsigned *num_params)
3961 {
3962 int i;
3963
3964 /* Streamout SGPRs. */
3965 if (so->num_outputs) {
3966 if (ctx->type != PIPE_SHADER_TESS_EVAL)
3967 params[ctx->param_streamout_config = (*num_params)++] = i32;
3968 else
3969 ctx->param_streamout_config = *num_params - 1;
3970
3971 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
3972 }
3973 /* A streamout buffer offset is loaded if the stride is non-zero. */
3974 for (i = 0; i < 4; i++) {
3975 if (!so->stride[i])
3976 continue;
3977
3978 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
3979 }
3980 }
3981
3982 static unsigned llvm_get_type_size(LLVMTypeRef type)
3983 {
3984 LLVMTypeKind kind = LLVMGetTypeKind(type);
3985
3986 switch (kind) {
3987 case LLVMIntegerTypeKind:
3988 return LLVMGetIntTypeWidth(type) / 8;
3989 case LLVMFloatTypeKind:
3990 return 4;
3991 case LLVMPointerTypeKind:
3992 return 8;
3993 case LLVMVectorTypeKind:
3994 return LLVMGetVectorSize(type) *
3995 llvm_get_type_size(LLVMGetElementType(type));
3996 case LLVMArrayTypeKind:
3997 return LLVMGetArrayLength(type) *
3998 llvm_get_type_size(LLVMGetElementType(type));
3999 default:
4000 assert(0);
4001 return 0;
4002 }
4003 }
4004
4005 static void declare_lds_as_pointer(struct si_shader_context *ctx)
4006 {
4007 struct gallivm_state *gallivm = &ctx->gallivm;
4008
4009 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
4010 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, ctx->i32_0,
4011 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
4012 "lds");
4013 }
4014
4015 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4016 {
4017 switch (shader->selector->type) {
4018 case PIPE_SHADER_TESS_CTRL:
4019 /* Return this so that LLVM doesn't remove s_barrier
4020 * instructions on chips where we use s_barrier. */
4021 return shader->selector->screen->b.chip_class >= CIK ? 128 : 64;
4022
4023 case PIPE_SHADER_GEOMETRY:
4024 return shader->selector->screen->b.chip_class >= GFX9 ? 128 : 64;
4025
4026 case PIPE_SHADER_COMPUTE:
4027 break; /* see below */
4028
4029 default:
4030 return 0;
4031 }
4032
4033 const unsigned *properties = shader->selector->info.properties;
4034 unsigned max_work_group_size =
4035 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4036 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4037 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4038
4039 if (!max_work_group_size) {
4040 /* This is a variable group size compute shader,
4041 * compile it for the maximum possible group size.
4042 */
4043 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4044 }
4045 return max_work_group_size;
4046 }
4047
4048 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4049 LLVMTypeRef *params,
4050 unsigned *num_params,
4051 bool assign_params)
4052 {
4053 params[(*num_params)++] = si_const_array(ctx->v4i32,
4054 SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS);
4055 params[(*num_params)++] = si_const_array(ctx->v8i32,
4056 SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2);
4057
4058 if (assign_params) {
4059 ctx->param_const_and_shader_buffers = *num_params - 2;
4060 ctx->param_samplers_and_images = *num_params - 1;
4061 }
4062 }
4063
/* Declare the descriptor pointers common to non-merged shader stages:
 * the RW buffer array followed by the per-stage descriptor pointers.
 */
static void declare_default_desc_pointers(struct si_shader_context *ctx,
					  LLVMTypeRef *params,
					  unsigned *num_params)
{
	params[ctx->param_rw_buffers = (*num_params)++] =
		si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
	declare_per_stage_desc_pointers(ctx, params, num_params, true);
}
4072
/* Declare the SGPR inputs specific to the vertex shader.
 * The declaration order here is significant: it defines the parameter
 * indices recorded in the context.
 */
static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
					    LLVMTypeRef *params,
					    unsigned *num_params)
{
	params[ctx->param_vertex_buffers = (*num_params)++] =
		si_const_array(ctx->v4i32, SI_NUM_VERTEX_BUFFERS);
	params[ctx->param_base_vertex = (*num_params)++] = ctx->i32;
	params[ctx->param_start_instance = (*num_params)++] = ctx->i32;
	params[ctx->param_draw_id = (*num_params)++] = ctx->i32;
	params[ctx->param_vs_state_bits = (*num_params)++] = ctx->i32;
}
4084
/* Declare the VGPR inputs of the vertex shader.
 *
 * The set of system-value VGPRs depends on whether the VS runs as LS
 * (vertex + tess-ctrl merged pipeline). For non-copy shaders, one extra
 * VGPR per vertex input is declared for the vertex load indices; these
 * are counted in *num_prolog_vgprs.
 */
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
				   LLVMTypeRef *params, unsigned *num_params,
				   unsigned *num_prolog_vgprs)
{
	struct si_shader *shader = ctx->shader;

	params[ctx->param_vertex_id = (*num_params)++] = ctx->i32;
	if (shader->key.as_ls) {
		params[ctx->param_rel_auto_id = (*num_params)++] = ctx->i32;
		params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
	} else {
		params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
		params[ctx->param_vs_prim_id = (*num_params)++] = ctx->i32;
	}
	params[(*num_params)++] = ctx->i32; /* unused */

	if (!shader->is_gs_copy_shader) {
		/* Vertex load indices. */
		ctx->param_vertex_index0 = (*num_params);
		for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
			params[(*num_params)++] = ctx->i32;
		*num_prolog_vgprs += shader->selector->info.num_inputs;
	}
}
4109
/* Declare the VGPR inputs of the tessellation evaluation shader:
 * the (u, v) tessellation coordinates, the relative patch ID, and the
 * patch primitive ID.
 */
static void declare_tes_input_vgprs(struct si_shader_context *ctx,
				    LLVMTypeRef *params, unsigned *num_params)
{
	params[ctx->param_tes_u = (*num_params)++] = ctx->f32;
	params[ctx->param_tes_v = (*num_params)++] = ctx->f32;
	params[ctx->param_tes_rel_patch_id = (*num_params)++] = ctx->i32;
	params[ctx->param_tes_patch_id = (*num_params)++] = ctx->i32;
}
4118
enum {
	/* Convenient merged shader definitions: extra shader-type values
	 * (beyond PIPE_SHADER_*) used by create_function() on GFX9, where
	 * LS+HS and ES+GS run as single merged waves. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4124
/* Declare the input parameters (SGPRs then VGPRs) and return values of the
 * shader's main LLVM function, according to the shader type and key, then
 * create the function and set up derived info (input register counts, LDS).
 *
 * The declaration order in each case defines the hardware input layout, so
 * it must not be changed independently of the rest of the driver.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[100]; /* just make it large enough */
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_sgpr, num_params = 0, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;

	/* Set MERGED shaders. */
	if (ctx->screen->b.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_default_desc_pointers(ctx, params, &num_params);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		if (shader->key.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			if (shader->is_gs_copy_shader)
				num_params = ctx->param_rw_buffers + 1;

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		declare_vs_input_vgprs(ctx, params, &num_params,
				       &num_prolog_vgprs);
		break;

	case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 3; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */

		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_TESS_CTRL);
		last_sgpr = num_params - 1;

		/* VGPRs (first TCS, then VS) */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_GS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						(ctx->type == PIPE_SHADER_VERTEX ||
						 ctx->type == PIPE_SHADER_TESS_EVAL));
		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_specific_input_sgprs(ctx, params, &num_params);
		} else {
			/* TESS_EVAL (and also GEOMETRY):
			 * Declare as many input SGPRs as the VS has. */
			params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
			params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[ctx->param_vs_state_bits = num_params++] = ctx->i32; /* unused */
		}

		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_GEOMETRY);
		last_sgpr = num_params - 1;

		/* VGPRs (first GS, then VS/TES) */
		params[ctx->param_gs_vtx01_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx23_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx45_offset = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx, params, &num_params);
		}

		if (ctx->type == PIPE_SHADER_VERTEX ||
		    ctx->type == PIPE_SHADER_TESS_EVAL) {
			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;

		if (shader->key.as_es) {
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		declare_tes_input_vgprs(ctx, params, &num_params);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_wave_id = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_gs_vtx0_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx1_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx2_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx3_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx4_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx5_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		break;

	case PIPE_SHADER_FRAGMENT:
		declare_default_desc_pointers(ctx, params, &num_params);
		/* PS inputs use fixed SI_PARAM_* slots. */
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		shader->info.face_vgpr_index = 20;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			assert(num_params + num_color_elements <= ARRAY_SIZE(params));
			for (i = 0; i < num_color_elements; i++)
				params[num_params++] = ctx->f32;

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_default_desc_pointers(ctx, params, &num_params);
		if (shader->selector->info.uses_grid_size)
			params[ctx->param_grid_size = num_params++] = v3i32;
		if (shader->selector->info.uses_block_size)
			params[ctx->param_block_size = num_params++] = v3i32;

		for (i = 0; i < 3; i++) {
			ctx->param_block_id[i] = -1;
			if (shader->selector->info.uses_block_id[i])
				params[ctx->param_block_id[i] = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		params[ctx->param_thread_id = num_params++] = v3i32;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, "main", returns, num_returns, params,
			   num_params, last_sgpr,
			   si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    ctx->separate_prolog) {
		si_llvm_add_attribute(ctx->main_fn,
				      "InitialPSInputAddr",
				      S_0286D0_PERSP_SAMPLE_ENA(1) |
				      S_0286D0_PERSP_CENTER_ENA(1) |
				      S_0286D0_PERSP_CENTROID_ENA(1) |
				      S_0286D0_LINEAR_SAMPLE_ENA(1) |
				      S_0286D0_LINEAR_CENTER_ENA(1) |
				      S_0286D0_LINEAR_CENTROID_ENA(1) |
				      S_0286D0_FRONT_FACE_ENA(1) |
				      S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers: parameters up to last_sgpr are SGPRs,
	 * the rest are VGPRs. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	for (; i < num_params; ++i)
		shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Inputs supplied by the prolog are not inputs of the main part. */
	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	if (!ctx->screen->has_ds_bpermute &&
	    bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if (shader->key.as_ls ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (ctx->screen->b.chip_class >= GFX9 &&
	     (shader->key.as_es ||
	      ctx->type == PIPE_SHADER_GEOMETRY)))
		declare_lds_as_pointer(ctx);
}
4479
4480 /**
4481 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
4482 * for later use.
4483 */
4484 static void preload_ring_buffers(struct si_shader_context *ctx)
4485 {
4486 struct gallivm_state *gallivm = &ctx->gallivm;
4487 LLVMBuilderRef builder = gallivm->builder;
4488
4489 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
4490 ctx->param_rw_buffers);
4491
4492 if (ctx->screen->b.chip_class <= VI &&
4493 (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
4494 unsigned ring =
4495 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
4496 : SI_ES_RING_ESGS;
4497 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
4498
4499 ctx->esgs_ring =
4500 ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
4501 }
4502
4503 if (ctx->shader->is_gs_copy_shader) {
4504 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
4505
4506 ctx->gsvs_ring[0] =
4507 ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
4508 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
4509 const struct si_shader_selector *sel = ctx->shader->selector;
4510 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
4511 LLVMValueRef base_ring;
4512
4513 base_ring = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
4514
4515 /* The conceptual layout of the GSVS ring is
4516 * v0c0 .. vLv0 v0c1 .. vLc1 ..
4517 * but the real memory layout is swizzled across
4518 * threads:
4519 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
4520 * t16v0c0 ..
4521 * Override the buffer descriptor accordingly.
4522 */
4523 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
4524 uint64_t stream_offset = 0;
4525
4526 for (unsigned stream = 0; stream < 4; ++stream) {
4527 unsigned num_components;
4528 unsigned stride;
4529 unsigned num_records;
4530 LLVMValueRef ring, tmp;
4531
4532 num_components = sel->info.num_stream_output_components[stream];
4533 if (!num_components)
4534 continue;
4535
4536 stride = 4 * num_components * sel->gs_max_out_vertices;
4537
4538 /* Limit on the stride field for <= CIK. */
4539 assert(stride < (1 << 14));
4540
4541 num_records = 64;
4542
4543 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
4544 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
4545 tmp = LLVMBuildAdd(builder, tmp,
4546 LLVMConstInt(ctx->i64,
4547 stream_offset, 0), "");
4548 stream_offset += stride * 64;
4549
4550 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
4551 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
4552 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
4553 tmp = LLVMBuildOr(builder, tmp,
4554 LLVMConstInt(ctx->i32,
4555 S_008F04_STRIDE(stride) |
4556 S_008F04_SWIZZLE_ENABLE(1), 0), "");
4557 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
4558 ring = LLVMBuildInsertElement(builder, ring,
4559 LLVMConstInt(ctx->i32, num_records, 0),
4560 LLVMConstInt(ctx->i32, 2, 0), "");
4561 ring = LLVMBuildInsertElement(builder, ring,
4562 LLVMConstInt(ctx->i32,
4563 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
4564 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
4565 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
4566 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
4567 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
4568 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
4569 S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
4570 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
4571 S_008F0C_ADD_TID_ENABLE(1),
4572 0),
4573 LLVMConstInt(ctx->i32, 3, 0), "");
4574
4575 ctx->gsvs_ring[stream] = ring;
4576 }
4577 }
4578 }
4579
/* Kill the fragment if its position is masked off by the 32x32 polygon
 * stipple pattern stored in the SI_PS_CONST_POLY_STIPPLE buffer.
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
	desc = ac_build_indexed_load_const(&ctx->ac, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = LLVMBuildBitCast(builder, row, ctx->i32, "");
	/* Select this fragment's bit within the row. */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	ac_build_kill(&ctx->ac, bit);
}
4612
/* Parse the (register, value) pairs that the compiler wrote into the
 * binary's config section and accumulate them into *conf.
 *
 * Fields are combined with MAX2 so this can be called once per shader
 * part when parts are concatenated.
 *
 * \param binary         compiled shader binary
 * \param conf           output shader config (accumulated, not reset)
 * \param symbol_offset  offset of the symbol whose config block to read
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		/* Only the scratch rsrc relocation symbols indicate real
		 * scratch usage. */
		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* The config section is a flat list of 8-byte entries:
	 * a 32-bit register offset followed by its 32-bit value. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The hardware encodes register counts in granules:
			 * 8 SGPRs and 4 VGPRs per unit. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode = G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		/* 0x4 and 0x8 are pseudo-registers, not real hardware
		 * register offsets; the compiler uses them to report spill
		 * statistics. */
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			static bool printed;

			/* Warn only once per process to avoid log spam. */
			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
		break;
		}
	}

	/* If the compiler didn't emit SPI_PS_INPUT_ADDR, fall back to the
	 * enable mask. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
4696
4697 void si_shader_apply_scratch_relocs(struct si_shader *shader,
4698 uint64_t scratch_va)
4699 {
4700 unsigned i;
4701 uint32_t scratch_rsrc_dword0 = scratch_va;
4702 uint32_t scratch_rsrc_dword1 =
4703 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
4704
4705 /* Enable scratch coalescing. */
4706 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
4707
4708 for (i = 0 ; i < shader->binary.reloc_count; i++) {
4709 const struct ac_shader_reloc *reloc =
4710 &shader->binary.relocs[i];
4711 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
4712 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4713 &scratch_rsrc_dword0, 4);
4714 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4715 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4716 &scratch_rsrc_dword1, 4);
4717 }
4718 }
4719 }
4720
4721 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
4722 {
4723 unsigned size = shader->binary.code_size;
4724
4725 if (shader->prolog)
4726 size += shader->prolog->binary.code_size;
4727 if (shader->previous_stage)
4728 size += shader->previous_stage->binary.code_size;
4729 if (shader->prolog2)
4730 size += shader->prolog2->binary.code_size;
4731 if (shader->epilog)
4732 size += shader->epilog->binary.code_size;
4733 return size;
4734 }
4735
4736 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
4737 {
4738 const struct ac_shader_binary *prolog =
4739 shader->prolog ? &shader->prolog->binary : NULL;
4740 const struct ac_shader_binary *previous_stage =
4741 shader->previous_stage ? &shader->previous_stage->binary : NULL;
4742 const struct ac_shader_binary *prolog2 =
4743 shader->prolog2 ? &shader->prolog2->binary : NULL;
4744 const struct ac_shader_binary *epilog =
4745 shader->epilog ? &shader->epilog->binary : NULL;
4746 const struct ac_shader_binary *mainb = &shader->binary;
4747 unsigned bo_size = si_get_shader_binary_size(shader) +
4748 (!epilog ? mainb->rodata_size : 0);
4749 unsigned char *ptr;
4750
4751 assert(!prolog || !prolog->rodata_size);
4752 assert(!previous_stage || !previous_stage->rodata_size);
4753 assert(!prolog2 || !prolog2->rodata_size);
4754 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
4755 !mainb->rodata_size);
4756 assert(!epilog || !epilog->rodata_size);
4757
4758 /* GFX9 can fetch at most 128 bytes past the end of the shader.
4759 * Prevent VM faults.
4760 */
4761 if (sscreen->b.chip_class >= GFX9)
4762 bo_size += 128;
4763
4764 r600_resource_reference(&shader->bo, NULL);
4765 shader->bo = (struct r600_resource*)
4766 pipe_buffer_create(&sscreen->b.b, 0,
4767 PIPE_USAGE_IMMUTABLE,
4768 align(bo_size, SI_CPDMA_ALIGNMENT));
4769 if (!shader->bo)
4770 return -ENOMEM;
4771
4772 /* Upload. */
4773 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
4774 PIPE_TRANSFER_READ_WRITE |
4775 PIPE_TRANSFER_UNSYNCHRONIZED);
4776
4777 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
4778 * endian-independent. */
4779 if (prolog) {
4780 memcpy(ptr, prolog->code, prolog->code_size);
4781 ptr += prolog->code_size;
4782 }
4783 if (previous_stage) {
4784 memcpy(ptr, previous_stage->code, previous_stage->code_size);
4785 ptr += previous_stage->code_size;
4786 }
4787 if (prolog2) {
4788 memcpy(ptr, prolog2->code, prolog2->code_size);
4789 ptr += prolog2->code_size;
4790 }
4791
4792 memcpy(ptr, mainb->code, mainb->code_size);
4793 ptr += mainb->code_size;
4794
4795 if (epilog)
4796 memcpy(ptr, epilog->code, epilog->code_size);
4797 else if (mainb->rodata_size > 0)
4798 memcpy(ptr, mainb->rodata, mainb->rodata_size);
4799
4800 sscreen->b.ws->buffer_unmap(shader->bo->buf);
4801 return 0;
4802 }
4803
4804 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
4805 struct pipe_debug_callback *debug,
4806 const char *name, FILE *file)
4807 {
4808 char *line, *p;
4809 unsigned i, count;
4810
4811 if (binary->disasm_string) {
4812 fprintf(file, "Shader %s disassembly:\n", name);
4813 fprintf(file, "%s", binary->disasm_string);
4814
4815 if (debug && debug->debug_message) {
4816 /* Very long debug messages are cut off, so send the
4817 * disassembly one line at a time. This causes more
4818 * overhead, but on the plus side it simplifies
4819 * parsing of resulting logs.
4820 */
4821 pipe_debug_message(debug, SHADER_INFO,
4822 "Shader Disassembly Begin");
4823
4824 line = binary->disasm_string;
4825 while (*line) {
4826 p = util_strchrnul(line, '\n');
4827 count = p - line;
4828
4829 if (count) {
4830 pipe_debug_message(debug, SHADER_INFO,
4831 "%.*s", count, line);
4832 }
4833
4834 if (!*p)
4835 break;
4836 line = p + 1;
4837 }
4838
4839 pipe_debug_message(debug, SHADER_INFO,
4840 "Shader Disassembly End");
4841 }
4842 } else {
4843 fprintf(file, "Shader %s binary:\n", name);
4844 for (i = 0; i < binary->code_size; i += 4) {
4845 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
4846 binary->code[i + 3], binary->code[i + 2],
4847 binary->code[i + 1], binary->code[i]);
4848 }
4849 }
4850 }
4851
/* Print shader statistics (register usage, LDS, scratch, estimated wave
 * occupancy) to "file" and report them through the debug callback.
 *
 * \param check_debug_option  if true, only print when the corresponding
 *                            R600_DEBUG shader-dump flag is set
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 const struct si_shader *shader,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file,
				 bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
	unsigned code_size = si_get_shader_binary_size(shader);
	/* LDS allocation granularity: 512 bytes on CIK+, 256 before. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves = 10; /* hardware maximum per SIMD */

	/* Compute LDS usage for PS. */
	switch (processor) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			/* Divide the per-thread-group LDS among the waves
			 * of the group (64 threads per wave). */
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	/* Always report the stats to the debug callback, regardless of the
	 * dump options. */
	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
4944
4945 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
4946 {
4947 switch (processor) {
4948 case PIPE_SHADER_VERTEX:
4949 if (shader->key.as_es)
4950 return "Vertex Shader as ES";
4951 else if (shader->key.as_ls)
4952 return "Vertex Shader as LS";
4953 else
4954 return "Vertex Shader as VS";
4955 case PIPE_SHADER_TESS_CTRL:
4956 return "Tessellation Control Shader";
4957 case PIPE_SHADER_TESS_EVAL:
4958 if (shader->key.as_es)
4959 return "Tessellation Evaluation Shader as ES";
4960 else
4961 return "Tessellation Evaluation Shader as VS";
4962 case PIPE_SHADER_GEOMETRY:
4963 if (shader->is_gs_copy_shader)
4964 return "GS Copy Shader as VS";
4965 else
4966 return "Geometry Shader";
4967 case PIPE_SHADER_FRAGMENT:
4968 return "Pixel Shader";
4969 case PIPE_SHADER_COMPUTE:
4970 return "Compute Shader";
4971 default:
4972 return "Unknown Shader";
4973 }
4974 }
4975
/* Dump everything about a shader: its key, optionally its LLVM IR, the
 * disassembly of all its parts, and its statistics.
 *
 * \param check_debug_option  if true, honor the R600_DEBUG shader-dump
 *                            flags; if false, dump unconditionally
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor))
		si_dump_shader_key(processor, shader, file);

	/* The IR string is only recorded when sscreen->record_llvm_ir is
	 * set; see si_compile_llvm. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (r600_can_dump_shader(&sscreen->b, processor) &&
	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the order they are executed. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, debug, processor, file,
			     check_debug_option);
}
5016
5017 static int si_compile_llvm(struct si_screen *sscreen,
5018 struct ac_shader_binary *binary,
5019 struct si_shader_config *conf,
5020 LLVMTargetMachineRef tm,
5021 LLVMModuleRef mod,
5022 struct pipe_debug_callback *debug,
5023 unsigned processor,
5024 const char *name)
5025 {
5026 int r = 0;
5027 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
5028
5029 if (r600_can_dump_shader(&sscreen->b, processor)) {
5030 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
5031
5032 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
5033 fprintf(stderr, "%s LLVM IR:\n\n", name);
5034 ac_dump_module(mod);
5035 fprintf(stderr, "\n");
5036 }
5037 }
5038
5039 if (sscreen->record_llvm_ir) {
5040 char *ir = LLVMPrintModuleToString(mod);
5041 binary->llvm_ir_string = strdup(ir);
5042 LLVMDisposeMessage(ir);
5043 }
5044
5045 if (!si_replace_shader(count, binary)) {
5046 r = si_llvm_compile(mod, binary, tm, debug);
5047 if (r)
5048 return r;
5049 }
5050
5051 si_shader_binary_read_config(binary, conf, 0);
5052
5053 /* Enable 64-bit and 16-bit denormals, because there is no performance
5054 * cost.
5055 *
5056 * If denormals are enabled, all floating-point output modifiers are
5057 * ignored.
5058 *
5059 * Don't enable denormals for 32-bit floats, because:
5060 * - Floating-point output modifiers would be ignored by the hw.
5061 * - Some opcodes don't support denormals, such as v_mad_f32. We would
5062 * have to stop using those.
5063 * - SI & CI would be very slow.
5064 */
5065 conf->float_mode |= V_00B028_FP_64_DENORMS;
5066
5067 FREE(binary->config);
5068 FREE(binary->global_symbol_offsets);
5069 binary->config = NULL;
5070 binary->global_symbol_offsets = NULL;
5071
5072 /* Some shaders can't have rodata because their binaries can be
5073 * concatenated.
5074 */
5075 if (binary->rodata_size &&
5076 (processor == PIPE_SHADER_VERTEX ||
5077 processor == PIPE_SHADER_TESS_CTRL ||
5078 processor == PIPE_SHADER_TESS_EVAL ||
5079 processor == PIPE_SHADER_FRAGMENT)) {
5080 fprintf(stderr, "radeonsi: The shader can't have rodata.");
5081 return -EINVAL;
5082 }
5083
5084 return r;
5085 }
5086
5087 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5088 {
5089 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5090 LLVMBuildRetVoid(ctx->gallivm.builder);
5091 else
5092 LLVMBuildRet(ctx->gallivm.builder, ret);
5093 }
5094
/* Generate code for the hardware VS shader stage to go with a geometry shader.
 *
 * The GS copy shader reads the vertices that the geometry shader wrote to
 * the GSVS ring, performs streamout if enabled, and exports stream 0 as
 * the hardware VS output.
 *
 * \return the compiled copy shader, or NULL on failure
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	struct gallivm_state *gallivm = &ctx.gallivm;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}


	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	/* The copy shader is compiled as a hardware vertex shader. */
	ctx.type = PIPE_SHADER_VERTEX;

	builder = gallivm->builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Offset of this vertex in the GSVS ring (vertex_id * 4 bytes). */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, LLVMGetParam(ctx.main_fn,
						    ctx.param_vertex_id), 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* Each output channel carries a 2-bit stream index. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* Build a switch on the stream ID; streams with no work fall
	 * through to the "end" block. */
	end_bb = LLVMAppendBasicBlockInContext(gallivm->context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams > 0 only matter when streamout is enabled. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(gallivm->context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Unused channels and channels belonging to
				 * other streams become undef. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 is exported as the VS output. */
		if (stream == 0)
			si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(gallivm->builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5245
5246 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5247 const struct si_vs_prolog_bits *prolog,
5248 const char *prefix, FILE *f)
5249 {
5250 fprintf(f, " %s.instance_divisors = {", prefix);
5251 for (int i = 0; i < ARRAY_SIZE(prolog->instance_divisors); i++) {
5252 fprintf(f, !i ? "%u" : ", %u",
5253 prolog->instance_divisors[i]);
5254 }
5255 fprintf(f, "}\n");
5256
5257 fprintf(f, " mono.vs.fix_fetch = {");
5258 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5259 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5260 fprintf(f, "}\n");
5261 }
5262
/* Print the shader key fields that are relevant for the given processor
 * (shader stage) to "f". */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the LS prolog is merged into the TCS, so its key
		 * bits live under part.tcs. */
		if (shader->selector->screen->b.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no key of its own. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the ES (VS) prolog is merged into the GS. */
		if (shader->selector->screen->b.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Optimization bits that only apply to a hardware VS. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.hw_vs.kill_outputs = 0x%"PRIx64"\n", key->opt.hw_vs.kill_outputs);
		fprintf(f, " opt.hw_vs.clip_disable = %u\n", key->opt.hw_vs.clip_disable);
	}
}
5342
/* Initialize the shader context and register the radeonsi-specific
 * TGSI opcode emit callbacks. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;

	si_llvm_context_init(ctx, sscreen, tm);

	bld_base = &ctx->bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Fragment interpolation opcodes. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;

	/* Derivative opcodes all share one emitter. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Cross-lane (subgroup) opcodes. */
	bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
	bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
5381
5382 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5383 {
5384 struct si_shader *shader = ctx->shader;
5385 struct tgsi_shader_info *info = &shader->selector->info;
5386
5387 if ((ctx->type != PIPE_SHADER_VERTEX &&
5388 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5389 shader->key.as_ls ||
5390 shader->key.as_es)
5391 return;
5392
5393 ac_optimize_vs_outputs(&ctx->ac,
5394 ctx->main_fn,
5395 shader->info.vs_output_param_offset,
5396 info->num_outputs,
5397 &shader->info.nr_param_exports);
5398 }
5399
5400 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5401 {
5402 ctx->shader->config.private_mem_vgprs = 0;
5403
5404 /* Process all LLVM instructions. */
5405 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5406 while (bb) {
5407 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5408
5409 while (next) {
5410 LLVMValueRef inst = next;
5411 next = LLVMGetNextInstruction(next);
5412
5413 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5414 continue;
5415
5416 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5417 /* No idea why LLVM aligns allocas to 4 elements. */
5418 unsigned alignment = LLVMGetAlignment(inst);
5419 unsigned dw_size = align(llvm_get_type_size(type) / 4, alignment);
5420 ctx->shader->config.private_mem_vgprs += dw_size;
5421 }
5422 bb = LLVMGetNextBasicBlock(bb);
5423 }
5424 }
5425
5426 static void si_init_exec_full_mask(struct si_shader_context *ctx)
5427 {
5428 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
5429 lp_build_intrinsic(ctx->gallivm.builder,
5430 "llvm.amdgcn.init.exec", ctx->voidt,
5431 &full_mask, 1, LP_FUNC_ATTR_CONVERGENT);
5432 }
5433
5434 static void si_init_exec_from_input(struct si_shader_context *ctx,
5435 unsigned param, unsigned bitoffset)
5436 {
5437 LLVMValueRef args[] = {
5438 LLVMGetParam(ctx->main_fn, param),
5439 LLVMConstInt(ctx->i32, bitoffset, 0),
5440 };
5441 lp_build_intrinsic(ctx->gallivm.builder,
5442 "llvm.amdgcn.init.exec.from.input",
5443 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5444 }
5445
/* Translate the TGSI tokens of the main shader part into LLVM IR.
 *
 * Sets up the per-stage fetch/store/epilogue callbacks, creates the
 * function, and (for GFX9 merged shaders) initializes EXEC.
 *
 * \param is_monolithic  whether prologs/epilogs are compiled into the
 *                       same binary (affects GFX9 EXEC setup)
 * \return true on success
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	/* Hook up the input/output callbacks for this stage, taking the
	 * ES/LS variants of VS and TES into account. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC. If the prolog is present, set EXEC there instead.
	 * - Add a barrier before the second shader.
	 *
	 * The same thing for monolithic shaders is done in
	 * si_build_wrapper_function.
	 */
	if (ctx->screen->b.chip_class >= GFX9 && !is_monolithic) {
		if (sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !sel->vs_needs_prolog))) {
			/* First half of a merged shader: lane count is in
			 * bits 0-7 of the merged wave info. */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			/* Second half: lane count is in bits 8-15, and the
			 * first half must finish before we start. */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 8);
			si_llvm_emit_barrier(NULL, bld_base, NULL);
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* One emitted-vertex counter per vertex stream. */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
5535
5536 /**
5537 * Compute the VS prolog key, which contains all the information needed to
5538 * build the VS prolog function, and set shader->info bits where needed.
5539 *
5540 * \param info Shader info of the vertex shader.
5541 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
5542 * \param prolog_key Key of the VS prolog
5543 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
5544 * \param key Output shader part key.
5545 */
5546 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
5547 unsigned num_input_sgprs,
5548 const struct si_vs_prolog_bits *prolog_key,
5549 struct si_shader *shader_out,
5550 union si_shader_part_key *key)
5551 {
5552 memset(key, 0, sizeof(*key));
5553 key->vs_prolog.states = *prolog_key;
5554 key->vs_prolog.num_input_sgprs = num_input_sgprs;
5555 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
5556 key->vs_prolog.as_ls = shader_out->key.as_ls;
5557
5558 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
5559 key->vs_prolog.as_ls = 1;
5560 key->vs_prolog.num_merged_next_stage_vgprs = 2;
5561 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
5562 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5563 }
5564
5565 /* Set the instanceID flag. */
5566 for (unsigned i = 0; i < info->num_inputs; i++)
5567 if (key->vs_prolog.states.instance_divisors[i])
5568 shader_out->info.uses_instanceid = true;
5569 }
5570
/**
 * Compute the PS prolog key, which contains all the information needed to
 * build the PS prolog function, and set related bits in shader->config.
 *
 * \param shader          pixel shader; config.spi_ps_input_ena may gain bits
 * \param key             output shader part key (fully overwritten)
 * \param separate_prolog true when the prolog is compiled as a separate shader
 *                        part; selects different linear-interp VGPR indices
 *                        (see the LINEAR cases below)
 */
static void si_get_ps_prolog_key(struct si_shader *shader,
				 union si_shader_part_key *key,
				 bool separate_prolog)
{
	struct tgsi_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.part.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* The prolog needs WQM when the main part uses derivatives and the
	 * prolog itself produces interpolated values (colors or forced/
	 * optimized barycentrics). */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.part.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* Per COLOR0/COLOR1: record the attribute index and which
		 * barycentric VGPR pair the prolog must interpolate with. */
		for (unsigned i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			/* Skip colors whose components are all unread. */
			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.part.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 = no interpolation needed (flat). */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* VGPR indices 0/2/4 select the PERSP
				 * sample/center/centroid I,J pairs; the
				 * matching SPI input bit must be enabled. */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR assignment for non-monolithic shaders
				 * works because InitialPSInputAddr is set on the
				 * main shader and PERSP_PULL_MODEL is never used.
				 */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 6 : 9;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 8 : 11;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 10 : 13;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
5690
5691 /**
5692 * Check whether a PS prolog is required based on the key.
5693 */
5694 static bool si_need_ps_prolog(const union si_shader_part_key *key)
5695 {
5696 return key->ps_prolog.colors_read ||
5697 key->ps_prolog.states.force_persp_sample_interp ||
5698 key->ps_prolog.states.force_linear_sample_interp ||
5699 key->ps_prolog.states.force_persp_center_interp ||
5700 key->ps_prolog.states.force_linear_center_interp ||
5701 key->ps_prolog.states.bc_optimize_for_persp ||
5702 key->ps_prolog.states.bc_optimize_for_linear ||
5703 key->ps_prolog.states.poly_stipple;
5704 }
5705
5706 /**
5707 * Compute the PS epilog key, which contains all the information needed to
5708 * build the PS epilog function.
5709 */
5710 static void si_get_ps_epilog_key(struct si_shader *shader,
5711 union si_shader_part_key *key)
5712 {
5713 struct tgsi_shader_info *info = &shader->selector->info;
5714 memset(key, 0, sizeof(*key));
5715 key->ps_epilog.colors_written = info->colors_written;
5716 key->ps_epilog.writes_z = info->writes_z;
5717 key->ps_epilog.writes_stencil = info->writes_stencil;
5718 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5719 key->ps_epilog.states = shader->key.part.ps.epilog;
5720 }
5721
/**
 * Build the GS prolog function. Rotate the input vertices for triangle strips
 * with adjacency.
 *
 * The prolog returns every input unmodified (SGPRs as i32, VGPRs bitcast to
 * f32); only the vertex-index VGPRs may be remapped when the
 * tri_strip_adj_fix state is set.
 */
static void si_build_gs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	unsigned num_sgprs, num_vgprs;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMTypeRef params[48]; /* 40 SGPRs (maximum) + some VGPRs */
	LLVMTypeRef returns[48];
	LLVMValueRef func, ret;

	if (ctx->screen->b.chip_class >= GFX9) {
		num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
		num_vgprs = 5; /* ES inputs are not needed by GS */
	} else {
		num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
		num_vgprs = 8;
	}

	/* SGPR inputs are returned as i32. */
	for (unsigned i = 0; i < num_sgprs; ++i) {
		params[i] = ctx->i32;
		returns[i] = ctx->i32;
	}

	/* VGPR inputs are i32 but returned as f32 (bitcast below). */
	for (unsigned i = 0; i < num_vgprs; ++i) {
		params[num_sgprs + i] = ctx->i32;
		returns[num_sgprs + i] = ctx->f32;
	}

	/* Create the function. */
	si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
			   params, num_sgprs + num_vgprs, num_sgprs - 1, 0);
	func = ctx->main_fn;

	/* Set the full EXEC mask for the prolog, because we are only fiddling
	 * with registers here. The main shader part will set the correct EXEC
	 * mask.
	 */
	if (ctx->screen->b.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
		si_init_exec_full_mask(ctx);

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (unsigned i = 0; i < num_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(builder, ret, p, i, "");
	}
	for (unsigned i = 0; i < num_vgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
		p = LLVMBuildBitCast(builder, p, ctx->f32, "");
		ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
	}

	if (key->gs_prolog.states.tri_strip_adj_fix) {
		/* Remap the input vertices for every other primitive. */
		const unsigned gfx6_vtx_params[6] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 3,
			num_sgprs + 4,
			num_sgprs + 5,
			num_sgprs + 6
		};
		/* GFX9 packs two 16-bit vertex indices per VGPR (unpacked
		 * below), so only 3 VGPRs carry the 6 indices. */
		const unsigned gfx9_vtx_params[3] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 4,
		};
		LLVMValueRef vtx_in[6], vtx_out[6];
		LLVMValueRef prim_id, rotate;

		if (ctx->screen->b.chip_class >= GFX9) {
			for (unsigned i = 0; i < 3; i++) {
				vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
				vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
			}
		} else {
			for (unsigned i = 0; i < 6; i++)
				vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
		}

		/* Odd primitives (bit 0 of PrimID set) get rotated. */
		prim_id = LLVMGetParam(func, num_sgprs + 2);
		rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");

		/* Rotate the 6 vertices by 4 positions (i.e. select vertex
		 * (i+4) mod 6 when "rotate" is true). */
		for (unsigned i = 0; i < 6; ++i) {
			LLVMValueRef base, rotated;
			base = vtx_in[i];
			rotated = vtx_in[(i + 4) % 6];
			vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
		}

		if (ctx->screen->b.chip_class >= GFX9) {
			/* Re-pack each pair of 16-bit indices into one VGPR. */
			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef hi, out;

				hi = LLVMBuildShl(builder, vtx_out[i*2+1],
						  LLVMConstInt(ctx->i32, 16, 0), "");
				out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
				out = LLVMBuildBitCast(builder, out, ctx->f32, "");
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx9_vtx_params[i], "");
			}
		} else {
			for (unsigned i = 0; i < 6; i++) {
				LLVMValueRef out;

				out = LLVMBuildBitCast(builder, vtx_out[i], ctx->f32, "");
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx6_vtx_params[i], "");
			}
		}
	}

	LLVMBuildRet(builder, ret);
}
5842
/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 *
 * \param parts                  LLVM functions, in execution order
 * \param num_parts              number of entries in \p parts
 * \param main_part              index of the main shader part; its parameter
 *                               types define the wrapper's signature
 * \param next_shader_first_part index of the first part of the second shader
 *                               when two shaders are merged (GFX9)
 */
static void si_build_wrapper_function(struct si_shader_context *ctx,
				      LLVMValueRef *parts,
				      unsigned num_parts,
				      unsigned main_part,
				      unsigned next_shader_first_part)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = ctx->gallivm.builder;
	/* PS epilog has one arg per color component */
	LLVMTypeRef param_types[48];
	LLVMValueRef initial[48], out[48];
	LLVMTypeRef function_type;
	unsigned num_params;
	unsigned num_out, initial_num_out;
	MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
	MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
	unsigned num_sgprs, num_vgprs;
	unsigned last_sgpr_param;
	unsigned gprs;
	struct lp_build_if_state if_state;

	/* Inline every part into the wrapper and hide it from the outside. */
	for (unsigned i = 0; i < num_parts; ++i) {
		lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
		LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
	}

	/* The parameters of the wrapper function correspond to those of the
	 * first part in terms of SGPRs and VGPRs, but we use the types of the
	 * main part to get the right types. This is relevant for the
	 * dereferenceable attribute on descriptor table pointers.
	 */
	num_sgprs = 0;
	num_vgprs = 0;

	function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
	num_params = LLVMCountParamTypes(function_type);

	/* Count the SGPR/VGPR dwords of the first part (sizes in units of
	 * 4 bytes). SGPR params must come before all VGPR params. */
	for (unsigned i = 0; i < num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(parts[0], i);

		if (ac_is_sgpr_param(param)) {
			assert(num_vgprs == 0);
			num_sgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
		} else {
			num_vgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
		}
	}
	assert(num_vgprs + num_sgprs <= ARRAY_SIZE(param_types));

	/* Build the wrapper's parameter list from the main part's types,
	 * consuming exactly the dword counts determined above. */
	num_params = 0;
	last_sgpr_param = 0;
	gprs = 0;
	while (gprs < num_sgprs + num_vgprs) {
		LLVMValueRef param = LLVMGetParam(parts[main_part], num_params);
		unsigned size;

		param_types[num_params] = LLVMTypeOf(param);
		if (gprs < num_sgprs)
			last_sgpr_param = num_params;
		size = llvm_get_type_size(param_types[num_params]) / 4;
		num_params++;

		assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
		assert(gprs + size <= num_sgprs + num_vgprs &&
		       (gprs >= num_sgprs || gprs + size <= num_sgprs));

		gprs += size;
	}

	si_create_function(ctx, "wrapper", NULL, 0, param_types, num_params,
			   last_sgpr_param,
			   si_get_max_workgroup_size(ctx->shader));

	if (is_merged_shader(ctx->shader))
		si_init_exec_full_mask(ctx);

	/* Record the arguments of the function as if they were an output of
	 * a previous part.
	 */
	num_out = 0;
	num_out_sgpr = 0;

	/* Flatten the wrapper's parameters into scalar i32/f32 values in
	 * out[] (vectors are split, pointers go through i64). */
	for (unsigned i = 0; i < num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
		LLVMTypeRef param_type = LLVMTypeOf(param);
		LLVMTypeRef out_type = i <= last_sgpr_param ? ctx->i32 : ctx->f32;
		unsigned size = llvm_get_type_size(param_type) / 4;

		if (size == 1) {
			if (param_type != out_type)
				param = LLVMBuildBitCast(builder, param, out_type, "");
			out[num_out++] = param;
		} else {
			LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
				param_type = ctx->i64;
			}

			if (param_type != vector_type)
				param = LLVMBuildBitCast(builder, param, vector_type, "");

			for (unsigned j = 0; j < size; ++j)
				out[num_out++] = LLVMBuildExtractElement(
					builder, param, LLVMConstInt(ctx->i32, j, 0), "");
		}

		if (i <= last_sgpr_param)
			num_out_sgpr = num_out;
	}

	/* Keep a copy of the initial inputs; the second merged shader must
	 * consume these, not the conditionally-produced return values. */
	memcpy(initial, out, sizeof(out));
	initial_num_out = num_out;
	initial_num_out_sgpr = num_out_sgpr;

	/* Now chain the parts. */
	for (unsigned part = 0; part < num_parts; ++part) {
		LLVMValueRef in[48];
		LLVMValueRef ret;
		LLVMTypeRef ret_type;
		unsigned out_idx = 0;

		num_params = LLVMCountParams(parts[part]);
		assert(num_params <= ARRAY_SIZE(param_types));

		/* Merged shaders are executed conditionally depending
		 * on the number of enabled threads passed in the input SGPRs. */
		if (is_merged_shader(ctx->shader) &&
		    (part == 0 || part == next_shader_first_part)) {
			/* initial[3] is the merged wave info SGPR holding the
			 * per-shader thread counts. */
			LLVMValueRef ena, count = initial[3];

			/* The thread count for the 2nd shader is at bit-offset 8. */
			if (part == next_shader_first_part) {
				count = LLVMBuildLShr(builder, count,
						      LLVMConstInt(ctx->i32, 8, 0), "");
			}
			count = LLVMBuildAnd(builder, count,
					     LLVMConstInt(ctx->i32, 0x7f, 0), "");
			ena = LLVMBuildICmp(builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), count, "");
			lp_build_if(&if_state, &ctx->gallivm, ena);
		}

		/* Derive arguments for the next part from outputs of the
		 * previous one.
		 */
		for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
			LLVMValueRef param;
			LLVMTypeRef param_type;
			bool is_sgpr;
			unsigned param_size;
			LLVMValueRef arg = NULL;

			param = LLVMGetParam(parts[part], param_idx);
			param_type = LLVMTypeOf(param);
			param_size = llvm_get_type_size(param_type) / 4;
			is_sgpr = ac_is_sgpr_param(param);

			if (is_sgpr) {
				/* "byval" is only valid on actual parameters;
				 * drop it now that the part is being called. */
#if HAVE_LLVM < 0x0400
				LLVMRemoveAttribute(param, LLVMByValAttribute);
#else
				unsigned kind_id = LLVMGetEnumAttributeKindForName("byval", 5);
				LLVMRemoveEnumAttributeAtIndex(parts[part], param_idx + 1, kind_id);
#endif
				lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
			}

			assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
			assert(is_sgpr || out_idx >= num_out_sgpr);

			/* Re-assemble multi-dword params from the scalar out[]
			 * values, casting back to the expected type. */
			if (param_size == 1)
				arg = out[out_idx];
			else
				arg = lp_build_gather_values(gallivm, &out[out_idx], param_size);

			if (LLVMTypeOf(arg) != param_type) {
				if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
					arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
					arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
				} else {
					arg = LLVMBuildBitCast(builder, arg, param_type, "");
				}
			}

			in[param_idx] = arg;
			out_idx += param_size;
		}

		ret = LLVMBuildCall(builder, parts[part], in, num_params, "");

		if (is_merged_shader(ctx->shader) &&
		    (part + 1 == next_shader_first_part ||
		     part + 1 == num_parts)) {
			lp_build_endif(&if_state);

			if (part + 1 == next_shader_first_part) {
				/* A barrier is required between 2 merged shaders. */
				si_llvm_emit_barrier(NULL, &ctx->bld_base, NULL);

				/* The second half of the merged shader should use
				 * the inputs from the toplevel (wrapper) function,
				 * not the return value from the last call.
				 *
				 * That's because the last call was executed condi-
				 * tionally, so we can't consume it in the main
				 * block.
				 */
				memcpy(out, initial, sizeof(initial));
				num_out = initial_num_out;
				num_out_sgpr = initial_num_out_sgpr;
			}
			continue;
		}

		/* Extract the returned GPRs. */
		ret_type = LLVMTypeOf(ret);
		num_out = 0;
		num_out_sgpr = 0;

		if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
			assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

			unsigned ret_size = LLVMCountStructElementTypes(ret_type);

			for (unsigned i = 0; i < ret_size; ++i) {
				LLVMValueRef val =
					LLVMBuildExtractValue(builder, ret, i, "");

				out[num_out++] = val;

				/* i32 elements are SGPRs; they must all come
				 * before the f32 (VGPR) elements. */
				if (LLVMTypeOf(val) == ctx->i32) {
					assert(num_out_sgpr + 1 == num_out);
					num_out_sgpr = num_out;
				}
			}
		}
	}

	LLVMBuildRetVoid(builder);
}
6089
/**
 * Compile a TGSI shader to machine code.
 *
 * Builds the main LLVM function, and for monolithic shaders also builds the
 * required prolog/epilog parts (plus the merged LS/ES main part on GFX9) and
 * wraps everything with si_build_wrapper_function(). Finally compiles the
 * LLVM module to bytecode and derives input SGPR/VGPR counts.
 *
 * \param sscreen       screen
 * \param tm            LLVM target machine
 * \param shader        output shader (binary, config, info are filled)
 * \param is_monolithic whether to compile all parts into one binary
 * \param debug         debug callback
 * \return 0 on success, negative on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
	    !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	/* Mark all VS output params as unexported until proven otherwise. */
	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	ctx.load_system_value = declare_system_value;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS: optional prolog + main part. */
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->b.chip_class >= GFX9) {
			/* GFX9 merged LS+HS:
			 * [VS prolog?] + VS-as-LS main + TCS main + TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (ls->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !ls->vs_needs_prolog,
						  4 - !ls->vs_needs_prolog, 0,
						  ls->vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9 TCS: main part + epilog only. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->b.chip_class >= GFX9) {
			/* GFX9 merged ES+GS:
			 * [ES prolog?] + VS-as-ES main + GS prolog + GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				/* NOTE(review): this reuses the TCS ls_prolog
				 * key field for the ES prolog of a GS — looks
				 * like the key fields are expected to alias;
				 * confirm against si_shader.h. */
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9 GS: prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS: [prolog?] + main part + epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    r600_can_dump_shader(&sscreen->b, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Per-wave limits shrink when more waves must be resident. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs by walking the
	 * SPI_PS_INPUT_ADDR bits in hardware order. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;

		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			/* Remember where the front-face VGPR lands. */
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	return 0;
}
6398
6399 /**
6400 * Create, compile and return a shader part (prolog or epilog).
6401 *
6402 * \param sscreen screen
6403 * \param list list of shader parts of the same category
6404 * \param type shader type
6405 * \param key shader part key
6406 * \param prolog whether the part being requested is a prolog
6407 * \param tm LLVM target machine
6408 * \param debug debug callback
6409 * \param build the callback responsible for building the main function
6410 * \return non-NULL on success
6411 */
6412 static struct si_shader_part *
6413 si_get_shader_part(struct si_screen *sscreen,
6414 struct si_shader_part **list,
6415 enum pipe_shader_type type,
6416 bool prolog,
6417 union si_shader_part_key *key,
6418 LLVMTargetMachineRef tm,
6419 struct pipe_debug_callback *debug,
6420 void (*build)(struct si_shader_context *,
6421 union si_shader_part_key *),
6422 const char *name)
6423 {
6424 struct si_shader_part *result;
6425
6426 mtx_lock(&sscreen->shader_parts_mutex);
6427
6428 /* Find existing. */
6429 for (result = *list; result; result = result->next) {
6430 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6431 mtx_unlock(&sscreen->shader_parts_mutex);
6432 return result;
6433 }
6434 }
6435
6436 /* Compile a new one. */
6437 result = CALLOC_STRUCT(si_shader_part);
6438 result->key = *key;
6439
6440 struct si_shader shader = {};
6441 struct si_shader_context ctx;
6442 struct gallivm_state *gallivm = &ctx.gallivm;
6443
6444 si_init_shader_ctx(&ctx, sscreen, tm);
6445 ctx.shader = &shader;
6446 ctx.type = type;
6447
6448 switch (type) {
6449 case PIPE_SHADER_VERTEX:
6450 break;
6451 case PIPE_SHADER_TESS_CTRL:
6452 assert(!prolog);
6453 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6454 break;
6455 case PIPE_SHADER_GEOMETRY:
6456 assert(prolog);
6457 break;
6458 case PIPE_SHADER_FRAGMENT:
6459 if (prolog)
6460 shader.key.part.ps.prolog = key->ps_prolog.states;
6461 else
6462 shader.key.part.ps.epilog = key->ps_epilog.states;
6463 break;
6464 default:
6465 unreachable("bad shader part");
6466 }
6467
6468 build(&ctx, key);
6469
6470 /* Compile. */
6471 si_llvm_optimize_module(&ctx);
6472
6473 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
6474 gallivm->module, debug, ctx.type, name)) {
6475 FREE(result);
6476 result = NULL;
6477 goto out;
6478 }
6479
6480 result->next = *list;
6481 *list = result;
6482
6483 out:
6484 si_llvm_dispose(&ctx);
6485 mtx_unlock(&sscreen->shader_parts_mutex);
6486 return result;
6487 }
6488
6489 /**
6490 * Build the vertex shader prolog function.
6491 *
6492 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6493 * All inputs are returned unmodified. The vertex load indices are
6494 * stored after them, which will be used by the API VS for fetching inputs.
6495 *
6496 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6497 * input_v0,
6498 * input_v1,
6499 * input_v2,
6500 * input_v3,
6501 * (VertexID + BaseVertex),
6502 * (InstanceID + StartInstance),
6503 * (InstanceID / 2 + StartInstance)
6504 */
6505 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
6506 union si_shader_part_key *key)
6507 {
6508 struct gallivm_state *gallivm = &ctx->gallivm;
6509 LLVMTypeRef *params, *returns;
6510 LLVMValueRef ret, func;
6511 int last_sgpr, num_params, num_returns, i;
6512 unsigned first_vs_vgpr = key->vs_prolog.num_input_sgprs +
6513 key->vs_prolog.num_merged_next_stage_vgprs;
6514 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
6515 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
6516 num_input_vgprs;
6517 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
6518
6519 ctx->param_vertex_id = first_vs_vgpr;
6520 ctx->param_instance_id = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
6521
6522 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6523 params = alloca(num_all_input_regs * sizeof(LLVMTypeRef));
6524 returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
6525 sizeof(LLVMTypeRef));
6526 num_params = 0;
6527 num_returns = 0;
6528
6529 /* Declare input and output SGPRs. */
6530 num_params = 0;
6531 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6532 params[num_params++] = ctx->i32;
6533 returns[num_returns++] = ctx->i32;
6534 }
6535 last_sgpr = num_params - 1;
6536
6537 /* Preloaded VGPRs (outputs must be floats) */
6538 for (i = 0; i < num_input_vgprs; i++) {
6539 params[num_params++] = ctx->i32;
6540 returns[num_returns++] = ctx->f32;
6541 }
6542
6543 /* Vertex load indices. */
6544 for (i = 0; i <= key->vs_prolog.last_input; i++)
6545 returns[num_returns++] = ctx->f32;
6546
6547 /* Create the function. */
6548 si_create_function(ctx, "vs_prolog", returns, num_returns, params,
6549 num_params, last_sgpr, 0);
6550 func = ctx->main_fn;
6551
6552 if (key->vs_prolog.num_merged_next_stage_vgprs &&
6553 !key->vs_prolog.is_monolithic)
6554 si_init_exec_from_input(ctx, 3, 0);
6555
6556 /* Copy inputs to outputs. This should be no-op, as the registers match,
6557 * but it will prevent the compiler from overwriting them unintentionally.
6558 */
6559 ret = ctx->return_value;
6560 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6561 LLVMValueRef p = LLVMGetParam(func, i);
6562 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6563 }
6564 for (; i < num_params; i++) {
6565 LLVMValueRef p = LLVMGetParam(func, i);
6566 p = LLVMBuildBitCast(gallivm->builder, p, ctx->f32, "");
6567 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6568 }
6569
6570 /* Compute vertex load indices from instance divisors. */
6571 for (i = 0; i <= key->vs_prolog.last_input; i++) {
6572 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
6573 LLVMValueRef index;
6574
6575 if (divisor) {
6576 /* InstanceID / Divisor + StartInstance */
6577 index = get_instance_index_for_fetch(ctx,
6578 user_sgpr_base +
6579 SI_SGPR_START_INSTANCE,
6580 divisor);
6581 } else {
6582 /* VertexID + BaseVertex */
6583 index = LLVMBuildAdd(gallivm->builder,
6584 LLVMGetParam(func, ctx->param_vertex_id),
6585 LLVMGetParam(func, user_sgpr_base +
6586 SI_SGPR_BASE_VERTEX), "");
6587 }
6588
6589 index = LLVMBuildBitCast(gallivm->builder, index, ctx->f32, "");
6590 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
6591 num_params++, "");
6592 }
6593
6594 si_llvm_build_ret(ctx, ret);
6595 }
6596
6597 static bool si_get_vs_prolog(struct si_screen *sscreen,
6598 LLVMTargetMachineRef tm,
6599 struct si_shader *shader,
6600 struct pipe_debug_callback *debug,
6601 struct si_shader *main_part,
6602 const struct si_vs_prolog_bits *key)
6603 {
6604 struct si_shader_selector *vs = main_part->selector;
6605
6606 /* The prolog is a no-op if there are no inputs. */
6607 if (!vs->vs_needs_prolog)
6608 return true;
6609
6610 /* Get the prolog. */
6611 union si_shader_part_key prolog_key;
6612 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
6613 key, shader, &prolog_key);
6614
6615 shader->prolog =
6616 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6617 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
6618 debug, si_build_vs_prolog_function,
6619 "Vertex Shader Prolog");
6620 return shader->prolog != NULL;
6621 }
6622
6623 /**
6624 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6625 */
6626 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
6627 LLVMTargetMachineRef tm,
6628 struct si_shader *shader,
6629 struct pipe_debug_callback *debug)
6630 {
6631 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
6632 &shader->key.part.vs.prolog);
6633 }
6634
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMTypeRef params[32];
	LLVMValueRef func;
	int last_sgpr, num_params = 0;

	/* Declare the input SGPRs. Only the parameters the epilog actually
	 * reads get named ctx->param_* slots; the rest are placeholders
	 * whose positions must not change.
	 * NOTE(review): this list appears to mirror the main TCS part's SGPR
	 * layout so the epilog can follow it — confirm against the main-part
	 * function declaration when editing.
	 */
	if (ctx->screen->b.chip_class >= GFX9) {
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* wave info */
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
	} else {
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
	}
	last_sgpr = num_params - 1;

	/* Three input VGPRs follow the SGPRs. */
	params[num_params++] = ctx->i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx->i32; /* invocation ID within the patch */
	params[num_params++] = ctx->i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, params, num_params, last_sgpr,
			   ctx->screen->b.chip_class >= CIK ? 128 : 64);
	declare_lds_as_pointer(ctx);
	func = ctx->main_fn;

	/* Store the tess factors, using the three VGPRs declared above. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	LLVMBuildRetVoid(gallivm->builder);
}
6701
6702 /**
6703 * Select and compile (or reuse) TCS parts (epilog).
6704 */
6705 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6706 LLVMTargetMachineRef tm,
6707 struct si_shader *shader,
6708 struct pipe_debug_callback *debug)
6709 {
6710 if (sscreen->b.chip_class >= GFX9) {
6711 struct si_shader *ls_main_part =
6712 shader->key.part.tcs.ls->main_shader_part_ls;
6713
6714 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
6715 &shader->key.part.tcs.ls_prolog))
6716 return false;
6717
6718 shader->previous_stage = ls_main_part;
6719 }
6720
6721 /* Get the epilog. */
6722 union si_shader_part_key epilog_key;
6723 memset(&epilog_key, 0, sizeof(epilog_key));
6724 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
6725
6726 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6727 PIPE_SHADER_TESS_CTRL, false,
6728 &epilog_key, tm, debug,
6729 si_build_tcs_epilog_function,
6730 "Tessellation Control Shader Epilog");
6731 return shader->epilog != NULL;
6732 }
6733
6734 /**
6735 * Select and compile (or reuse) GS parts (prolog).
6736 */
6737 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
6738 LLVMTargetMachineRef tm,
6739 struct si_shader *shader,
6740 struct pipe_debug_callback *debug)
6741 {
6742 if (sscreen->b.chip_class >= GFX9) {
6743 struct si_shader *es_main_part =
6744 shader->key.part.gs.es->main_shader_part_es;
6745
6746 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
6747 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
6748 &shader->key.part.gs.vs_prolog))
6749 return false;
6750
6751 shader->previous_stage = es_main_part;
6752 }
6753
6754 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
6755 return true;
6756
6757 union si_shader_part_key prolog_key;
6758 memset(&prolog_key, 0, sizeof(prolog_key));
6759 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
6760
6761 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
6762 PIPE_SHADER_GEOMETRY, true,
6763 &prolog_key, tm, debug,
6764 si_build_gs_prolog_function,
6765 "Geometry Shader Prolog");
6766 return shader->prolog2 != NULL;
6767 }
6768
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx->i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx->f32;

	/* Declare outputs (same as inputs + add colors if needed).
	 * The params array intentionally doubles as the returns array:
	 * the first num_params entries are the pass-through inputs, and
	 * interpolated color channels are appended after them.
	 */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", params, num_returns, params,
			   num_params, last_sgpr, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx->i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx->i1, "");

		/* The base+N offsets below are the fixed VGPR positions of
		 * the interpolation (i,j) pairs after the input SGPRs:
		 * persp sample/center/centroid at +0/+2/+4,
		 * linear sample/center/centroid at +6/+8/+10.
		 */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors (up to two COLOR semantics, 4 channels each). */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx->i32, "");
		}

		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append the interpolated channels after the pass-through
		 * outputs (num_params was left at the first color slot).
		 */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7012
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	/* Worst case: SGPRs + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params = 0, i;
	struct si_ps_exports exp = {};

	/* Declare input SGPRs. */
	params[ctx->param_rw_buffers = num_params++] = ctx->i64;
	params[ctx->param_const_and_shader_buffers = num_params++] = ctx->i64;
	params[ctx->param_samplers_and_images = num_params++] = ctx->i64;
	assert(num_params == SI_PARAM_ALPHA_REF);
	params[SI_PARAM_ALPHA_REF] = ctx->f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 channels per written color, then the
	 * optional depth/stencil/samplemask values.
	 */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Reserve at least up to the fixed samplemask location. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, params, num_params,
			   last_sgpr, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export. Only relevant when nothing else is
	 * exported: the last export must carry the "done" flag, and if no
	 * color qualifies, a null export is emitted instead (below).
	 */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written MRT from its 4 consecutive input VGPRs. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
}
7113
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also patches up config.spi_ps_input_ena so the enabled inputs are
 * consistent with what the selected prolog/epilog expect.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always required (it does the exports). */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed: when an
	 * interpolation mode is forced, replace the enabled (i,j) pairs with
	 * the forced one so the prolog's VGPR layout matches.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7210
7211 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7212 unsigned *lds_size)
7213 {
7214 /* SPI barrier management bug:
7215 * Make sure we have at least 4k of LDS in use to avoid the bug.
7216 * It applies to workgroup sizes of more than one wavefront.
7217 */
7218 if (sscreen->b.family == CHIP_BONAIRE ||
7219 sscreen->b.family == CHIP_KABINI ||
7220 sscreen->b.family == CHIP_MULLINS)
7221 *lds_size = MAX2(*lds_size, 8);
7222 }
7223
7224 static void si_fix_resource_usage(struct si_screen *sscreen,
7225 struct si_shader *shader)
7226 {
7227 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7228
7229 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7230
7231 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7232 si_get_max_workgroup_size(shader) > 64) {
7233 si_multiwave_lds_size_workaround(sscreen,
7234 &shader->config.lds_size);
7235 }
7236 }
7237
/**
 * Create a complete shader variant: either compile it monolithically or
 * assemble it from the precompiled main part plus prolog/epilog parts,
 * then fix up resource usage, dump it, and upload the binary.
 *
 * \return 0 on success, non-zero on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over. The binary is
		 * shared (not duplicated); si_shader_destroy checks
		 * is_binary_shared before freeing it.
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the final register budget is
		 * the maximum over all attached parts.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
7367
/* Free all GPU/CPU resources owned by a shader variant (but not the
 * si_shader struct itself).
 */
void si_shader_destroy(struct si_shader *shader)
{
	/* Drop the scratch buffer reference, if one was attached. */
	if (shader->scratch_bo)
		r600_resource_reference(&shader->scratch_bo, NULL);

	/* Drop the buffer holding the uploaded shader code. */
	r600_resource_reference(&shader->bo, NULL);

	/* A shared binary is owned by the main shader part (see
	 * si_shader_create), so it must not be freed here.
	 */
	if (!shader->is_binary_shared)
		radeon_shader_binary_clean(&shader->binary);

	free(shader->shader_log);
}