radeonsi: remove 8 bytes from si_shader_key with uint32_t ff_tcs_inputs_to_copy
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "util/u_memory.h"
37 #include "util/u_string.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_util.h"
40 #include "tgsi/tgsi_dump.h"
41
42 #include "ac_binary.h"
43 #include "ac_llvm_util.h"
44 #include "ac_exp_param.h"
45 #include "si_shader_internal.h"
46 #include "si_pipe.h"
47 #include "sid.h"
48
49
/* Names of the two symbols that stand in for the scratch buffer resource
 * descriptor dwords; presumably patched with the real descriptor at binary
 * upload time — TODO confirm against the shader upload code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
55
/* One shader output, gathered before building exports / ring stores. */
struct si_shader_output_values
{
	LLVMValueRef values[4];    /* one LLVM value per component (x,y,z,w) */
	unsigned semantic_name;    /* TGSI_SEMANTIC_* */
	unsigned semantic_index;   /* index for indexed semantics (e.g. GENERIC[n]) */
	ubyte vertex_stream[4];    /* per-component stream id — presumably GS vertex streams; confirm at use site */
};
63
64 static void si_init_shader_ctx(struct si_shader_context *ctx,
65 struct si_screen *sscreen,
66 LLVMTargetMachineRef tm);
67
68 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
69 struct lp_build_tgsi_context *bld_base,
70 struct lp_build_emit_data *emit_data);
71
72 static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
73 FILE *f);
74
75 static unsigned llvm_get_type_size(LLVMTypeRef type);
76
77 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
78 union si_shader_part_key *key);
79 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
80 union si_shader_part_key *key);
81 static void si_build_ps_prolog_function(struct si_shader_context *ctx,
82 union si_shader_part_key *key);
83 static void si_build_ps_epilog_function(struct si_shader_context *ctx,
84 union si_shader_part_key *key);
85
/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* LLVM address spaces used when building pointers.
 * NOTE(review): these look like AMDGPU target address-space numbers
 * (constant = 2, LDS = 3) — confirm against the LLVM AMDGPU backend docs. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};
95
96 static bool is_merged_shader(struct si_shader *shader)
97 {
98 if (shader->selector->screen->b.chip_class <= VI)
99 return false;
100
101 return shader->key.as_ls ||
102 shader->key.as_es ||
103 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
104 shader->selector->type == PIPE_SHADER_GEOMETRY;
105 }
106
107 /**
108 * Returns a unique index for a per-patch semantic name and index. The index
109 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
110 * can be calculated.
111 */
112 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
113 {
114 switch (semantic_name) {
115 case TGSI_SEMANTIC_TESSOUTER:
116 return 0;
117 case TGSI_SEMANTIC_TESSINNER:
118 return 1;
119 case TGSI_SEMANTIC_PATCH:
120 assert(index < 30);
121 return 2 + index;
122
123 default:
124 assert(!"invalid semantic name");
125 return 0;
126 }
127 }
128
129 /**
130 * Returns a unique index for a semantic name and index. The index must be
131 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
132 * calculated.
133 */
134 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
135 {
136 switch (semantic_name) {
137 case TGSI_SEMANTIC_POSITION:
138 return 0;
139 case TGSI_SEMANTIC_GENERIC:
140 /* Since some shader stages use the the highest used IO index
141 * to determine the size to allocate for inputs/outputs
142 * (in LDS, tess and GS rings). GENERIC should be placed right
143 * after POSITION to make that size as small as possible.
144 */
145 if (index < SI_MAX_IO_GENERIC)
146 return 1 + index;
147
148 assert(!"invalid generic index");
149 return 0;
150 case TGSI_SEMANTIC_PSIZE:
151 return SI_MAX_IO_GENERIC + 1;
152 case TGSI_SEMANTIC_CLIPDIST:
153 assert(index <= 1);
154 return SI_MAX_IO_GENERIC + 2 + index;
155 case TGSI_SEMANTIC_FOG:
156 return SI_MAX_IO_GENERIC + 4;
157 case TGSI_SEMANTIC_LAYER:
158 return SI_MAX_IO_GENERIC + 5;
159 case TGSI_SEMANTIC_VIEWPORT_INDEX:
160 return SI_MAX_IO_GENERIC + 6;
161 case TGSI_SEMANTIC_PRIMID:
162 return SI_MAX_IO_GENERIC + 7;
163 case TGSI_SEMANTIC_COLOR: /* these alias */
164 case TGSI_SEMANTIC_BCOLOR:
165 assert(index < 2);
166 return SI_MAX_IO_GENERIC + 8 + index;
167 case TGSI_SEMANTIC_TEXCOORD:
168 assert(index < 8);
169 assert(SI_MAX_IO_GENERIC + 10 + index < 64);
170 return SI_MAX_IO_GENERIC + 10 + index;
171 default:
172 assert(!"invalid semantic name");
173 return 0;
174 }
175 }
176
177 /**
178 * Get the value of a shader input parameter and extract a bitfield.
179 */
180 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
181 unsigned param, unsigned rshift,
182 unsigned bitwidth)
183 {
184 struct gallivm_state *gallivm = &ctx->gallivm;
185 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
186 param);
187
188 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
189 value = bitcast(&ctx->bld_base,
190 TGSI_TYPE_UNSIGNED, value);
191
192 if (rshift)
193 value = LLVMBuildLShr(gallivm->builder, value,
194 LLVMConstInt(ctx->i32, rshift, 0), "");
195
196 if (rshift + bitwidth < 32) {
197 unsigned mask = (1 << bitwidth) - 1;
198 value = LLVMBuildAnd(gallivm->builder, value,
199 LLVMConstInt(ctx->i32, mask, 0), "");
200 }
201
202 return value;
203 }
204
205 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
206 {
207 switch (ctx->type) {
208 case PIPE_SHADER_TESS_CTRL:
209 return unpack_param(ctx, ctx->param_tcs_rel_ids, 0, 8);
210
211 case PIPE_SHADER_TESS_EVAL:
212 return LLVMGetParam(ctx->main_fn,
213 ctx->param_tes_rel_patch_id);
214
215 default:
216 assert(0);
217 return NULL;
218 }
219 }
220
221 /* Tessellation shaders pass outputs to the next shader using LDS.
222 *
223 * LS outputs = TCS inputs
224 * TCS outputs = TES inputs
225 *
226 * The LDS layout is:
227 * - TCS inputs for patch 0
228 * - TCS inputs for patch 1
229 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
230 * - ...
231 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
232 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
233 * - TCS outputs for patch 1
234 * - Per-patch TCS outputs for patch 1
235 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
236 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
237 * - ...
238 *
239 * All three shaders VS(LS), TCS, TES share the same LDS space.
240 */
241
/* TCS input patch stride in dwords, packed in vs_state_bits[8:20]. */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
247
/* TCS output patch stride in dwords, packed in tcs_out_lds_layout[0:12]. */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
}
253
254 static LLVMValueRef
255 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
256 {
257 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
258 unpack_param(ctx,
259 ctx->param_tcs_out_lds_offsets,
260 0, 16),
261 4);
262 }
263
264 static LLVMValueRef
265 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
266 {
267 return lp_build_mul_imm(&ctx->bld_base.uint_bld,
268 unpack_param(ctx,
269 ctx->param_tcs_out_lds_offsets,
270 16, 16),
271 4);
272 }
273
274 static LLVMValueRef
275 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
276 {
277 struct gallivm_state *gallivm = &ctx->gallivm;
278 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
279 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
280
281 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
282 }
283
284 static LLVMValueRef
285 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
286 {
287 struct gallivm_state *gallivm = &ctx->gallivm;
288 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
289 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
290 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
291
292 return LLVMBuildAdd(gallivm->builder, patch0_offset,
293 LLVMBuildMul(gallivm->builder, patch_stride,
294 rel_patch_id, ""),
295 "");
296 }
297
298 static LLVMValueRef
299 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
300 {
301 struct gallivm_state *gallivm = &ctx->gallivm;
302 LLVMValueRef patch0_patch_data_offset =
303 get_tcs_out_patch0_patch_data_offset(ctx);
304 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
305 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
306
307 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
308 LLVMBuildMul(gallivm->builder, patch_stride,
309 rel_patch_id, ""),
310 "");
311 }
312
313 static LLVMValueRef get_instance_index_for_fetch(
314 struct si_shader_context *ctx,
315 unsigned param_start_instance, unsigned divisor)
316 {
317 struct gallivm_state *gallivm = &ctx->gallivm;
318
319 LLVMValueRef result = LLVMGetParam(ctx->main_fn,
320 ctx->param_instance_id);
321
322 /* The division must be done before START_INSTANCE is added. */
323 if (divisor > 1)
324 result = LLVMBuildUDiv(gallivm->builder, result,
325 LLVMConstInt(ctx->i32, divisor, 0), "");
326
327 return LLVMBuildAdd(gallivm->builder, result,
328 LLVMGetParam(ctx->main_fn, param_start_instance), "");
329 }
330
331 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
332 * to float. */
333 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
334 LLVMValueRef vec4,
335 unsigned double_index)
336 {
337 LLVMBuilderRef builder = ctx->gallivm.builder;
338 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->gallivm.context);
339 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
340 LLVMVectorType(f64, 2), "");
341 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
342 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
343 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
344 }
345
/**
 * Build the IR that fetches one vertex shader input.
 *
 * Loads the input's vec4 from its vertex buffer and splits it into four
 * scalar components in \p out. Formats the hardware cannot fetch natively
 * are handled according to key.mono.vs_fix_fetch: either several smaller
 * loads are issued and recombined, or the loaded values are post-processed.
 *
 * \param input_index  index of this VS input
 * \param decl         TGSI declaration of the input (unused here)
 * \param out          receives the four components; f32 values unless a
 *                     fixup bitcasts an integer into alpha
 */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);

	/* Per-input vertex index, computed by the VS prolog. */
	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		/* Normal case: one native typed buffer load. */
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       true);
	}

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input[0], llvm_chan, "");
	}

	/* Post-process the loaded values for formats needing a fixup.
	 * Cases not listed here are already correct as loaded. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(gallivm->builder, tmp, ctx->i32, "");
		else
			tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->i32, "");

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(gallivm->builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(gallivm->builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
			/* Clamp -2 (the only value below -1) up to -1. */
			clamp = LLVMBuildFCmp(gallivm->builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(gallivm->builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		/* Normalize raw u32 data to [0, 1]. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		/* Normalize raw i32 data; FIXED is 16.16 fixed point. */
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000;
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		/* Reinterpret raw u32 data as unscaled floats. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		/* Reinterpret raw i32 data as unscaled floats. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		/* Two doubles in one vec4 load; z/w get defaults. */
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* One double per 2-dword fetch. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two doubles per 4-dword fetch. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* One component per fetch; take .x of each. */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(gallivm->builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			/* Integer formats: alpha = integer 1, bitcast to f32. */
			out[3] = LLVMBuildBitCast(gallivm->builder, ctx->i32_1,
						  ctx->f32, "");
		}
		break;
	}
}
551
552 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
553 unsigned swizzle)
554 {
555 struct si_shader_context *ctx = si_shader_context(bld_base);
556
557 if (swizzle > 0)
558 return ctx->i32_0;
559
560 switch (ctx->type) {
561 case PIPE_SHADER_VERTEX:
562 return LLVMGetParam(ctx->main_fn,
563 ctx->param_vs_prim_id);
564 case PIPE_SHADER_TESS_CTRL:
565 return LLVMGetParam(ctx->main_fn,
566 ctx->param_tcs_patch_id);
567 case PIPE_SHADER_TESS_EVAL:
568 return LLVMGetParam(ctx->main_fn,
569 ctx->param_tes_patch_id);
570 case PIPE_SHADER_GEOMETRY:
571 return LLVMGetParam(ctx->main_fn,
572 ctx->param_gs_prim_id);
573 default:
574 assert(0);
575 return ctx->i32_0;
576 }
577 }
578
579 /**
580 * Return the value of tgsi_ind_register for indexing.
581 * This is the indirect index with the constant offset added to it.
582 */
583 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
584 const struct tgsi_ind_register *ind,
585 int rel_index)
586 {
587 struct gallivm_state *gallivm = &ctx->gallivm;
588 LLVMValueRef result;
589
590 result = ctx->addrs[ind->Index][ind->Swizzle];
591 result = LLVMBuildLoad(gallivm->builder, result, "");
592 result = LLVMBuildAdd(gallivm->builder, result,
593 LLVMConstInt(ctx->i32, rel_index, 0), "");
594 return result;
595 }
596
597 /**
598 * Like get_indirect_index, but restricts the return value to a (possibly
599 * undefined) value inside [0..num).
600 */
601 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
602 const struct tgsi_ind_register *ind,
603 int rel_index, unsigned num)
604 {
605 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
606
607 return si_llvm_bound_index(ctx, result, num);
608 }
609
610
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * Exactly one of \p dst / \p src is used; the address math is the same for
 * both. The result is:
 *   base_addr
 *   + vertex_index * vertex_dw_stride   (only for 2D registers)
 *   + indirect_index * 4                (only for indirect registers)
 *   + unique_attribute_index * 4
 *
 * \param vertex_dw_stride  dword stride between vertices; must be non-NULL
 *                          when the register is 2-dimensional
 * \param base_addr         starting dword address (e.g. current patch offset)
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* With an ArrayID, the index is relative to the array start. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each attribute occupies 4 dwords. */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");

		/* 2D registers use per-vertex slots; 1D use per-patch slots. */
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[first], index[first]) :
			si_shader_io_get_unique_index_patch(name[first], index[first]);
	} else {
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]) :
			si_shader_io_get_unique_index_patch(name[reg.Register.Index],
							    index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
700
701 /* The offchip buffer layout for TCS->TES is
702 *
703 * - attribute 0 of patch 0 vertex 0
704 * - attribute 0 of patch 0 vertex 1
705 * - attribute 0 of patch 0 vertex 2
706 * ...
707 * - attribute 0 of patch 1 vertex 0
708 * - attribute 0 of patch 1 vertex 1
709 * ...
710 * - attribute 1 of patch 0 vertex 0
711 * - attribute 1 of patch 0 vertex 1
712 * ...
713 * - per patch attribute 0 of patch 0
714 * - per patch attribute 0 of patch 1
715 * ...
716 *
717 * Note that every attribute has 4 components.
718 */
/**
 * Compute the address of an attribute in the TCS->TES off-chip buffer
 * (layout documented above). The result is scaled by 16, i.e. it's a byte
 * address (each attribute is a 16-byte vec4).
 *
 * \param rel_patch_id  patch index relative to the thread group
 * \param vertex_index  vertex within the patch, or NULL for a per-patch
 *                      attribute
 * \param param_index   attribute slot index (unique IO index)
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Patch geometry is packed into tcs_offchip_layout bitfields. */
	vertices_per_patch = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: slot = patch * verts_per_patch + vertex,
		 * and consecutive attributes are total_vertices apart. */
		base_addr = LLVMBuildMul(gallivm->builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: one slot per patch per attribute. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
				 LLVMBuildMul(gallivm->builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch attributes live after all per-vertex data;
		 * the start offset is packed in tcs_offchip_layout[12:31]. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
762
/**
 * Compute the TCS->TES off-chip buffer address for a TGSI register.
 *
 * Decodes the register (exactly one of \p dst / \p src is used) into a
 * vertex index and an attribute slot index, then delegates to
 * get_tcs_tes_buffer_address for the current patch.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
					struct si_shader_context *ctx,
					const struct tgsi_full_dst_register *dst,
					const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* 2D registers address a vertex within the patch. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
							  reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Indirect index is relative to the array start (ArrayID)
		 * or to the register itself. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
						 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = ctx->i32_0;
	}

	/* 2D registers use per-vertex slots, 1D per-patch slots. */
	param_index_base = reg.Register.Dimension ?
		si_shader_io_get_unique_index(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
				   LLVMConstInt(ctx->i32, param_index_base, 0),
				   "");

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}
826
/**
 * Load a value of the given TGSI type from a buffer.
 *
 * \param swizzle        component to load (0..3), or ~0 to load a full vec4
 * \param buffer         v4i32 buffer resource descriptor
 * \param offset         soffset (added to the voffset in \p base)
 * \param base           voffset in bytes
 * \param can_speculate  whether the load may be speculated (invariant)
 *
 * 64-bit types are assembled from two 32-bit loads at swizzle*4 and
 * swizzle*4 + 4 bytes.
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
				enum tgsi_opcode_type type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value, value2;
	LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
	LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);

	if (swizzle == ~0) {
		/* Whole vec4 in one 4-dword load. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
	}

	if (!tgsi_type_is_64bit(type)) {
		/* 32-bit scalar: load the vec4 and extract one component. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
		return LLVMBuildExtractElement(gallivm->builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit scalar: two adjacent dword loads combined into one value. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
862
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 * \return the loaded value bitcast to \p type (vec4 for swizzle == ~0)
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value;

	if (swizzle == ~0) {
		/* vec4: recurse per channel and gather the results. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit: combine this dword with the next one. */
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       ctx->i32_1);
		value2 = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
903
904 /**
905 * Store to LDS.
906 *
907 * \param swizzle offset (typically 0..3)
908 * \param dw_addr address in dwords
909 * \param value value to store
910 */
911 static void lds_store(struct lp_build_tgsi_context *bld_base,
912 unsigned dw_offset_imm, LLVMValueRef dw_addr,
913 LLVMValueRef value)
914 {
915 struct si_shader_context *ctx = si_shader_context(bld_base);
916 struct gallivm_state *gallivm = &ctx->gallivm;
917
918 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
919 LLVMConstInt(ctx->i32, dw_offset_imm, 0));
920
921 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
922 ac_build_indexed_store(&ctx->ac, ctx->lds,
923 dw_addr, value);
924 }
925
/**
 * Build a v4i32 buffer resource descriptor from an address given in 64K
 * units (the parameter holds address >> 16).
 *
 * The descriptor covers the maximum range (num_records = 0xffffffff) with
 * stride 0 and a 32-bit float data format.
 * NOTE(review): field semantics follow the SI buffer-descriptor layout from
 * sid.h — confirm against the GCN ISA documentation.
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
					   unsigned param)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* Reconstruct the full 48-bit base address: param << 16. */
	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as <2 x i64> (lo = address, hi = desc words 2+3), then
	 * bitcast to the v4i32 the buffer intrinsics expect. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
949
950 static LLVMValueRef fetch_input_tcs(
951 struct lp_build_tgsi_context *bld_base,
952 const struct tgsi_full_src_register *reg,
953 enum tgsi_opcode_type type, unsigned swizzle)
954 {
955 struct si_shader_context *ctx = si_shader_context(bld_base);
956 LLVMValueRef dw_addr, stride;
957
958 stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
959 dw_addr = get_tcs_in_current_patch_offset(ctx);
960 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
961
962 return lds_load(bld_base, type, swizzle, dw_addr);
963 }
964
965 static LLVMValueRef fetch_output_tcs(
966 struct lp_build_tgsi_context *bld_base,
967 const struct tgsi_full_src_register *reg,
968 enum tgsi_opcode_type type, unsigned swizzle)
969 {
970 struct si_shader_context *ctx = si_shader_context(bld_base);
971 LLVMValueRef dw_addr, stride;
972
973 if (reg->Register.Dimension) {
974 stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
975 dw_addr = get_tcs_out_current_patch_offset(ctx);
976 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
977 } else {
978 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
979 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
980 }
981
982 return lds_load(bld_base, type, swizzle, dw_addr);
983 }
984
985 static LLVMValueRef fetch_input_tes(
986 struct lp_build_tgsi_context *bld_base,
987 const struct tgsi_full_src_register *reg,
988 enum tgsi_opcode_type type, unsigned swizzle)
989 {
990 struct si_shader_context *ctx = si_shader_context(bld_base);
991 LLVMValueRef buffer, base, addr;
992
993 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
994
995 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
996 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
997
998 return buffer_load(bld_base, type, swizzle, buffer, base, addr, true);
999 }
1000
/**
 * Store a scalar TCS output to LDS (for reads by other TCS invocations and
 * by the TCS epilog) and to the off-chip buffer (for the TES).
 *
 * Only per-patch and per-vertex scalar OUTPUT stores are handled here;
 * anything else (including vector stores) is forwarded to
 * si_llvm_emit_store, which lowers vectors to scalars and calls back in.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output: vertex stride is in
		 * tcs_out_lds_layout bits [20:13]. */
		stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		/* LDS copy is only needed if some TCS invocation reads it back. */
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output: addressed from the patch data area. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				skip_lds_store = false;
				is_tess_factor = true;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: one dword store per enabled channel.
		 * Tess factors are not stored here; the epilog handles them. */
		if (inst->Dst[0].Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}
	}

	/* Full writemask: one vec4 store instead of four scalar stores. */
	if (inst->Dst[0].Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1081
/**
 * TGSI fetch callback: load a GS input.
 *
 * On GFX9 the ESGS ring lives in LDS and the per-vertex LDS offsets are
 * packed two per SGPR; on GFX6-8 the input is loaded from the ESGS ring
 * buffer in memory. PRIMID is special-cased and read from SGPRs/VGPRs.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	/* GS inputs are always 2-dimensional (vertex index + input index). */
	if (!reg->Register.Dimension)
		return NULL;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->b.chip_class >= GFX9) {
		unsigned index = reg->Dimension.Index;

		/* Vertex offsets are packed two per 32-bit param:
		 * low 16 bits for even vertices, high 16 for odd ones. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		vtx_offset = LLVMBuildAdd(gallivm->builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Whole-vector fetch: recurse once per channel and gather. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	unsigned vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += ctx->param_gs_vtx0_offset;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += ctx->param_gs_vtx2_offset - 2;
	}
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit fetch: load the next channel too and combine. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1176
1177 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1178 {
1179 switch (interpolate) {
1180 case TGSI_INTERPOLATE_CONSTANT:
1181 return 0;
1182
1183 case TGSI_INTERPOLATE_LINEAR:
1184 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1185 return SI_PARAM_LINEAR_SAMPLE;
1186 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1187 return SI_PARAM_LINEAR_CENTROID;
1188 else
1189 return SI_PARAM_LINEAR_CENTER;
1190 break;
1191 case TGSI_INTERPOLATE_COLOR:
1192 case TGSI_INTERPOLATE_PERSPECTIVE:
1193 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1194 return SI_PARAM_PERSP_SAMPLE;
1195 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1196 return SI_PARAM_PERSP_CENTROID;
1197 else
1198 return SI_PARAM_PERSP_CENTER;
1199 break;
1200 default:
1201 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1202 return -1;
1203 }
1204 }
1205
1206 /**
1207 * Interpolate a fragment shader input.
1208 *
1209 * @param ctx context
1210 * @param input_index index of the input in hardware
1211 * @param semantic_name TGSI_SEMANTIC_*
1212 * @param semantic_index semantic index
1213 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1214 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1215 * @param interp_param interpolation weights (i,j)
1216 * @param prim_mask SI_PARAM_PRIM_MASK
1217 * @param face SI_PARAM_FRONT_FACE
1218 * @param result the return value (4 components)
1219 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef attr_number;
	LLVMValueRef i, j;

	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (interp) {
		/* Split the (i,j) barycentric pair out of the packed
		 * interpolation parameter. i and j are only valid (and only
		 * used) when interp is true. */
		interp_param = LLVMBuildBitCast(gallivm->builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(gallivm->builder, interp_param,
					    ctx->i32_0, "");
		j = LLVMBuildExtractElement(gallivm->builder, interp_param,
					    ctx->i32_1, "");
	}

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.part.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and back
		 * color attributes and select per-fragment based on facing. */
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = LLVMConstInt(ctx->i32, back_attr_offset, 0);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
			LLVMValueRef front, back;

			if (interp) {
				front = ac_build_fs_interp(&ctx->ac, llvm_chan,
							attr_number, prim_mask,
							i, j);
				back = ac_build_fs_interp(&ctx->ac, llvm_chan,
							back_attr_number, prim_mask,
							i, j);
			} else {
				front = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
				back = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, back_attr_number, prim_mask);
			}

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only .x is interpolated; .yzw are fixed to (0, 0, 1). */
		if (interp) {
			result[0] = ac_build_fs_interp(&ctx->ac, ctx->i32_0,
						       attr_number, prim_mask, i, j);
		} else {
			result[0] = ac_build_fs_interp_mov(&ctx->ac, ctx->i32_0,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				attr_number, prim_mask);
		}
		result[1] =
		result[2] = LLVMConstReal(ctx->f32, 0.0f);
		result[3] = LLVMConstReal(ctx->f32, 1.0f);
	} else {
		/* Generic input: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);

			if (interp) {
				result[chan] = ac_build_fs_interp(&ctx->ac,
					llvm_chan, attr_number, prim_mask, i, j);
			} else {
				result[chan] = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
			}
		}
	}
}
1334
/**
 * Declare (i.e. compute the values of) a fragment shader input.
 *
 * Colors with two-side/interpolation state handled by the PS prolog are
 * read directly from VGPRs; all other inputs are interpolated here via
 * interp_fs_input.
 *
 * \param input_index  index of the input in hardware
 * \param decl         the TGSI declaration for this input
 * \param out          the resulting 4 channel values
 */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &ctx->bld_base.base;
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (i * 4);
		/* COLOR1's VGPRs follow COLOR0's, so skip however many
		 * channels COLOR0 occupies. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		/* Unread channels stay undef; read channels consume
		 * consecutive VGPRs. */
		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	/* 0 means flat (no interpolation weights); -1 means unhandled. */
	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1377
/* Return the sample ID, taken from bits [11:8] of the ancillary VGPR. */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1382
1383
/**
 * Load a dword from a constant buffer.
 *
 * \param resource  the buffer resource descriptor
 * \param offset    byte offset into the buffer
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1394
1395 static LLVMValueRef load_sample_position(struct si_shader_context *ctx, LLVMValueRef sample_id)
1396 {
1397 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1398 struct gallivm_state *gallivm = &ctx->gallivm;
1399 LLVMBuilderRef builder = gallivm->builder;
1400 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1401 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1402 LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
1403
1404 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1405 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1406 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1407
1408 LLVMValueRef pos[4] = {
1409 buffer_load_const(ctx, resource, offset0),
1410 buffer_load_const(ctx, resource, offset1),
1411 LLVMConstReal(ctx->f32, 0),
1412 LLVMConstReal(ctx->f32, 0)
1413 };
1414
1415 return lp_build_gather_values(gallivm, pos, 4);
1416 }
1417
/**
 * Compute the value of a TGSI system value (SV) declaration and cache it
 * in ctx->system_values[index].
 *
 * Values come from shader input SGPRs/VGPRs, packed parameter bitfields,
 * driver constant buffers, or the off-chip tess buffer, depending on the
 * semantic and shader stage.
 */
static void declare_system_value(struct si_shader_context *ctx,
				 unsigned index,
				 const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *bld = &ctx->bld_base.base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(ctx->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* The HW vertex ID doesn't include the base vertex; add it. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_base_vertex), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* "indexed draw" flag is bit 1 of vs_state_bits. */
		indexed = LLVMBuildLShr(gallivm->builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(gallivm->builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(gallivm->builder, indexed,
					LLVMGetParam(ctx->main_fn, ctx->param_base_vertex),
					ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_start_instance);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_draw_id);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS: invocation ID is in tcs_rel_ids bits [12:8];
		 * GS: it arrives in its own SGPR. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(ctx->main_fn,
					     ctx->param_gs_instance_id);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* gl_FragCoord; .w is delivered as 1/w, so take the
		 * reciprocal to recover w. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* The sample position within the pixel is the fractional
		 * part of the fragment position. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
			LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count, packed differently per stage. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* TES reads the tess factors from the off-chip buffer. */
		LLVMValueRef buffer, base, addr;
		int param = si_shader_io_get_unique_index_patch(decl->Semantic.Name, 0);

		buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
		addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
						  LLVMConstInt(ctx->i32, param, 0));

		value = buffer_load(&ctx->bld_base, TGSI_TYPE_FLOAT,
				    ~0, buffer, base, addr, true);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from a driver constant buffer:
		 * 4 outer floats at offset 0, then 4 inner floats. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&ctx->bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* A fixed block size is known at compile time; otherwise
		 * the size is passed in as a shader parameter. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Components without an assigned parameter default to 0. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* Helper invocations are lanes that are NOT "live" pixels. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* Wave size is always 64 on this hardware. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* 1 << lane_id, as a v2i32 (64-bit mask). */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* GE = ~0 << id, GT = ~1 << id, and LE/LT are the
		 * complements of GT/GE respectively. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
1698
1699 static void declare_compute_memory(struct si_shader_context *ctx,
1700 const struct tgsi_full_declaration *decl)
1701 {
1702 struct si_shader_selector *sel = ctx->shader->selector;
1703 struct gallivm_state *gallivm = &ctx->gallivm;
1704
1705 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1706 LLVMValueRef var;
1707
1708 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1709 assert(decl->Range.First == decl->Range.Last);
1710 assert(!ctx->shared_memory);
1711
1712 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1713 LLVMArrayType(ctx->i8, sel->local_size),
1714 "compute_lds",
1715 LOCAL_ADDR_SPACE);
1716 LLVMSetAlignment(var, 4);
1717
1718 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1719 }
1720
1721 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1722 {
1723 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1724 ctx->param_const_and_shader_buffers);
1725
1726 return ac_build_indexed_load_const(&ctx->ac, list_ptr,
1727 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
1728 }
1729
/**
 * TGSI fetch callback: load a value from a constant buffer.
 *
 * Handles direct and indirect buffer indices, direct and indirect element
 * addressing, whole-vector fetches (LP_CHAN_ALL, via per-channel recursion)
 * and 64-bit types (loaded as two adjacent dwords).
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Whole-vector fetch: recurse once per channel and gather. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		/* Indirect buffer index: clamp it, then load the descriptor
		 * from the combined const/shader buffer list (const buffers
		 * start after SI_NUM_SHADER_BUFFERS). */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_indexed_load_const(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	if (reg->Register.Indirect) {
		/* Indirect element address: addr-reg * 16 + constant offset. */
		addr = ctx->addrs[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    LLVMConstInt(ctx->i32, idx * 4, 0));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit type: fetch the adjacent dword and combine. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1794
1795 /* Upper 16 bits must be zero. */
1796 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
1797 LLVMValueRef val[2])
1798 {
1799 return LLVMBuildOr(ctx->gallivm.builder, val[0],
1800 LLVMBuildShl(ctx->gallivm.builder, val[1],
1801 LLVMConstInt(ctx->i32, 16, 0),
1802 ""), "");
1803 }
1804
1805 /* Upper 16 bits are ignored and will be dropped. */
1806 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
1807 LLVMValueRef val[2])
1808 {
1809 LLVMValueRef v[2] = {
1810 LLVMBuildAnd(ctx->gallivm.builder, val[0],
1811 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
1812 val[1],
1813 };
1814 return si_llvm_pack_two_int16(ctx, v);
1815 }
1816
1817 /* Initialize arguments for the shader export intrinsic */
1818 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1819 LLVMValueRef *values,
1820 unsigned target,
1821 struct ac_export_args *args)
1822 {
1823 struct si_shader_context *ctx = si_shader_context(bld_base);
1824 struct lp_build_context *base = &bld_base->base;
1825 LLVMBuilderRef builder = ctx->gallivm.builder;
1826 LLVMValueRef val[4];
1827 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1828 unsigned chan;
1829 bool is_int8, is_int10;
1830
1831 /* Default is 0xf. Adjusted below depending on the format. */
1832 args->enabled_channels = 0xf; /* writemask */
1833
1834 /* Specify whether the EXEC mask represents the valid mask */
1835 args->valid_mask = 0;
1836
1837 /* Specify whether this is the last export */
1838 args->done = 0;
1839
1840 /* Specify the target we are exporting */
1841 args->target = target;
1842
1843 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1844 const struct si_shader_key *key = &ctx->shader->key;
1845 unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
1846 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1847
1848 assert(cbuf >= 0 && cbuf < 8);
1849 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1850 is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
1851 is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
1852 }
1853
1854 args->compr = false;
1855 args->out[0] = base->undef;
1856 args->out[1] = base->undef;
1857 args->out[2] = base->undef;
1858 args->out[3] = base->undef;
1859
1860 switch (spi_shader_col_format) {
1861 case V_028714_SPI_SHADER_ZERO:
1862 args->enabled_channels = 0; /* writemask */
1863 args->target = V_008DFC_SQ_EXP_NULL;
1864 break;
1865
1866 case V_028714_SPI_SHADER_32_R:
1867 args->enabled_channels = 1; /* writemask */
1868 args->out[0] = values[0];
1869 break;
1870
1871 case V_028714_SPI_SHADER_32_GR:
1872 args->enabled_channels = 0x3; /* writemask */
1873 args->out[0] = values[0];
1874 args->out[1] = values[1];
1875 break;
1876
1877 case V_028714_SPI_SHADER_32_AR:
1878 args->enabled_channels = 0x9; /* writemask */
1879 args->out[0] = values[0];
1880 args->out[3] = values[3];
1881 break;
1882
1883 case V_028714_SPI_SHADER_FP16_ABGR:
1884 args->compr = 1; /* COMPR flag */
1885
1886 for (chan = 0; chan < 2; chan++) {
1887 LLVMValueRef pack_args[2] = {
1888 values[2 * chan],
1889 values[2 * chan + 1]
1890 };
1891 LLVMValueRef packed;
1892
1893 packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
1894 args->out[chan] =
1895 LLVMBuildBitCast(ctx->gallivm.builder,
1896 packed, ctx->f32, "");
1897 }
1898 break;
1899
1900 case V_028714_SPI_SHADER_UNORM16_ABGR:
1901 for (chan = 0; chan < 4; chan++) {
1902 val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
1903 val[chan] = LLVMBuildFMul(builder, val[chan],
1904 LLVMConstReal(ctx->f32, 65535), "");
1905 val[chan] = LLVMBuildFAdd(builder, val[chan],
1906 LLVMConstReal(ctx->f32, 0.5), "");
1907 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1908 ctx->i32, "");
1909 }
1910
1911 args->compr = 1; /* COMPR flag */
1912 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1913 si_llvm_pack_two_int16(ctx, val));
1914 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1915 si_llvm_pack_two_int16(ctx, val+2));
1916 break;
1917
1918 case V_028714_SPI_SHADER_SNORM16_ABGR:
1919 for (chan = 0; chan < 4; chan++) {
1920 /* Clamp between [-1, 1]. */
1921 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1922 values[chan],
1923 LLVMConstReal(ctx->f32, 1));
1924 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1925 val[chan],
1926 LLVMConstReal(ctx->f32, -1));
1927 /* Convert to a signed integer in [-32767, 32767]. */
1928 val[chan] = LLVMBuildFMul(builder, val[chan],
1929 LLVMConstReal(ctx->f32, 32767), "");
1930 /* If positive, add 0.5, else add -0.5. */
1931 val[chan] = LLVMBuildFAdd(builder, val[chan],
1932 LLVMBuildSelect(builder,
1933 LLVMBuildFCmp(builder, LLVMRealOGE,
1934 val[chan], base->zero, ""),
1935 LLVMConstReal(ctx->f32, 0.5),
1936 LLVMConstReal(ctx->f32, -0.5), ""), "");
1937 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1938 }
1939
1940 args->compr = 1; /* COMPR flag */
1941 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1942 si_llvm_pack_two_int32_as_int16(ctx, val));
1943 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1944 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1945 break;
1946
1947 case V_028714_SPI_SHADER_UINT16_ABGR: {
1948 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1949 is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
1950 LLVMValueRef max_alpha =
1951 !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1952
1953 /* Clamp. */
1954 for (chan = 0; chan < 4; chan++) {
1955 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1956 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1957 val[chan],
1958 chan == 3 ? max_alpha : max_rgb);
1959 }
1960
1961 args->compr = 1; /* COMPR flag */
1962 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1963 si_llvm_pack_two_int16(ctx, val));
1964 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1965 si_llvm_pack_two_int16(ctx, val+2));
1966 break;
1967 }
1968
1969 case V_028714_SPI_SHADER_SINT16_ABGR: {
1970 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1971 is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
1972 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1973 is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
1974 LLVMValueRef max_alpha =
1975 !is_int10 ? max_rgb : ctx->i32_1;
1976 LLVMValueRef min_alpha =
1977 !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1978
1979 /* Clamp. */
1980 for (chan = 0; chan < 4; chan++) {
1981 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1982 val[chan] = lp_build_emit_llvm_binary(bld_base,
1983 TGSI_OPCODE_IMIN,
1984 val[chan], chan == 3 ? max_alpha : max_rgb);
1985 val[chan] = lp_build_emit_llvm_binary(bld_base,
1986 TGSI_OPCODE_IMAX,
1987 val[chan], chan == 3 ? min_alpha : min_rgb);
1988 }
1989
1990 args->compr = 1; /* COMPR flag */
1991 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1992 si_llvm_pack_two_int32_as_int16(ctx, val));
1993 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1994 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1995 break;
1996 }
1997
1998 case V_028714_SPI_SHADER_32_ABGR:
1999 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2000 break;
2001 }
2002 }
2003
2004 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2005 LLVMValueRef alpha)
2006 {
2007 struct si_shader_context *ctx = si_shader_context(bld_base);
2008
2009 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2010 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2011 SI_PARAM_ALPHA_REF);
2012
2013 LLVMValueRef alpha_pass =
2014 lp_build_cmp(&bld_base->base,
2015 ctx->shader->key.part.ps.epilog.alpha_func,
2016 alpha, alpha_ref);
2017 LLVMValueRef arg =
2018 lp_build_select(&bld_base->base,
2019 alpha_pass,
2020 LLVMConstReal(ctx->f32, 1.0f),
2021 LLVMConstReal(ctx->f32, -1.0f));
2022
2023 ac_build_kill(&ctx->ac, arg);
2024 } else {
2025 ac_build_kill(&ctx->ac, NULL);
2026 }
2027 }
2028
2029 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2030 LLVMValueRef alpha,
2031 unsigned samplemask_param)
2032 {
2033 struct si_shader_context *ctx = si_shader_context(bld_base);
2034 struct gallivm_state *gallivm = &ctx->gallivm;
2035 LLVMValueRef coverage;
2036
2037 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2038 coverage = LLVMGetParam(ctx->main_fn,
2039 samplemask_param);
2040 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2041
2042 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2043 ctx->i32,
2044 &coverage, 1, LP_FUNC_ATTR_READNONE);
2045
2046 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2047 ctx->f32, "");
2048
2049 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2050 LLVMConstReal(ctx->f32,
2051 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2052
2053 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2054 }
2055
/* Compute user clip distances from a CLIPVERTEX output.
 *
 * Dots the clip-vertex position (out_elts) with each of the 8 user clip
 * planes, which are read from the SI_VS_CONST_CLIP_PLANES constant
 * buffer, and fills the two clip-distance position exports (pos[2] and
 * pos[3], i.e. export targets POS+2 and POS+3, 4 distances each).
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);

	/* Two exports of 4 clip distances each. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start each distance at 0 and accumulate the dot product. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane[reg_index*4+chan][const_chan]
				 * in the constant buffer (4 bytes per float). */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(base, args->out[chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2100
2101 static void si_dump_streamout(struct pipe_stream_output_info *so)
2102 {
2103 unsigned i;
2104
2105 if (so->num_outputs)
2106 fprintf(stderr, "STREAMOUT\n");
2107
2108 for (i = 0; i < so->num_outputs; i++) {
2109 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2110 so->output[i].start_component;
2111 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2112 i, so->output[i].output_buffer,
2113 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2114 so->output[i].register_index,
2115 mask & 1 ? "x" : "",
2116 mask & 2 ? "y" : "",
2117 mask & 4 ? "z" : "",
2118 mask & 8 ? "w" : "");
2119 }
2120 }
2121
/* Store one vertex output to its streamout (transform feedback) buffer.
 *
 * Loads num_components dwords of the output starting at start_component,
 * packs them into an i32 scalar or vector, and stores them at the
 * per-thread write offset precomputed in so_write_offsets.
 */
static void emit_streamout_output(struct si_shader_context *ctx,
				  LLVMValueRef const *so_buffers,
				  LLVMValueRef const *so_write_offsets,
				  struct pipe_stream_output *stream_out,
				  struct si_shader_output_values *shader_out)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = LLVMBuildBitCast(builder,
					  shader_out->values[start + j],
					  ctx->i32, "");
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
	case 4: /* as v4i32 */
		vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
		for (int j = 0; j < num_comps; j++) {
			vdata = LLVMBuildInsertElement(builder, vdata, out[j],
						       LLVMConstInt(ctx->i32, j, 0), "");
		}
		break;
	}

	/* dst_offset is in dwords; the store takes a byte offset. */
	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, 1, 1, true, false);
}
2172
2173 /**
2174 * Write streamout data to buffers for vertex stream @p stream (different
2175 * vertex streams can occur for GS copy shaders).
2176 */
2177 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
2178 struct si_shader_output_values *outputs,
2179 unsigned noutput, unsigned stream)
2180 {
2181 struct si_shader_selector *sel = ctx->shader->selector;
2182 struct pipe_stream_output_info *so = &sel->so;
2183 struct gallivm_state *gallivm = &ctx->gallivm;
2184 LLVMBuilderRef builder = gallivm->builder;
2185 int i;
2186 struct lp_build_if_state if_ctx;
2187
2188 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
2189 LLVMValueRef so_vtx_count =
2190 unpack_param(ctx, ctx->param_streamout_config, 16, 7);
2191
2192 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
2193
2194 /* can_emit = tid < so_vtx_count; */
2195 LLVMValueRef can_emit =
2196 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
2197
2198 /* Emit the streamout code conditionally. This actually avoids
2199 * out-of-bounds buffer access. The hw tells us via the SGPR
2200 * (so_vtx_count) which threads are allowed to emit streamout data. */
2201 lp_build_if(&if_ctx, gallivm, can_emit);
2202 {
2203 /* The buffer offset is computed as follows:
2204 * ByteOffset = streamout_offset[buffer_id]*4 +
2205 * (streamout_write_index + thread_id)*stride[buffer_id] +
2206 * attrib_offset
2207 */
2208
2209 LLVMValueRef so_write_index =
2210 LLVMGetParam(ctx->main_fn,
2211 ctx->param_streamout_write_index);
2212
2213 /* Compute (streamout_write_index + thread_id). */
2214 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
2215
2216 /* Load the descriptor and compute the write offset for each
2217 * enabled buffer. */
2218 LLVMValueRef so_write_offset[4] = {};
2219 LLVMValueRef so_buffers[4];
2220 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
2221 ctx->param_rw_buffers);
2222
2223 for (i = 0; i < 4; i++) {
2224 if (!so->stride[i])
2225 continue;
2226
2227 LLVMValueRef offset = LLVMConstInt(ctx->i32,
2228 SI_VS_STREAMOUT_BUF0 + i, 0);
2229
2230 so_buffers[i] = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
2231
2232 LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
2233 ctx->param_streamout_offset[i]);
2234 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
2235
2236 so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
2237 LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
2238 so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
2239 }
2240
2241 /* Write streamout data. */
2242 for (i = 0; i < so->num_outputs; i++) {
2243 unsigned reg = so->output[i].register_index;
2244
2245 if (reg >= noutput)
2246 continue;
2247
2248 if (stream != so->output[i].stream)
2249 continue;
2250
2251 emit_streamout_output(ctx, so_buffers, so_write_offset,
2252 &so->output[i], &outputs[reg]);
2253 }
2254 }
2255 lp_build_endif(&if_ctx);
2256 }
2257
2258
2259 /* Generate export instructions for hardware VS shader stage */
2260 static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
2261 struct si_shader_output_values *outputs,
2262 unsigned noutput)
2263 {
2264 struct si_shader_context *ctx = si_shader_context(bld_base);
2265 struct si_shader *shader = ctx->shader;
2266 struct lp_build_context *base = &bld_base->base;
2267 struct ac_export_args args, pos_args[4] = {};
2268 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2269 unsigned semantic_name, semantic_index;
2270 unsigned target;
2271 unsigned param_count = 0;
2272 unsigned pos_idx;
2273 int i;
2274
2275 for (i = 0; i < noutput; i++) {
2276 semantic_name = outputs[i].semantic_name;
2277 semantic_index = outputs[i].semantic_index;
2278 bool export_param = true;
2279 unsigned id;
2280
2281 switch (semantic_name) {
2282 case TGSI_SEMANTIC_POSITION: /* ignore these */
2283 case TGSI_SEMANTIC_PSIZE:
2284 case TGSI_SEMANTIC_CLIPVERTEX:
2285 case TGSI_SEMANTIC_EDGEFLAG:
2286 break;
2287 case TGSI_SEMANTIC_GENERIC:
2288 /* don't process indices the function can't handle */
2289 if (semantic_index >= SI_MAX_IO_GENERIC)
2290 break;
2291 /* fall through */
2292 default:
2293 id = si_shader_io_get_unique_index(semantic_name, semantic_index);
2294 if (shader->key.opt.kill_outputs[id / 32] & (1u << (id % 32)))
2295 export_param = false;
2296 }
2297
2298 if (outputs[i].vertex_stream[0] != 0 &&
2299 outputs[i].vertex_stream[1] != 0 &&
2300 outputs[i].vertex_stream[2] != 0 &&
2301 outputs[i].vertex_stream[3] != 0)
2302 export_param = false;
2303
2304 handle_semantic:
2305 /* Select the correct target */
2306 switch(semantic_name) {
2307 case TGSI_SEMANTIC_PSIZE:
2308 psize_value = outputs[i].values[0];
2309 continue;
2310 case TGSI_SEMANTIC_EDGEFLAG:
2311 edgeflag_value = outputs[i].values[0];
2312 continue;
2313 case TGSI_SEMANTIC_LAYER:
2314 layer_value = outputs[i].values[0];
2315 semantic_name = TGSI_SEMANTIC_GENERIC;
2316 goto handle_semantic;
2317 case TGSI_SEMANTIC_VIEWPORT_INDEX:
2318 viewport_index_value = outputs[i].values[0];
2319 semantic_name = TGSI_SEMANTIC_GENERIC;
2320 goto handle_semantic;
2321 case TGSI_SEMANTIC_POSITION:
2322 target = V_008DFC_SQ_EXP_POS;
2323 break;
2324 case TGSI_SEMANTIC_CLIPDIST:
2325 if (shader->key.opt.clip_disable) {
2326 semantic_name = TGSI_SEMANTIC_GENERIC;
2327 goto handle_semantic;
2328 }
2329 target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
2330 break;
2331 case TGSI_SEMANTIC_CLIPVERTEX:
2332 if (shader->key.opt.clip_disable)
2333 continue;
2334 si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
2335 continue;
2336 case TGSI_SEMANTIC_COLOR:
2337 case TGSI_SEMANTIC_BCOLOR:
2338 case TGSI_SEMANTIC_PRIMID:
2339 case TGSI_SEMANTIC_FOG:
2340 case TGSI_SEMANTIC_TEXCOORD:
2341 case TGSI_SEMANTIC_GENERIC:
2342 if (!export_param)
2343 continue;
2344 target = V_008DFC_SQ_EXP_PARAM + param_count;
2345 assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
2346 shader->info.vs_output_param_offset[i] = param_count;
2347 param_count++;
2348 break;
2349 default:
2350 target = 0;
2351 fprintf(stderr,
2352 "Warning: SI unhandled vs output type:%d\n",
2353 semantic_name);
2354 }
2355
2356 si_llvm_init_export_args(bld_base, outputs[i].values, target, &args);
2357
2358 if (target >= V_008DFC_SQ_EXP_POS &&
2359 target <= (V_008DFC_SQ_EXP_POS + 3)) {
2360 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2361 &args, sizeof(args));
2362 } else {
2363 ac_build_export(&ctx->ac, &args);
2364 }
2365
2366 if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
2367 semantic_name = TGSI_SEMANTIC_GENERIC;
2368 goto handle_semantic;
2369 }
2370 }
2371
2372 shader->info.nr_param_exports = param_count;
2373
2374 /* We need to add the position output manually if it's missing. */
2375 if (!pos_args[0].out[0]) {
2376 pos_args[0].enabled_channels = 0xf; /* writemask */
2377 pos_args[0].valid_mask = 0; /* EXEC mask */
2378 pos_args[0].done = 0; /* last export? */
2379 pos_args[0].target = V_008DFC_SQ_EXP_POS;
2380 pos_args[0].compr = 0; /* COMPR flag */
2381 pos_args[0].out[0] = base->zero; /* X */
2382 pos_args[0].out[1] = base->zero; /* Y */
2383 pos_args[0].out[2] = base->zero; /* Z */
2384 pos_args[0].out[3] = base->one; /* W */
2385 }
2386
2387 /* Write the misc vector (point size, edgeflag, layer, viewport). */
2388 if (shader->selector->info.writes_psize ||
2389 shader->selector->info.writes_edgeflag ||
2390 shader->selector->info.writes_viewport_index ||
2391 shader->selector->info.writes_layer) {
2392 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
2393 (shader->selector->info.writes_edgeflag << 1) |
2394 (shader->selector->info.writes_layer << 2);
2395
2396 pos_args[1].valid_mask = 0; /* EXEC mask */
2397 pos_args[1].done = 0; /* last export? */
2398 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2399 pos_args[1].compr = 0; /* COMPR flag */
2400 pos_args[1].out[0] = base->zero; /* X */
2401 pos_args[1].out[1] = base->zero; /* Y */
2402 pos_args[1].out[2] = base->zero; /* Z */
2403 pos_args[1].out[3] = base->zero; /* W */
2404
2405 if (shader->selector->info.writes_psize)
2406 pos_args[1].out[0] = psize_value;
2407
2408 if (shader->selector->info.writes_edgeflag) {
2409 /* The output is a float, but the hw expects an integer
2410 * with the first bit containing the edge flag. */
2411 edgeflag_value = LLVMBuildFPToUI(ctx->gallivm.builder,
2412 edgeflag_value,
2413 ctx->i32, "");
2414 edgeflag_value = lp_build_min(&bld_base->int_bld,
2415 edgeflag_value,
2416 ctx->i32_1);
2417
2418 /* The LLVM intrinsic expects a float. */
2419 pos_args[1].out[1] = LLVMBuildBitCast(ctx->gallivm.builder,
2420 edgeflag_value,
2421 ctx->f32, "");
2422 }
2423
2424 if (ctx->screen->b.chip_class >= GFX9) {
2425 /* GFX9 has the layer in out.z[10:0] and the viewport
2426 * index in out.z[19:16].
2427 */
2428 if (shader->selector->info.writes_layer)
2429 pos_args[1].out[2] = layer_value;
2430
2431 if (shader->selector->info.writes_viewport_index) {
2432 LLVMValueRef v = viewport_index_value;
2433
2434 v = bitcast(bld_base, TGSI_TYPE_UNSIGNED, v);
2435 v = LLVMBuildShl(ctx->gallivm.builder, v,
2436 LLVMConstInt(ctx->i32, 16, 0), "");
2437 v = LLVMBuildOr(ctx->gallivm.builder, v,
2438 bitcast(bld_base, TGSI_TYPE_UNSIGNED,
2439 pos_args[1].out[2]), "");
2440 pos_args[1].out[2] = bitcast(bld_base, TGSI_TYPE_FLOAT, v);
2441 pos_args[1].enabled_channels |= 1 << 2;
2442 }
2443 } else {
2444 if (shader->selector->info.writes_layer)
2445 pos_args[1].out[2] = layer_value;
2446
2447 if (shader->selector->info.writes_viewport_index) {
2448 pos_args[1].out[3] = viewport_index_value;
2449 pos_args[1].enabled_channels |= 1 << 3;
2450 }
2451 }
2452 }
2453
2454 for (i = 0; i < 4; i++)
2455 if (pos_args[i].out[0])
2456 shader->info.nr_pos_exports++;
2457
2458 pos_idx = 0;
2459 for (i = 0; i < 4; i++) {
2460 if (!pos_args[i].out[0])
2461 continue;
2462
2463 /* Specify the target we are exporting */
2464 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2465
2466 if (pos_idx == shader->info.nr_pos_exports)
2467 /* Specify that this is the last export */
2468 pos_args[i].done = 1;
2469
2470 ac_build_export(&ctx->ac, &pos_args[i]);
2471 }
2472 }
2473
2474 /**
2475 * Forward all outputs from the vertex shader to the TES. This is only used
2476 * for the fixed function TCS.
2477 */
2478 static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
2479 {
2480 struct si_shader_context *ctx = si_shader_context(bld_base);
2481 struct gallivm_state *gallivm = &ctx->gallivm;
2482 LLVMValueRef invocation_id, buffer, buffer_offset;
2483 LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
2484 uint64_t inputs;
2485
2486 invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
2487 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
2488 buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
2489
2490 lds_vertex_stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
2491 lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
2492 lds_vertex_stride, "");
2493 lds_base = get_tcs_in_current_patch_offset(ctx);
2494 lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");
2495
2496 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy[0] |
2497 ((uint64_t)ctx->shader->key.mono.u.ff_tcs_inputs_to_copy[1] << 32);
2498 while (inputs) {
2499 unsigned i = u_bit_scan64(&inputs);
2500
2501 LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
2502 LLVMConstInt(ctx->i32, 4 * i, 0),
2503 "");
2504
2505 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
2506 get_rel_patch_id(ctx),
2507 invocation_id,
2508 LLVMConstInt(ctx->i32, i, 0));
2509
2510 LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
2511 lds_ptr);
2512
2513 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
2514 buffer_offset, 0, 1, 0, true, false);
2515 }
2516 }
2517
/* Write the tessellation factors for the current patch.
 *
 * Executed by TCS invocation 0 only (the factors are per-patch).  Reads
 * TESSINNER/TESSOUTER from LDS, stores them to the tess-factor ring
 * buffer consumed by the tessellator (plus a dynamic HS control word on
 * rel_patch_id 0 for chips <= VI), and optionally also to the offchip
 * buffer when the TES reads the factors.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations' LDS writes are visible first. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_inner_index * 4, 0), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_outer_index * 4, 0), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		outer[0] = out[1] = lds_load(bld_base, TGSI_TYPE_SIGNED, 0, lds_outer);
		outer[1] = out[0] = lds_load(bld_base, TGSI_TYPE_SIGNED, 1, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->b.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = lp_build_gather_values(gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
2680
2681 static LLVMValueRef
2682 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2683 unsigned param, unsigned return_index)
2684 {
2685 return LLVMBuildInsertValue(ctx->gallivm.builder, ret,
2686 LLVMGetParam(ctx->main_fn, param),
2687 return_index, "");
2688 }
2689
2690 static LLVMValueRef
2691 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2692 unsigned param, unsigned return_index)
2693 {
2694 LLVMBuilderRef builder = ctx->gallivm.builder;
2695 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
2696
2697 return LLVMBuildInsertValue(builder, ret,
2698 LLVMBuildBitCast(builder, p, ctx->f32, ""),
2699 return_index, "");
2700 }
2701
2702 static LLVMValueRef
2703 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
2704 unsigned param, unsigned return_index)
2705 {
2706 LLVMBuilderRef builder = ctx->gallivm.builder;
2707 LLVMValueRef ptr, lo, hi;
2708
2709 ptr = LLVMGetParam(ctx->main_fn, param);
2710 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
2711 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
2712 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
2713 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
2714 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
2715 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
2716 }
2717
/* This only writes the tessellation factor levels.
 *
 * TCS epilogue: forwards the fixed-function TCS inputs, then packs the
 * SGPRs (offchip layout, buffer addresses, offsets) and VGPRs
 * (rel_patch_id, invocation_id, tf LDS offset) the TCS epilog part
 * expects into the function's return value.  The return-slot layout
 * differs between GFX9 (merged shaders) and older chips.
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	/* Return epilog parameters from this function. */
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->b.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
	invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
	tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	ctx->return_value = ret;
}
2771
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * With merged LS+HS shaders, the LS part forwards its SGPR/VGPR inputs
 * through the return value so the TCS part receives them in the
 * expected registers.  Slot indices match the GFX9 merged-shader
 * register layout (SGPRs 0-7 are system values, user SGPRs start at 8).
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	/* The const/shader buffer and sampler/image descriptor pointers
	 * follow the tess factor address in the LS argument list. */
	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs: patch id and rel ids, bitcast to float return slots. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_patch_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_rel_ids, vgpr++);
	ctx->return_value = ret;
}
2809
/* Pass GS inputs from ES to GS on GFX9.
 *
 * With merged ES+GS shaders, the ES part forwards its SGPR/VGPR inputs
 * through the return value so the GS part receives them in the expected
 * registers (same scheme as si_set_ls_return_value_for_tcs).
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);

	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Descriptor pointers follow the VS state bits in the argument list. */
	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* VGPRs: the 5 GS vertex/primitive offsets and IDs, starting at
	 * param_gs_vtx01_offset, bitcast to float return slots. */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
2834
/* LS (VS-as-LS) epilog: store all vertex outputs to LDS so the TCS (HS)
 * stage can read them as its inputs. Each output slot occupies 4 dwords;
 * the per-vertex stride comes from the VS state SGPR.
 */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords, packed in bits [31:24] of the
	 * VS state SGPR. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	/* Base dword address of this vertex's LDS slot. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each unique output index gets 4 dwords in the slot. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}

	/* On GFX9, LS+HS are merged: pass LS inputs on to the HS part. */
	if (ctx->screen->b.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
2888
/* ES epilog: store all outputs to the ESGS ring for the geometry shader.
 * On GFX9 the ring lives in LDS; on older chips it is a buffer addressed
 * through the es2gs offset SGPR.
 */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	/* GFX9: compute this thread's base LDS slot from its thread id and
	 * the per-vertex item size (esgs_itemsize is in bytes; /4 -> dwords). */
	if (ctx->screen->b.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		lds_base = LLVMBuildMul(gallivm->builder, ac_get_thread_id(&ctx->ac),
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		int param;

		/* Skipped: only the last vertex processing stage's write of
		 * gl_Layer / gl_ViewportIndex is used (see the
		 * ARB_shader_viewport_layer_array resolution). */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->b.chip_class >= GFX9) {
				lds_store(bld_base, param * 4 + chan, lds_base, out_val);
				continue;
			}

			/* Pre-GFX9: buffer store at a byte offset of
			 * (4 dwords per slot) * 4 bytes per dword. */
			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	/* On GFX9, ES+GS are merged: pass ES inputs on to the GS part. */
	if (ctx->screen->b.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
2939
2940 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2941 {
2942 if (ctx->screen->b.chip_class >= GFX9)
2943 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
2944 else
2945 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
2946 }
2947
2948 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2949 {
2950 struct si_shader_context *ctx = si_shader_context(bld_base);
2951
2952 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2953 si_get_gs_wave_id(ctx));
2954 }
2955
/* VS epilog for a VS that is the last vertex processing stage: optionally
 * clamp vertex colors, gather all outputs, emit streamout stores and the
 * position/parameter exports.
 */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);

	/* +1 reserves room for an extra PrimitiveID export below.
	 * NOTE(review): MALLOC result is not NULL-checked here — presumably
	 * following the file's existing convention. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				/* Open the IF once, covering all colors. */
				lp_build_if(&if_ctx, gallivm, cond);
			}

			for (j = 0; j < 4; j++) {
				addr = ctx->outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Load all output values into the export array. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->outputs[i][j],
					      "");
			/* 2 bits per component in output_streams. */
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
					       get_primitive_id(bld_base, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
3040
/* Queue of PS export instructions, flushed by si_emit_ps_exports. */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args */
	struct ac_export_args args[10];	/* queued color/Z exports */
};
3045
3046 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
3047 bool writes_samplemask)
3048 {
3049 if (writes_z) {
3050 /* Z needs 32 bits. */
3051 if (writes_samplemask)
3052 return V_028710_SPI_SHADER_32_ABGR;
3053 else if (writes_stencil)
3054 return V_028710_SPI_SHADER_32_GR;
3055 else
3056 return V_028710_SPI_SHADER_32_R;
3057 } else if (writes_stencil || writes_samplemask) {
3058 /* Both stencil and sample mask need only 16 bits. */
3059 return V_028710_SPI_SHADER_UINT16_ABGR;
3060 } else {
3061 return V_028710_SPI_SHADER_ZERO;
3062 }
3063 }
3064
/* Build the MRTZ export (depth / stencil / sample mask) and queue it in
 * \p exp. The channel layout must agree with the format selected by
 * si_get_spi_shader_z_format for the same write set.
 */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args;
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args.valid_mask = 1; /* whether the EXEC mask is valid */
	args.done = 1; /* DONE bit */

	/* Specify the target we are exporting */
	args.target = V_008DFC_SQ_EXP_MRTZ;

	args.compr = 0; /* COMP flag */
	args.out[0] = base->undef; /* R, depth */
	args.out[1] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args.out[2] = base->undef; /* B, sample mask */
	args.out[3] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* 16-bit compressed export: with COMPR set, the channel
		 * enable bits are used in pairs (0x3 for X, 0xc for Y). */
		assert(!depth);
		args.compr = 1; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(ctx->gallivm.builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args.out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args.out[1] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit export: one channel per value. */
		if (depth) {
			args.out[0] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args.out[1] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args.out[2] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND and HAINAN) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND &&
	    ctx->screen->b.family != CHIP_HAINAN)
		mask |= 0x1;

	/* Specify which components to enable */
	args.enabled_channels = mask;

	memcpy(&exp->args[exp->num++], &args, sizeof(args));
}
3135
/* Process one fragment output color (4 components in \p color) destined for
 * MRT \p index: apply the PS-epilog key transforms (clamp, alpha-to-one,
 * alpha test, smoothing), then queue the resulting export(s) in \p exp.
 *
 * When last_cbuf > 0 (FS_COLOR0_WRITES_ALL_CBUFS), the single color is
 * broadcast to all color buffers 0..last_cbuf.
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test — only applied to output 0. */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports; the last enabled one carries the DONE bit. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3202
3203 static void si_emit_ps_exports(struct si_shader_context *ctx,
3204 struct si_ps_exports *exp)
3205 {
3206 for (unsigned i = 0; i < exp->num; i++)
3207 ac_build_export(&ctx->ac, &exp->args[i]);
3208 }
3209
3210 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3211 {
3212 struct si_shader_context *ctx = si_shader_context(bld_base);
3213 struct lp_build_context *base = &bld_base->base;
3214 struct ac_export_args args;
3215
3216 args.enabled_channels = 0x0; /* enabled channels */
3217 args.valid_mask = 1; /* whether the EXEC mask is valid */
3218 args.done = 1; /* DONE bit */
3219 args.target = V_008DFC_SQ_EXP_NULL;
3220 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3221 args.out[0] = base->undef; /* R */
3222 args.out[1] = base->undef; /* G */
3223 args.out[2] = base->undef; /* B */
3224 args.out[3] = base->undef; /* A */
3225
3226 ac_build_export(&ctx->ac, &args);
3227 }
3228
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->gallivm.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component of the position is used. */
			depth = LLVMBuildLoad(builder,
					      ctx->outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: written colors are packed densely after the SGPRs. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3321
/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 */
static void emit_optimization_barrier(struct si_shader_context *ctx,
				      LLVMValueRef *pvgpr)
{
	static int counter = 0;

	LLVMBuilderRef builder = ctx->gallivm.builder;
	char code[16];

	/* Each barrier gets a unique asm comment string (presumably so
	 * distinct barriers are never merged — NOTE(review): confirm). */
	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		/* Pure barrier: void asm with side effects, no operands. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		/* Pass the value's first dword through the asm ("=v,0": the
		 * output is tied to the input VGPR) so LLVM must keep the
		 * value live across this point. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = llvm_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		/* View the value as a vector of i32, round-trip element 0
		 * through the asm, then restore the original type. */
		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}
3362
3363 void si_emit_waitcnt(struct si_shader_context *ctx, unsigned simm16)
3364 {
3365 struct gallivm_state *gallivm = &ctx->gallivm;
3366 LLVMBuilderRef builder = gallivm->builder;
3367 LLVMValueRef args[1] = {
3368 LLVMConstInt(ctx->i32, simm16, 0)
3369 };
3370 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3371 ctx->voidt, args, 1, 0);
3372 }
3373
3374 static void membar_emit(
3375 const struct lp_build_tgsi_action *action,
3376 struct lp_build_tgsi_context *bld_base,
3377 struct lp_build_emit_data *emit_data)
3378 {
3379 struct si_shader_context *ctx = si_shader_context(bld_base);
3380 LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
3381 unsigned flags = LLVMConstIntGetZExtValue(src0);
3382 unsigned waitcnt = NOOP_WAITCNT;
3383
3384 if (flags & TGSI_MEMBAR_THREAD_GROUP)
3385 waitcnt &= VM_CNT & LGKM_CNT;
3386
3387 if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
3388 TGSI_MEMBAR_SHADER_BUFFER |
3389 TGSI_MEMBAR_SHADER_IMAGE))
3390 waitcnt &= VM_CNT;
3391
3392 if (flags & TGSI_MEMBAR_SHARED)
3393 waitcnt &= LGKM_CNT;
3394
3395 if (waitcnt != NOOP_WAITCNT)
3396 si_emit_waitcnt(ctx, waitcnt);
3397 }
3398
3399 static void clock_emit(
3400 const struct lp_build_tgsi_action *action,
3401 struct lp_build_tgsi_context *bld_base,
3402 struct lp_build_emit_data *emit_data)
3403 {
3404 struct si_shader_context *ctx = si_shader_context(bld_base);
3405 struct gallivm_state *gallivm = &ctx->gallivm;
3406 LLVMValueRef tmp;
3407
3408 tmp = lp_build_intrinsic(gallivm->builder, "llvm.readcyclecounter",
3409 ctx->i64, NULL, 0, 0);
3410 tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->v2i32, "");
3411
3412 emit_data->output[0] =
3413 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_0, "");
3414 emit_data->output[1] =
3415 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_1, "");
3416 }
3417
3418 LLVMTypeRef si_const_array(LLVMTypeRef elem_type, int num_elements)
3419 {
3420 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
3421 CONST_ADDR_SPACE);
3422 }
3423
3424 static void si_llvm_emit_ddxy(
3425 const struct lp_build_tgsi_action *action,
3426 struct lp_build_tgsi_context *bld_base,
3427 struct lp_build_emit_data *emit_data)
3428 {
3429 struct si_shader_context *ctx = si_shader_context(bld_base);
3430 struct gallivm_state *gallivm = &ctx->gallivm;
3431 unsigned opcode = emit_data->info->opcode;
3432 LLVMValueRef val;
3433 int idx;
3434 unsigned mask;
3435
3436 if (opcode == TGSI_OPCODE_DDX_FINE)
3437 mask = AC_TID_MASK_LEFT;
3438 else if (opcode == TGSI_OPCODE_DDY_FINE)
3439 mask = AC_TID_MASK_TOP;
3440 else
3441 mask = AC_TID_MASK_TOP_LEFT;
3442
3443 /* for DDX we want to next X pixel, DDY next Y pixel. */
3444 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3445
3446 val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");
3447 val = ac_build_ddxy(&ctx->ac, ctx->screen->has_ds_bpermute,
3448 mask, idx, ctx->lds, val);
3449 emit_data->output[emit_data->chan] = val;
3450 }
3451
3452 /*
3453 * this takes an I,J coordinate pair,
3454 * and works out the X and Y derivatives.
3455 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3456 */
3457 static LLVMValueRef si_llvm_emit_ddxy_interp(
3458 struct lp_build_tgsi_context *bld_base,
3459 LLVMValueRef interp_ij)
3460 {
3461 struct si_shader_context *ctx = si_shader_context(bld_base);
3462 struct gallivm_state *gallivm = &ctx->gallivm;
3463 LLVMValueRef result[4], a;
3464 unsigned i;
3465
3466 for (i = 0; i < 2; i++) {
3467 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
3468 LLVMConstInt(ctx->i32, i, 0), "");
3469 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3470 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3471 }
3472
3473 return lp_build_gather_values(gallivm, result, 4);
3474 }
3475
/* Fetch sources for INTERP_OFFSET / INTERP_SAMPLE:
 * args[0..1] receive the (x, y) offset from the pixel center.
 * For INTERP_SAMPLE the offset is derived from the requested sample's
 * position; 0.5 is subtracted to convert it from [0,1] within the pixel
 * to an offset relative to the pixel center.
 * INTERP_CENTROID takes no extra arguments and is not handled here.
 */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
					     ctx->i32, "");
		sample_position = load_sample_position(ctx, sample_id);

		emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_0, "");

		/* Re-center: sample positions are relative to the pixel
		 * corner, interpolation offsets to the pixel center. */
		emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
3519
/* Emit INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET: interpolate the
 * fragment input referenced by Src[0] at an adjusted barycentric location.
 * For OFFSET/SAMPLE, the (i, j) barycentrics are recomputed from the base
 * center location plus its screen-space derivatives scaled by the offset
 * fetched in interp_fetch_args. A NULL interp_param means constant
 * (flat) interpolation, handled with fs.interp.mov through P0.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET/SAMPLE start from the center location; CENTROID from the
	 * centroid location. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* -1 = unsupported, 0 = flat (no interp param), >0 = param index. */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(gallivm, ij_out, 2);
	}

	/* Interpolate each destination channel, honoring the source swizzle. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = LLVMConstInt(ctx->i32, schan, 0);

		if (interp_param) {
			interp_param = LLVMBuildBitCast(gallivm->builder,
							interp_param, LLVMVectorType(ctx->f32, 2), "");
			LLVMValueRef i = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_0, "");
			LLVMValueRef j = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_1, "");
			emit_data->output[chan] = ac_build_fs_interp(&ctx->ac,
				llvm_chan, attr_number, params,
				i, j);
		} else {
			emit_data->output[chan] = ac_build_fs_interp_mov(&ctx->ac,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				llvm_chan, attr_number, params);
		}
	}
}
3618
/* Return an i64 mask with one bit per lane, set iff `value` is non-zero in
 * that lane, via llvm.amdgcn.icmp(value, 0, NE).
 */
static LLVMValueRef si_emit_ballot(struct si_shader_context *ctx,
				   LLVMValueRef value)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)	/* comparison predicate */
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &args[0]);

	/* The intrinsic expects an i32 operand. */
	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(gallivm->builder, args[0], ctx->i32, "");

	return lp_build_intrinsic(gallivm->builder,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  LP_FUNC_ATTR_NOUNWIND |
				  LP_FUNC_ATTR_READNONE |
				  LP_FUNC_ATTR_CONVERGENT);
}
3644
3645 static void vote_all_emit(
3646 const struct lp_build_tgsi_action *action,
3647 struct lp_build_tgsi_context *bld_base,
3648 struct lp_build_emit_data *emit_data)
3649 {
3650 struct si_shader_context *ctx = si_shader_context(bld_base);
3651 struct gallivm_state *gallivm = &ctx->gallivm;
3652 LLVMValueRef active_set, vote_set;
3653 LLVMValueRef tmp;
3654
3655 active_set = si_emit_ballot(ctx, ctx->i32_1);
3656 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3657
3658 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3659 emit_data->output[emit_data->chan] =
3660 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3661 }
3662
3663 static void vote_any_emit(
3664 const struct lp_build_tgsi_action *action,
3665 struct lp_build_tgsi_context *bld_base,
3666 struct lp_build_emit_data *emit_data)
3667 {
3668 struct si_shader_context *ctx = si_shader_context(bld_base);
3669 struct gallivm_state *gallivm = &ctx->gallivm;
3670 LLVMValueRef vote_set;
3671 LLVMValueRef tmp;
3672
3673 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3674
3675 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
3676 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3677 emit_data->output[emit_data->chan] =
3678 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3679 }
3680
3681 static void vote_eq_emit(
3682 const struct lp_build_tgsi_action *action,
3683 struct lp_build_tgsi_context *bld_base,
3684 struct lp_build_emit_data *emit_data)
3685 {
3686 struct si_shader_context *ctx = si_shader_context(bld_base);
3687 struct gallivm_state *gallivm = &ctx->gallivm;
3688 LLVMValueRef active_set, vote_set;
3689 LLVMValueRef all, none, tmp;
3690
3691 active_set = si_emit_ballot(ctx, ctx->i32_1);
3692 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3693
3694 all = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3695 none = LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
3696 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3697 tmp = LLVMBuildOr(gallivm->builder, all, none, "");
3698 emit_data->output[emit_data->chan] =
3699 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3700 }
3701
3702 static void ballot_emit(
3703 const struct lp_build_tgsi_action *action,
3704 struct lp_build_tgsi_context *bld_base,
3705 struct lp_build_emit_data *emit_data)
3706 {
3707 struct si_shader_context *ctx = si_shader_context(bld_base);
3708 LLVMBuilderRef builder = ctx->gallivm.builder;
3709 LLVMValueRef tmp;
3710
3711 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
3712 tmp = si_emit_ballot(ctx, tmp);
3713 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
3714
3715 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
3716 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
3717 }
3718
3719 static void read_invoc_fetch_args(
3720 struct lp_build_tgsi_context *bld_base,
3721 struct lp_build_emit_data *emit_data)
3722 {
3723 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
3724 0, emit_data->src_chan);
3725
3726 /* Always read the source invocation (= lane) from the X channel. */
3727 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
3728 1, TGSI_CHAN_X);
3729 emit_data->arg_count = 2;
3730 }
3731
3732 static void read_lane_emit(
3733 const struct lp_build_tgsi_action *action,
3734 struct lp_build_tgsi_context *bld_base,
3735 struct lp_build_emit_data *emit_data)
3736 {
3737 struct si_shader_context *ctx = si_shader_context(bld_base);
3738 LLVMBuilderRef builder = ctx->gallivm.builder;
3739
3740 /* We currently have no other way to prevent LLVM from lifting the icmp
3741 * calls to a dominating basic block.
3742 */
3743 emit_optimization_barrier(ctx, &emit_data->args[0]);
3744
3745 for (unsigned i = 0; i < emit_data->arg_count; ++i) {
3746 emit_data->args[i] = LLVMBuildBitCast(builder, emit_data->args[i],
3747 ctx->i32, "");
3748 }
3749
3750 emit_data->output[emit_data->chan] =
3751 ac_build_intrinsic(&ctx->ac, action->intr_name,
3752 ctx->i32, emit_data->args, emit_data->arg_count,
3753 AC_FUNC_ATTR_READNONE |
3754 AC_FUNC_ATTR_CONVERGENT);
3755 }
3756
3757 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3758 struct lp_build_emit_data *emit_data)
3759 {
3760 struct si_shader_context *ctx = si_shader_context(bld_base);
3761 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3762 LLVMValueRef imm;
3763 unsigned stream;
3764
3765 assert(src0.File == TGSI_FILE_IMMEDIATE);
3766
3767 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
3768 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
3769 return stream;
3770 }
3771
/* Emit one vertex from the geometry shader: store the current output
 * values to the GSVS ring for the selected stream, bump the per-thread
 * vertex counter, and send the EMIT message to the GS hardware.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	unsigned chan, offset;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		/* ac_build_kill terminates the thread when its arg is < 0,
		 * so map can_emit -> 1.0 and !can_emit -> -1.0. */
		kill = lp_build_select(&bld_base->base, can_emit,
				       LLVMConstReal(ctx->f32, 1.0f),
				       LLVMConstReal(ctx->f32, -1.0f));

		ac_build_kill(&ctx->ac, kill);
	} else {
		/* Otherwise guard the stores with an if; the matching
		 * lp_build_endif is at the bottom of the function. */
		lp_build_if(&if_state, gallivm, can_emit);
	}

	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];

		for (chan = 0; chan < 4; chan++) {
			/* Skip channels that aren't written or belong to a
			 * different vertex stream. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			/* Each component occupies gs_max_out_vertices slots
			 * in the ring; 'offset' counts emitted components. */
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Advance this thread's vertex counter for the stream. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
3861
3862 /* Cut one primitive from the geometry shader */
3863 static void si_llvm_emit_primitive(
3864 const struct lp_build_tgsi_action *action,
3865 struct lp_build_tgsi_context *bld_base,
3866 struct lp_build_emit_data *emit_data)
3867 {
3868 struct si_shader_context *ctx = si_shader_context(bld_base);
3869 unsigned stream;
3870
3871 /* Signal primitive cut */
3872 stream = si_llvm_get_stream(bld_base, emit_data);
3873 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
3874 si_get_gs_wave_id(ctx));
3875 }
3876
/* Emit a workgroup/patch execution barrier (TGSI BARRIER). */
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;

	/* SI only (thanks to a hw bug workaround):
	 * The real barrier instruction isn't needed, because an entire patch
	 * always fits into a single wave.
	 *
	 * NOTE(review): the masks are combined with '&', presumably because
	 * each mask has the bits of its own counter cleared so ANDing waits
	 * on both LGKM and VM — confirm against the LGKM_CNT/VM_CNT
	 * definitions.
	 */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->type == PIPE_SHADER_TESS_CTRL) {
		si_emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
		return;
	}

	lp_build_intrinsic(gallivm->builder,
			   "llvm.amdgcn.s.barrier",
			   ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
}
3898
/* Shared action callbacks for the interpolation instructions:
 * interp_fetch_args gathers the operands, build_interp_intrinsic emits
 * the actual interpolation code.
 */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3903
/**
 * Create the main LLVM function for a shader (or shader part) and apply
 * the parameter and function attributes the backend relies on.
 *
 * \param name               function name
 * \param returns/num_returns  return value types (for merged shaders and
 *                           epilog inputs); the return value starts undef
 * \param params/num_params  parameter types, SGPRs first then VGPRs
 * \param last_sgpr          index of the last SGPR parameter; everything
 *                           up to and including it is marked inreg/byval
 * \param max_workgroup_size if non-zero, emitted as the
 *                           "amdgpu-max-work-group-size" attribute
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr, unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    params, num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 *
		 * NOTE(review): attributes are added at index i + 1 —
		 * presumably because index 0 refers to the return value.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_BYVAL);
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
3958
3959 static void declare_streamout_params(struct si_shader_context *ctx,
3960 struct pipe_stream_output_info *so,
3961 LLVMTypeRef *params, LLVMTypeRef i32,
3962 unsigned *num_params)
3963 {
3964 int i;
3965
3966 /* Streamout SGPRs. */
3967 if (so->num_outputs) {
3968 if (ctx->type != PIPE_SHADER_TESS_EVAL)
3969 params[ctx->param_streamout_config = (*num_params)++] = i32;
3970 else
3971 ctx->param_streamout_config = *num_params - 1;
3972
3973 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
3974 }
3975 /* A streamout buffer offset is loaded if the stride is non-zero. */
3976 for (i = 0; i < 4; i++) {
3977 if (!so->stride[i])
3978 continue;
3979
3980 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
3981 }
3982 }
3983
3984 static unsigned llvm_get_type_size(LLVMTypeRef type)
3985 {
3986 LLVMTypeKind kind = LLVMGetTypeKind(type);
3987
3988 switch (kind) {
3989 case LLVMIntegerTypeKind:
3990 return LLVMGetIntTypeWidth(type) / 8;
3991 case LLVMFloatTypeKind:
3992 return 4;
3993 case LLVMPointerTypeKind:
3994 return 8;
3995 case LLVMVectorTypeKind:
3996 return LLVMGetVectorSize(type) *
3997 llvm_get_type_size(LLVMGetElementType(type));
3998 case LLVMArrayTypeKind:
3999 return LLVMGetArrayLength(type) *
4000 llvm_get_type_size(LLVMGetElementType(type));
4001 default:
4002 assert(0);
4003 return 0;
4004 }
4005 }
4006
4007 static void declare_lds_as_pointer(struct si_shader_context *ctx)
4008 {
4009 struct gallivm_state *gallivm = &ctx->gallivm;
4010
4011 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
4012 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, ctx->i32_0,
4013 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
4014 "lds");
4015 }
4016
4017 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4018 {
4019 switch (shader->selector->type) {
4020 case PIPE_SHADER_TESS_CTRL:
4021 /* Return this so that LLVM doesn't remove s_barrier
4022 * instructions on chips where we use s_barrier. */
4023 return shader->selector->screen->b.chip_class >= CIK ? 128 : 64;
4024
4025 case PIPE_SHADER_GEOMETRY:
4026 return shader->selector->screen->b.chip_class >= GFX9 ? 128 : 64;
4027
4028 case PIPE_SHADER_COMPUTE:
4029 break; /* see below */
4030
4031 default:
4032 return 0;
4033 }
4034
4035 const unsigned *properties = shader->selector->info.properties;
4036 unsigned max_work_group_size =
4037 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4038 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4039 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4040
4041 if (!max_work_group_size) {
4042 /* This is a variable group size compute shader,
4043 * compile it for the maximum possible group size.
4044 */
4045 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4046 }
4047 return max_work_group_size;
4048 }
4049
4050 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4051 LLVMTypeRef *params,
4052 unsigned *num_params,
4053 bool assign_params)
4054 {
4055 params[(*num_params)++] = si_const_array(ctx->v4i32,
4056 SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS);
4057 params[(*num_params)++] = si_const_array(ctx->v8i32,
4058 SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2);
4059
4060 if (assign_params) {
4061 ctx->param_const_and_shader_buffers = *num_params - 2;
4062 ctx->param_samplers_and_images = *num_params - 1;
4063 }
4064 }
4065
4066 static void declare_default_desc_pointers(struct si_shader_context *ctx,
4067 LLVMTypeRef *params,
4068 unsigned *num_params)
4069 {
4070 params[ctx->param_rw_buffers = (*num_params)++] =
4071 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
4072 declare_per_stage_desc_pointers(ctx, params, num_params, true);
4073 }
4074
4075 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
4076 LLVMTypeRef *params,
4077 unsigned *num_params)
4078 {
4079 params[ctx->param_vertex_buffers = (*num_params)++] =
4080 si_const_array(ctx->v4i32, SI_NUM_VERTEX_BUFFERS);
4081 params[ctx->param_base_vertex = (*num_params)++] = ctx->i32;
4082 params[ctx->param_start_instance = (*num_params)++] = ctx->i32;
4083 params[ctx->param_draw_id = (*num_params)++] = ctx->i32;
4084 params[ctx->param_vs_state_bits = (*num_params)++] = ctx->i32;
4085 }
4086
4087 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
4088 LLVMTypeRef *params, unsigned *num_params,
4089 unsigned *num_prolog_vgprs)
4090 {
4091 struct si_shader *shader = ctx->shader;
4092
4093 params[ctx->param_vertex_id = (*num_params)++] = ctx->i32;
4094 if (shader->key.as_ls) {
4095 params[ctx->param_rel_auto_id = (*num_params)++] = ctx->i32;
4096 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4097 } else {
4098 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4099 params[ctx->param_vs_prim_id = (*num_params)++] = ctx->i32;
4100 }
4101 params[(*num_params)++] = ctx->i32; /* unused */
4102
4103 if (!shader->is_gs_copy_shader) {
4104 /* Vertex load indices. */
4105 ctx->param_vertex_index0 = (*num_params);
4106 for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
4107 params[(*num_params)++] = ctx->i32;
4108 *num_prolog_vgprs += shader->selector->info.num_inputs;
4109 }
4110 }
4111
4112 static void declare_tes_input_vgprs(struct si_shader_context *ctx,
4113 LLVMTypeRef *params, unsigned *num_params)
4114 {
4115 params[ctx->param_tes_u = (*num_params)++] = ctx->f32;
4116 params[ctx->param_tes_v = (*num_params)++] = ctx->f32;
4117 params[ctx->param_tes_rel_patch_id = (*num_params)++] = ctx->i32;
4118 params[ctx->param_tes_patch_id = (*num_params)++] = ctx->i32;
4119 }
4120
enum {
	/* Convenient merged shader definitions.
	 * On GFX9 create_function() compiles LS+HS and ES+GS as single
	 * merged shaders; these values extend PIPE_SHADER_* to name those
	 * combinations. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4126
/**
 * Declare the input SGPRs/VGPRs and return values of the current shader
 * stage, create the main LLVM function, and record the parameter indices
 * in ctx->param_* for later use.
 *
 * The declaration order within each case is the calling convention and
 * must not be changed. Also sets up LDS for stages that need it.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[100]; /* just make it large enough */
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_sgpr, num_params = 0, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;

	/* Set MERGED shaders. */
	if (ctx->screen->b.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_default_desc_pointers(ctx, params, &num_params);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		if (shader->key.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			if (shader->is_gs_copy_shader)
				num_params = ctx->param_rw_buffers + 1;

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		declare_vs_input_vgprs(ctx, params, &num_params,
				       &num_prolog_vgprs);
		break;

	case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 3; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		/* Per-stage descriptors for the VS (LS) half; only recorded
		 * in ctx when we are actually compiling the VS half. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */

		/* Per-stage descriptors for the TCS (HS) half. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_TESS_CTRL);
		last_sgpr = num_params - 1;

		/* VGPRs (first TCS, then VS) */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_GS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		/* Per-stage descriptors for the ES half (VS or TES). */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						(ctx->type == PIPE_SHADER_VERTEX ||
						 ctx->type == PIPE_SHADER_TESS_EVAL));
		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_specific_input_sgprs(ctx, params, &num_params);
		} else {
			/* TESS_EVAL (and also GEOMETRY):
			 * Declare as many input SGPRs as the VS has. */
			params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
			params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[ctx->param_vs_state_bits = num_params++] = ctx->i32; /* unused */
		}

		/* Per-stage descriptors for the GS half. */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_GEOMETRY);
		last_sgpr = num_params - 1;

		/* VGPRs (first GS, then VS/TES) */
		params[ctx->param_gs_vtx01_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx23_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx45_offset = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx, params, &num_params);
		}

		if (ctx->type == PIPE_SHADER_VERTEX ||
		    ctx->type == PIPE_SHADER_TESS_EVAL) {
			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;

		if (shader->key.as_es) {
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		declare_tes_input_vgprs(ctx, params, &num_params);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_wave_id = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_gs_vtx0_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx1_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx2_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx3_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx4_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx5_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		break;

	case PIPE_SHADER_FRAGMENT:
		declare_default_desc_pointers(ctx, params, &num_params);
		/* PS uses fixed SI_PARAM_* indices rather than num_params++. */
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		shader->info.face_vgpr_index = 20;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			assert(num_params + num_color_elements <= ARRAY_SIZE(params));
			for (i = 0; i < num_color_elements; i++)
				params[num_params++] = ctx->f32;

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_default_desc_pointers(ctx, params, &num_params);
		/* Only declare the system-value SGPRs the shader actually uses. */
		if (shader->selector->info.uses_grid_size)
			params[ctx->param_grid_size = num_params++] = v3i32;
		if (shader->selector->info.uses_block_size)
			params[ctx->param_block_size = num_params++] = v3i32;

		for (i = 0; i < 3; i++) {
			ctx->param_block_id[i] = -1;
			if (shader->selector->info.uses_block_id[i])
				params[ctx->param_block_id[i] = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		params[ctx->param_thread_id = num_params++] = v3i32;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, "main", returns, num_returns, params,
			   num_params, last_sgpr,
			   si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    ctx->separate_prolog) {
		si_llvm_add_attribute(ctx->main_fn,
				      "InitialPSInputAddr",
				      S_0286D0_PERSP_SAMPLE_ENA(1) |
				      S_0286D0_PERSP_CENTER_ENA(1) |
				      S_0286D0_PERSP_CENTROID_ENA(1) |
				      S_0286D0_LINEAR_SAMPLE_ENA(1) |
				      S_0286D0_LINEAR_CENTER_ENA(1) |
				      S_0286D0_LINEAR_CENTROID_ENA(1) |
				      S_0286D0_FRONT_FACE_ENA(1) |
				      S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers: SGPRs first, then the remaining params are
	 * VGPRs. Prolog-provided VGPRs are not inputs of the main part. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	for (; i < num_params; ++i)
		shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	/* Without ds_bpermute, derivatives and interpolation at offset/sample
	 * go through a small LDS scratch area instead. */
	if (!ctx->screen->has_ds_bpermute &&
	    bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if (shader->key.as_ls ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (ctx->screen->b.chip_class >= GFX9 &&
	     (shader->key.as_es ||
	      ctx->type == PIPE_SHADER_GEOMETRY)))
		declare_lds_as_pointer(ctx);
}
4481
/**
 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
 * for later use.
 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;

	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	/* <= VI only: on GFX9 the ESGS ring lives in LDS instead (see
	 * create_function). */
	if (ctx->screen->b.chip_class <= VI &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							     : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The copy shader only reads stream 0. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Build one patched descriptor per active vertex stream. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* dwords 0-1: add this stream's offset to the 48-bit
			 * base address (treated as i64). */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			/* dword 1 (high): set the stride and enable swizzling. */
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			/* dword 2: num_records. */
			ring = LLVMBuildInsertElement(builder, ring,
						      LLVMConstInt(ctx->i32, num_records, 0),
						      LLVMConstInt(ctx->i32, 2, 0), "");
			/* dword 3: format and swizzle parameters. */
			ring = LLVMBuildInsertElement(builder, ring,
				LLVMConstInt(ctx->i32,
					     S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
					     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
					     S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
					     S_008F0C_ADD_TID_ENABLE(1),
					     0),
				LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}
}
4581
/* Emit code that discards the fragment when the bit selected by the
 * fragment's window position in the 32x32 polygon stipple pattern is 0.
 *
 * \param param_rw_buffers    function param holding the RW buffer
 *                            descriptor array (contains the stipple buffer)
 * \param param_pos_fixed_pt  function param index of the fixed-point
 *                            fragment position (X in bits [15:0], Y in [31:16])
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
	desc = ac_build_indexed_load_const(&ctx->ac, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = LLVMBuildBitCast(builder, row, ctx->i32, "");
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	ac_build_kill(&ctx->ac, bit);
}
4614
/* Parse the (register, value) pairs that LLVM emits into the binary's
 * config section and fill in *conf: SGPR/VGPR counts, float mode, LDS
 * size, scratch size per wave, SPI_PS_INPUT_* and spill statistics.
 *
 * \param symbol_offset  offset of the symbol whose config to read when the
 *                       binary contains configs for multiple symbols
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* The config section is a sequence of (register offset, value)
	 * 32-bit pairs, hence the stride of 8 bytes. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The SGPRS/VGPRS fields are in units of 8 and 4
			 * registers respectively. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode = G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			/* Warn only once per process about unknown registers. */
			static bool printed;

			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
		break;
		}
	}

	/* If INPUT_ADDR wasn't emitted, mirror INPUT_ENA into it. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
4698
4699 void si_shader_apply_scratch_relocs(struct si_shader *shader,
4700 uint64_t scratch_va)
4701 {
4702 unsigned i;
4703 uint32_t scratch_rsrc_dword0 = scratch_va;
4704 uint32_t scratch_rsrc_dword1 =
4705 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
4706
4707 /* Enable scratch coalescing. */
4708 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
4709
4710 for (i = 0 ; i < shader->binary.reloc_count; i++) {
4711 const struct ac_shader_reloc *reloc =
4712 &shader->binary.relocs[i];
4713 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
4714 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4715 &scratch_rsrc_dword0, 4);
4716 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4717 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4718 &scratch_rsrc_dword1, 4);
4719 }
4720 }
4721 }
4722
4723 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
4724 {
4725 unsigned size = shader->binary.code_size;
4726
4727 if (shader->prolog)
4728 size += shader->prolog->binary.code_size;
4729 if (shader->previous_stage)
4730 size += shader->previous_stage->binary.code_size;
4731 if (shader->prolog2)
4732 size += shader->prolog2->binary.code_size;
4733 if (shader->epilog)
4734 size += shader->epilog->binary.code_size;
4735 return size;
4736 }
4737
/* Allocate a GPU buffer and upload the shader into it by concatenating
 * all parts in execution order: prolog, previous merged stage, prolog2,
 * main part, then either the epilog or the main part's rodata.
 *
 * \return 0 on success, -ENOMEM if the buffer allocation fails.
 */
int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
{
	const struct ac_shader_binary *prolog =
		shader->prolog ? &shader->prolog->binary : NULL;
	const struct ac_shader_binary *previous_stage =
		shader->previous_stage ? &shader->previous_stage->binary : NULL;
	const struct ac_shader_binary *prolog2 =
		shader->prolog2 ? &shader->prolog2->binary : NULL;
	const struct ac_shader_binary *epilog =
		shader->epilog ? &shader->epilog->binary : NULL;
	const struct ac_shader_binary *mainb = &shader->binary;
	unsigned bo_size = si_get_shader_binary_size(shader) +
			   (!epilog ? mainb->rodata_size : 0);
	unsigned char *ptr;

	/* Only the main part may carry rodata, and only when there are no
	 * other parts to concatenate after it. */
	assert(!prolog || !prolog->rodata_size);
	assert(!previous_stage || !previous_stage->rodata_size);
	assert(!prolog2 || !prolog2->rodata_size);
	assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
	       !mainb->rodata_size);
	assert(!epilog || !epilog->rodata_size);

	/* GFX9 can fetch at most 128 bytes past the end of the shader.
	 * Prevent VM faults.
	 */
	if (sscreen->b.chip_class >= GFX9)
		bo_size += 128;

	r600_resource_reference(&shader->bo, NULL);
	shader->bo = (struct r600_resource*)
		     pipe_buffer_create(&sscreen->b.b, 0,
					PIPE_USAGE_IMMUTABLE,
					align(bo_size, SI_CPDMA_ALIGNMENT));
	if (!shader->bo)
		return -ENOMEM;

	/* Upload. */
	ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
					PIPE_TRANSFER_READ_WRITE |
					PIPE_TRANSFER_UNSYNCHRONIZED);

	/* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
	 * endian-independent. */
	if (prolog) {
		memcpy(ptr, prolog->code, prolog->code_size);
		ptr += prolog->code_size;
	}
	if (previous_stage) {
		memcpy(ptr, previous_stage->code, previous_stage->code_size);
		ptr += previous_stage->code_size;
	}
	if (prolog2) {
		memcpy(ptr, prolog2->code, prolog2->code_size);
		ptr += prolog2->code_size;
	}

	memcpy(ptr, mainb->code, mainb->code_size);
	ptr += mainb->code_size;

	/* Either the epilog follows the main part, or (when the binary is
	 * standalone) the rodata does. */
	if (epilog)
		memcpy(ptr, epilog->code, epilog->code_size);
	else if (mainb->rodata_size > 0)
		memcpy(ptr, mainb->rodata, mainb->rodata_size);

	sscreen->b.ws->buffer_unmap(shader->bo->buf);
	return 0;
}
4805
/* Print the disassembly of one shader part to \p file, and, if a debug
 * callback is installed, also forward it one line at a time through
 * pipe_debug_message. Falls back to a raw hex dump of the machine code
 * when no disassembly string is available.
 */
static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
				       struct pipe_debug_callback *debug,
				       const char *name, FILE *file)
{
	char *line, *p;
	unsigned i, count;

	if (binary->disasm_string) {
		fprintf(file, "Shader %s disassembly:\n", name);
		fprintf(file, "%s", binary->disasm_string);

		if (debug && debug->debug_message) {
			/* Very long debug messages are cut off, so send the
			 * disassembly one line at a time. This causes more
			 * overhead, but on the plus side it simplifies
			 * parsing of resulting logs.
			 */
			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly Begin");

			line = binary->disasm_string;
			while (*line) {
				/* p points at the next '\n' or the final NUL. */
				p = util_strchrnul(line, '\n');
				count = p - line;

				if (count) {
					pipe_debug_message(debug, SHADER_INFO,
							   "%.*s", count, line);
				}

				if (!*p)
					break;
				line = p + 1;
			}

			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly End");
		}
	} else {
		fprintf(file, "Shader %s binary:\n", name);
		/* Dump the raw dwords, most significant byte first. */
		for (i = 0; i < binary->code_size; i += 4) {
			fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
				binary->code[i + 3], binary->code[i + 2],
				binary->code[i + 1], binary->code[i]);
		}
	}
}
4853
/* Print shader statistics (register usage, spills, code size, LDS and
 * scratch usage, and the estimated per-SIMD wave occupancy) to \p file,
 * and report them through the debug callback as well.
 *
 * \param check_debug_option  if true, only print when dumping is enabled
 *                            for this shader stage
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 const struct si_shader *shader,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file,
				 bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
	unsigned code_size = si_get_shader_binary_size(shader);
	/* LDS allocation granularity differs between chip generations. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves = 10;

	/* Compute LDS usage for PS. */
	switch (processor) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
4946
4947 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
4948 {
4949 switch (processor) {
4950 case PIPE_SHADER_VERTEX:
4951 if (shader->key.as_es)
4952 return "Vertex Shader as ES";
4953 else if (shader->key.as_ls)
4954 return "Vertex Shader as LS";
4955 else
4956 return "Vertex Shader as VS";
4957 case PIPE_SHADER_TESS_CTRL:
4958 return "Tessellation Control Shader";
4959 case PIPE_SHADER_TESS_EVAL:
4960 if (shader->key.as_es)
4961 return "Tessellation Evaluation Shader as ES";
4962 else
4963 return "Tessellation Evaluation Shader as VS";
4964 case PIPE_SHADER_GEOMETRY:
4965 if (shader->is_gs_copy_shader)
4966 return "GS Copy Shader as VS";
4967 else
4968 return "Geometry Shader";
4969 case PIPE_SHADER_FRAGMENT:
4970 return "Pixel Shader";
4971 case PIPE_SHADER_COMPUTE:
4972 return "Compute Shader";
4973 default:
4974 return "Unknown Shader";
4975 }
4976 }
4977
/* Dump everything about a shader: the shader key, the recorded LLVM IR
 * (if any), the disassembly of each part, and the statistics.
 *
 * \param check_debug_option  if true, respect the R600_DEBUG flags for
 *                            this stage; if false, always dump
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor))
		si_dump_shader_key(processor, shader, file);

	/* The IR string only exists when sscreen->record_llvm_ir was set. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (r600_can_dump_shader(&sscreen->b, processor) &&
	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the order they execute. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, debug, processor, file,
			     check_debug_option);
}
5018
5019 static int si_compile_llvm(struct si_screen *sscreen,
5020 struct ac_shader_binary *binary,
5021 struct si_shader_config *conf,
5022 LLVMTargetMachineRef tm,
5023 LLVMModuleRef mod,
5024 struct pipe_debug_callback *debug,
5025 unsigned processor,
5026 const char *name)
5027 {
5028 int r = 0;
5029 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
5030
5031 if (r600_can_dump_shader(&sscreen->b, processor)) {
5032 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
5033
5034 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
5035 fprintf(stderr, "%s LLVM IR:\n\n", name);
5036 ac_dump_module(mod);
5037 fprintf(stderr, "\n");
5038 }
5039 }
5040
5041 if (sscreen->record_llvm_ir) {
5042 char *ir = LLVMPrintModuleToString(mod);
5043 binary->llvm_ir_string = strdup(ir);
5044 LLVMDisposeMessage(ir);
5045 }
5046
5047 if (!si_replace_shader(count, binary)) {
5048 r = si_llvm_compile(mod, binary, tm, debug);
5049 if (r)
5050 return r;
5051 }
5052
5053 si_shader_binary_read_config(binary, conf, 0);
5054
5055 /* Enable 64-bit and 16-bit denormals, because there is no performance
5056 * cost.
5057 *
5058 * If denormals are enabled, all floating-point output modifiers are
5059 * ignored.
5060 *
5061 * Don't enable denormals for 32-bit floats, because:
5062 * - Floating-point output modifiers would be ignored by the hw.
5063 * - Some opcodes don't support denormals, such as v_mad_f32. We would
5064 * have to stop using those.
5065 * - SI & CI would be very slow.
5066 */
5067 conf->float_mode |= V_00B028_FP_64_DENORMS;
5068
5069 FREE(binary->config);
5070 FREE(binary->global_symbol_offsets);
5071 binary->config = NULL;
5072 binary->global_symbol_offsets = NULL;
5073
5074 /* Some shaders can't have rodata because their binaries can be
5075 * concatenated.
5076 */
5077 if (binary->rodata_size &&
5078 (processor == PIPE_SHADER_VERTEX ||
5079 processor == PIPE_SHADER_TESS_CTRL ||
5080 processor == PIPE_SHADER_TESS_EVAL ||
5081 processor == PIPE_SHADER_FRAGMENT)) {
5082 fprintf(stderr, "radeonsi: The shader can't have rodata.");
5083 return -EINVAL;
5084 }
5085
5086 return r;
5087 }
5088
5089 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5090 {
5091 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5092 LLVMBuildRetVoid(ctx->gallivm.builder);
5093 else
5094 LLVMBuildRet(ctx->gallivm.builder, ret);
5095 }
5096
/* Generate code for the hardware VS shader stage to go with a geometry shader.
 *
 * The GS copy shader reads vertex data back from the GSVS ring buffer and
 * performs the exports (and streamout) that a hardware VS would have done.
 * One switch case is emitted per vertex stream; only stream 0 exports.
 *
 * \return the compiled copy shader, or NULL on failure
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	struct gallivm_state *gallivm = &ctx.gallivm;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}


	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	/* The copy shader is built as a hardware VS. */
	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	ctx.type = PIPE_SHADER_VERTEX;

	builder = gallivm->builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Byte offset of this vertex's data in the GSVS ring. */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, LLVMGetParam(ctx.main_fn,
						    ctx.param_vertex_id), 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* Dispatch on the stream ID; unknown streams fall through to "end". */
	end_bb = LLVMAppendBasicBlockInContext(gallivm->context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams other than 0 only matter for streamout. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(gallivm->context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels that are unused or belong to another
				 * stream become undef. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		if (stream == 0)
			si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(gallivm->builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5247
5248 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5249 const struct si_vs_prolog_bits *prolog,
5250 const char *prefix, FILE *f)
5251 {
5252 fprintf(f, " %s.instance_divisors = {", prefix);
5253 for (int i = 0; i < ARRAY_SIZE(prolog->instance_divisors); i++) {
5254 fprintf(f, !i ? "%u" : ", %u",
5255 prolog->instance_divisors[i]);
5256 }
5257 fprintf(f, "}\n");
5258
5259 fprintf(f, " mono.vs.fix_fetch = {");
5260 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5261 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5262 fprintf(f, "}\n");
5263 }
5264
/* Print the contents of the shader key for one shader stage to \p f. */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  as_ls = %u\n", key->as_ls);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the TCS is merged with an LS, so it also carries a
		 * VS prolog key. */
		if (shader->selector->screen->b.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, "  part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, "  mono.u.ff_tcs_inputs_to_copy[0] = 0x%x\n",
			key->mono.u.ff_tcs_inputs_to_copy[0]);
		fprintf(f, "  mono.u.ff_tcs_inputs_to_copy[1] = 0x%x\n",
			key->mono.u.ff_tcs_inputs_to_copy[1]);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the GS is merged with an ES, so it may carry a VS
		 * prolog key when the ES is a vertex shader. */
		if (shader->selector->screen->b.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, "  part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, "  part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, "  part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, "  part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, "  part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, "  part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, "  part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, "  part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, "  part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, "  part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, "  part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, "  part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, "  part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, "  part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* These fields are shared by all hardware-VS stages. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, "  opt.kill_outputs[0] = 0x%x\n", key->opt.kill_outputs[0]);
		fprintf(f, "  opt.kill_outputs[1] = 0x%x\n", key->opt.kill_outputs[1]);
		fprintf(f, "  opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5348
/* Initialize the shader context and register radeonsi-specific fetch
 * callbacks and opcode actions with the gallivm TGSI->LLVM translator.
 */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;

	si_llvm_context_init(ctx, sscreen, tm);

	bld_base = &ctx->bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes share one action with per-opcode dispatch. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;

	/* Derivative opcodes. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Subgroup/ballot opcodes. */
	bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
	bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;

	/* Geometry shader opcodes. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
5387
5388 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5389 {
5390 struct si_shader *shader = ctx->shader;
5391 struct tgsi_shader_info *info = &shader->selector->info;
5392
5393 if ((ctx->type != PIPE_SHADER_VERTEX &&
5394 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5395 shader->key.as_ls ||
5396 shader->key.as_es)
5397 return;
5398
5399 ac_optimize_vs_outputs(&ctx->ac,
5400 ctx->main_fn,
5401 shader->info.vs_output_param_offset,
5402 info->num_outputs,
5403 &shader->info.nr_param_exports);
5404 }
5405
5406 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5407 {
5408 ctx->shader->config.private_mem_vgprs = 0;
5409
5410 /* Process all LLVM instructions. */
5411 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5412 while (bb) {
5413 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5414
5415 while (next) {
5416 LLVMValueRef inst = next;
5417 next = LLVMGetNextInstruction(next);
5418
5419 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5420 continue;
5421
5422 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5423 /* No idea why LLVM aligns allocas to 4 elements. */
5424 unsigned alignment = LLVMGetAlignment(inst);
5425 unsigned dw_size = align(llvm_get_type_size(type) / 4, alignment);
5426 ctx->shader->config.private_mem_vgprs += dw_size;
5427 }
5428 bb = LLVMGetNextBasicBlock(bb);
5429 }
5430 }
5431
5432 static void si_init_exec_full_mask(struct si_shader_context *ctx)
5433 {
5434 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
5435 lp_build_intrinsic(ctx->gallivm.builder,
5436 "llvm.amdgcn.init.exec", ctx->voidt,
5437 &full_mask, 1, LP_FUNC_ATTR_CONVERGENT);
5438 }
5439
5440 static void si_init_exec_from_input(struct si_shader_context *ctx,
5441 unsigned param, unsigned bitoffset)
5442 {
5443 LLVMValueRef args[] = {
5444 LLVMGetParam(ctx->main_fn, param),
5445 LLVMConstInt(ctx->i32, bitoffset, 0),
5446 };
5447 lp_build_intrinsic(ctx->gallivm.builder,
5448 "llvm.amdgcn.init.exec.from.input",
5449 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5450 }
5451
/* Translate the shader's TGSI tokens into the body of the main LLVM
 * function.
 *
 * Registers stage-specific input-fetch and epilogue callbacks, creates
 * the function, preloads ring buffers, performs GFX9 merged-shader EXEC
 * setup, and then runs the TGSI->LLVM translation.
 *
 * \return false if the TGSI translation fails
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC. If the prolog is present, set EXEC there instead.
	 * - Add a barrier before the second shader.
	 *
	 * The same thing for monolithic shaders is done in
	 * si_build_wrapper_function.
	 */
	if (ctx->screen->b.chip_class >= GFX9 && !is_monolithic) {
		if (sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !sel->vs_needs_prolog))) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 8);
			si_llvm_emit_barrier(NULL, bld_base, NULL);
		}
	}

	/* The GS keeps a per-stream count of emitted vertices. */
	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
5541
5542 /**
5543 * Compute the VS prolog key, which contains all the information needed to
5544 * build the VS prolog function, and set shader->info bits where needed.
5545 *
5546 * \param info Shader info of the vertex shader.
5547 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
5548 * \param prolog_key Key of the VS prolog
5549 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
5550 * \param key Output shader part key.
5551 */
5552 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
5553 unsigned num_input_sgprs,
5554 const struct si_vs_prolog_bits *prolog_key,
5555 struct si_shader *shader_out,
5556 union si_shader_part_key *key)
5557 {
5558 memset(key, 0, sizeof(*key));
5559 key->vs_prolog.states = *prolog_key;
5560 key->vs_prolog.num_input_sgprs = num_input_sgprs;
5561 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
5562 key->vs_prolog.as_ls = shader_out->key.as_ls;
5563
5564 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
5565 key->vs_prolog.as_ls = 1;
5566 key->vs_prolog.num_merged_next_stage_vgprs = 2;
5567 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
5568 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5569 }
5570
5571 /* Set the instanceID flag. */
5572 for (unsigned i = 0; i < info->num_inputs; i++)
5573 if (key->vs_prolog.states.instance_divisors[i])
5574 shader_out->info.uses_instanceid = true;
5575 }
5576
5577 /**
5578 * Compute the PS prolog key, which contains all the information needed to
5579 * build the PS prolog function, and set related bits in shader->config.
5580 */
5581 static void si_get_ps_prolog_key(struct si_shader *shader,
5582 union si_shader_part_key *key,
5583 bool separate_prolog)
5584 {
5585 struct tgsi_shader_info *info = &shader->selector->info;
5586
5587 memset(key, 0, sizeof(*key));
5588 key->ps_prolog.states = shader->key.part.ps.prolog;
5589 key->ps_prolog.colors_read = info->colors_read;
5590 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5591 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5592 key->ps_prolog.wqm = info->uses_derivatives &&
5593 (key->ps_prolog.colors_read ||
5594 key->ps_prolog.states.force_persp_sample_interp ||
5595 key->ps_prolog.states.force_linear_sample_interp ||
5596 key->ps_prolog.states.force_persp_center_interp ||
5597 key->ps_prolog.states.force_linear_center_interp ||
5598 key->ps_prolog.states.bc_optimize_for_persp ||
5599 key->ps_prolog.states.bc_optimize_for_linear);
5600
5601 if (info->colors_read) {
5602 unsigned *color = shader->selector->color_attr_index;
5603
5604 if (shader->key.part.ps.prolog.color_two_side) {
5605 /* BCOLORs are stored after the last input. */
5606 key->ps_prolog.num_interp_inputs = info->num_inputs;
5607 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5608 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5609 }
5610
5611 for (unsigned i = 0; i < 2; i++) {
5612 unsigned interp = info->input_interpolate[color[i]];
5613 unsigned location = info->input_interpolate_loc[color[i]];
5614
5615 if (!(info->colors_read & (0xf << i*4)))
5616 continue;
5617
5618 key->ps_prolog.color_attr_index[i] = color[i];
5619
5620 if (shader->key.part.ps.prolog.flatshade_colors &&
5621 interp == TGSI_INTERPOLATE_COLOR)
5622 interp = TGSI_INTERPOLATE_CONSTANT;
5623
5624 switch (interp) {
5625 case TGSI_INTERPOLATE_CONSTANT:
5626 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5627 break;
5628 case TGSI_INTERPOLATE_PERSPECTIVE:
5629 case TGSI_INTERPOLATE_COLOR:
5630 /* Force the interpolation location for colors here. */
5631 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5632 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5633 if (shader->key.part.ps.prolog.force_persp_center_interp)
5634 location = TGSI_INTERPOLATE_LOC_CENTER;
5635
5636 switch (location) {
5637 case TGSI_INTERPOLATE_LOC_SAMPLE:
5638 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5639 shader->config.spi_ps_input_ena |=
5640 S_0286CC_PERSP_SAMPLE_ENA(1);
5641 break;
5642 case TGSI_INTERPOLATE_LOC_CENTER:
5643 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5644 shader->config.spi_ps_input_ena |=
5645 S_0286CC_PERSP_CENTER_ENA(1);
5646 break;
5647 case TGSI_INTERPOLATE_LOC_CENTROID:
5648 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5649 shader->config.spi_ps_input_ena |=
5650 S_0286CC_PERSP_CENTROID_ENA(1);
5651 break;
5652 default:
5653 assert(0);
5654 }
5655 break;
5656 case TGSI_INTERPOLATE_LINEAR:
5657 /* Force the interpolation location for colors here. */
5658 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5659 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5660 if (shader->key.part.ps.prolog.force_linear_center_interp)
5661 location = TGSI_INTERPOLATE_LOC_CENTER;
5662
5663 /* The VGPR assignment for non-monolithic shaders
5664 * works because InitialPSInputAddr is set on the
5665 * main shader and PERSP_PULL_MODEL is never used.
5666 */
5667 switch (location) {
5668 case TGSI_INTERPOLATE_LOC_SAMPLE:
5669 key->ps_prolog.color_interp_vgpr_index[i] =
5670 separate_prolog ? 6 : 9;
5671 shader->config.spi_ps_input_ena |=
5672 S_0286CC_LINEAR_SAMPLE_ENA(1);
5673 break;
5674 case TGSI_INTERPOLATE_LOC_CENTER:
5675 key->ps_prolog.color_interp_vgpr_index[i] =
5676 separate_prolog ? 8 : 11;
5677 shader->config.spi_ps_input_ena |=
5678 S_0286CC_LINEAR_CENTER_ENA(1);
5679 break;
5680 case TGSI_INTERPOLATE_LOC_CENTROID:
5681 key->ps_prolog.color_interp_vgpr_index[i] =
5682 separate_prolog ? 10 : 13;
5683 shader->config.spi_ps_input_ena |=
5684 S_0286CC_LINEAR_CENTROID_ENA(1);
5685 break;
5686 default:
5687 assert(0);
5688 }
5689 break;
5690 default:
5691 assert(0);
5692 }
5693 }
5694 }
5695 }
5696
5697 /**
5698 * Check whether a PS prolog is required based on the key.
5699 */
5700 static bool si_need_ps_prolog(const union si_shader_part_key *key)
5701 {
5702 return key->ps_prolog.colors_read ||
5703 key->ps_prolog.states.force_persp_sample_interp ||
5704 key->ps_prolog.states.force_linear_sample_interp ||
5705 key->ps_prolog.states.force_persp_center_interp ||
5706 key->ps_prolog.states.force_linear_center_interp ||
5707 key->ps_prolog.states.bc_optimize_for_persp ||
5708 key->ps_prolog.states.bc_optimize_for_linear ||
5709 key->ps_prolog.states.poly_stipple;
5710 }
5711
5712 /**
5713 * Compute the PS epilog key, which contains all the information needed to
5714 * build the PS epilog function.
5715 */
5716 static void si_get_ps_epilog_key(struct si_shader *shader,
5717 union si_shader_part_key *key)
5718 {
5719 struct tgsi_shader_info *info = &shader->selector->info;
5720 memset(key, 0, sizeof(*key));
5721 key->ps_epilog.colors_written = info->colors_written;
5722 key->ps_epilog.writes_z = info->writes_z;
5723 key->ps_epilog.writes_stencil = info->writes_stencil;
5724 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5725 key->ps_epilog.states = shader->key.part.ps.epilog;
5726 }
5727
5728 /**
5729 * Build the GS prolog function. Rotate the input vertices for triangle strips
5730 * with adjacency.
5731 */
5732 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
5733 union si_shader_part_key *key)
5734 {
5735 unsigned num_sgprs, num_vgprs;
5736 struct gallivm_state *gallivm = &ctx->gallivm;
5737 LLVMBuilderRef builder = gallivm->builder;
5738 LLVMTypeRef params[48]; /* 40 SGPRs (maximum) + some VGPRs */
5739 LLVMTypeRef returns[48];
5740 LLVMValueRef func, ret;
5741
5742 if (ctx->screen->b.chip_class >= GFX9) {
5743 num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
5744 num_vgprs = 5; /* ES inputs are not needed by GS */
5745 } else {
5746 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
5747 num_vgprs = 8;
5748 }
5749
5750 for (unsigned i = 0; i < num_sgprs; ++i) {
5751 params[i] = ctx->i32;
5752 returns[i] = ctx->i32;
5753 }
5754
5755 for (unsigned i = 0; i < num_vgprs; ++i) {
5756 params[num_sgprs + i] = ctx->i32;
5757 returns[num_sgprs + i] = ctx->f32;
5758 }
5759
5760 /* Create the function. */
5761 si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
5762 params, num_sgprs + num_vgprs, num_sgprs - 1, 0);
5763 func = ctx->main_fn;
5764
5765 /* Set the full EXEC mask for the prolog, because we are only fiddling
5766 * with registers here. The main shader part will set the correct EXEC
5767 * mask.
5768 */
5769 if (ctx->screen->b.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
5770 si_init_exec_full_mask(ctx);
5771
5772 /* Copy inputs to outputs. This should be no-op, as the registers match,
5773 * but it will prevent the compiler from overwriting them unintentionally.
5774 */
5775 ret = ctx->return_value;
5776 for (unsigned i = 0; i < num_sgprs; i++) {
5777 LLVMValueRef p = LLVMGetParam(func, i);
5778 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
5779 }
5780 for (unsigned i = 0; i < num_vgprs; i++) {
5781 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
5782 p = LLVMBuildBitCast(builder, p, ctx->f32, "");
5783 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
5784 }
5785
5786 if (key->gs_prolog.states.tri_strip_adj_fix) {
5787 /* Remap the input vertices for every other primitive. */
5788 const unsigned gfx6_vtx_params[6] = {
5789 num_sgprs,
5790 num_sgprs + 1,
5791 num_sgprs + 3,
5792 num_sgprs + 4,
5793 num_sgprs + 5,
5794 num_sgprs + 6
5795 };
5796 const unsigned gfx9_vtx_params[3] = {
5797 num_sgprs,
5798 num_sgprs + 1,
5799 num_sgprs + 4,
5800 };
5801 LLVMValueRef vtx_in[6], vtx_out[6];
5802 LLVMValueRef prim_id, rotate;
5803
5804 if (ctx->screen->b.chip_class >= GFX9) {
5805 for (unsigned i = 0; i < 3; i++) {
5806 vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
5807 vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
5808 }
5809 } else {
5810 for (unsigned i = 0; i < 6; i++)
5811 vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
5812 }
5813
5814 prim_id = LLVMGetParam(func, num_sgprs + 2);
5815 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
5816
5817 for (unsigned i = 0; i < 6; ++i) {
5818 LLVMValueRef base, rotated;
5819 base = vtx_in[i];
5820 rotated = vtx_in[(i + 4) % 6];
5821 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
5822 }
5823
5824 if (ctx->screen->b.chip_class >= GFX9) {
5825 for (unsigned i = 0; i < 3; i++) {
5826 LLVMValueRef hi, out;
5827
5828 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
5829 LLVMConstInt(ctx->i32, 16, 0), "");
5830 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
5831 out = LLVMBuildBitCast(builder, out, ctx->f32, "");
5832 ret = LLVMBuildInsertValue(builder, ret, out,
5833 gfx9_vtx_params[i], "");
5834 }
5835 } else {
5836 for (unsigned i = 0; i < 6; i++) {
5837 LLVMValueRef out;
5838
5839 out = LLVMBuildBitCast(builder, vtx_out[i], ctx->f32, "");
5840 ret = LLVMBuildInsertValue(builder, ret, out,
5841 gfx6_vtx_params[i], "");
5842 }
5843 }
5844 }
5845
5846 LLVMBuildRet(builder, ret);
5847 }
5848
5849 /**
5850 * Given a list of shader part functions, build a wrapper function that
5851 * runs them in sequence to form a monolithic shader.
5852 */
5853 static void si_build_wrapper_function(struct si_shader_context *ctx,
5854 LLVMValueRef *parts,
5855 unsigned num_parts,
5856 unsigned main_part,
5857 unsigned next_shader_first_part)
5858 {
5859 struct gallivm_state *gallivm = &ctx->gallivm;
5860 LLVMBuilderRef builder = ctx->gallivm.builder;
5861 /* PS epilog has one arg per color component */
5862 LLVMTypeRef param_types[48];
5863 LLVMValueRef initial[48], out[48];
5864 LLVMTypeRef function_type;
5865 unsigned num_params;
5866 unsigned num_out, initial_num_out;
5867 MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
5868 MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
5869 unsigned num_sgprs, num_vgprs;
5870 unsigned last_sgpr_param;
5871 unsigned gprs;
5872 struct lp_build_if_state if_state;
5873
5874 for (unsigned i = 0; i < num_parts; ++i) {
5875 lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
5876 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
5877 }
5878
5879 /* The parameters of the wrapper function correspond to those of the
5880 * first part in terms of SGPRs and VGPRs, but we use the types of the
5881 * main part to get the right types. This is relevant for the
5882 * dereferenceable attribute on descriptor table pointers.
5883 */
5884 num_sgprs = 0;
5885 num_vgprs = 0;
5886
5887 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
5888 num_params = LLVMCountParamTypes(function_type);
5889
5890 for (unsigned i = 0; i < num_params; ++i) {
5891 LLVMValueRef param = LLVMGetParam(parts[0], i);
5892
5893 if (ac_is_sgpr_param(param)) {
5894 assert(num_vgprs == 0);
5895 num_sgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5896 } else {
5897 num_vgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5898 }
5899 }
5900 assert(num_vgprs + num_sgprs <= ARRAY_SIZE(param_types));
5901
5902 num_params = 0;
5903 last_sgpr_param = 0;
5904 gprs = 0;
5905 while (gprs < num_sgprs + num_vgprs) {
5906 LLVMValueRef param = LLVMGetParam(parts[main_part], num_params);
5907 unsigned size;
5908
5909 param_types[num_params] = LLVMTypeOf(param);
5910 if (gprs < num_sgprs)
5911 last_sgpr_param = num_params;
5912 size = llvm_get_type_size(param_types[num_params]) / 4;
5913 num_params++;
5914
5915 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
5916 assert(gprs + size <= num_sgprs + num_vgprs &&
5917 (gprs >= num_sgprs || gprs + size <= num_sgprs));
5918
5919 gprs += size;
5920 }
5921
5922 si_create_function(ctx, "wrapper", NULL, 0, param_types, num_params,
5923 last_sgpr_param,
5924 si_get_max_workgroup_size(ctx->shader));
5925
5926 if (is_merged_shader(ctx->shader))
5927 si_init_exec_full_mask(ctx);
5928
5929 /* Record the arguments of the function as if they were an output of
5930 * a previous part.
5931 */
5932 num_out = 0;
5933 num_out_sgpr = 0;
5934
5935 for (unsigned i = 0; i < num_params; ++i) {
5936 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
5937 LLVMTypeRef param_type = LLVMTypeOf(param);
5938 LLVMTypeRef out_type = i <= last_sgpr_param ? ctx->i32 : ctx->f32;
5939 unsigned size = llvm_get_type_size(param_type) / 4;
5940
5941 if (size == 1) {
5942 if (param_type != out_type)
5943 param = LLVMBuildBitCast(builder, param, out_type, "");
5944 out[num_out++] = param;
5945 } else {
5946 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
5947
5948 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
5949 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
5950 param_type = ctx->i64;
5951 }
5952
5953 if (param_type != vector_type)
5954 param = LLVMBuildBitCast(builder, param, vector_type, "");
5955
5956 for (unsigned j = 0; j < size; ++j)
5957 out[num_out++] = LLVMBuildExtractElement(
5958 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
5959 }
5960
5961 if (i <= last_sgpr_param)
5962 num_out_sgpr = num_out;
5963 }
5964
5965 memcpy(initial, out, sizeof(out));
5966 initial_num_out = num_out;
5967 initial_num_out_sgpr = num_out_sgpr;
5968
5969 /* Now chain the parts. */
5970 for (unsigned part = 0; part < num_parts; ++part) {
5971 LLVMValueRef in[48];
5972 LLVMValueRef ret;
5973 LLVMTypeRef ret_type;
5974 unsigned out_idx = 0;
5975
5976 num_params = LLVMCountParams(parts[part]);
5977 assert(num_params <= ARRAY_SIZE(param_types));
5978
5979 /* Merged shaders are executed conditionally depending
5980 * on the number of enabled threads passed in the input SGPRs. */
5981 if (is_merged_shader(ctx->shader) &&
5982 (part == 0 || part == next_shader_first_part)) {
5983 LLVMValueRef ena, count = initial[3];
5984
5985 /* The thread count for the 2nd shader is at bit-offset 8. */
5986 if (part == next_shader_first_part) {
5987 count = LLVMBuildLShr(builder, count,
5988 LLVMConstInt(ctx->i32, 8, 0), "");
5989 }
5990 count = LLVMBuildAnd(builder, count,
5991 LLVMConstInt(ctx->i32, 0x7f, 0), "");
5992 ena = LLVMBuildICmp(builder, LLVMIntULT,
5993 ac_get_thread_id(&ctx->ac), count, "");
5994 lp_build_if(&if_state, &ctx->gallivm, ena);
5995 }
5996
5997 /* Derive arguments for the next part from outputs of the
5998 * previous one.
5999 */
6000 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
6001 LLVMValueRef param;
6002 LLVMTypeRef param_type;
6003 bool is_sgpr;
6004 unsigned param_size;
6005 LLVMValueRef arg = NULL;
6006
6007 param = LLVMGetParam(parts[part], param_idx);
6008 param_type = LLVMTypeOf(param);
6009 param_size = llvm_get_type_size(param_type) / 4;
6010 is_sgpr = ac_is_sgpr_param(param);
6011
6012 if (is_sgpr) {
6013 #if HAVE_LLVM < 0x0400
6014 LLVMRemoveAttribute(param, LLVMByValAttribute);
6015 #else
6016 unsigned kind_id = LLVMGetEnumAttributeKindForName("byval", 5);
6017 LLVMRemoveEnumAttributeAtIndex(parts[part], param_idx + 1, kind_id);
6018 #endif
6019 lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
6020 }
6021
6022 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
6023 assert(is_sgpr || out_idx >= num_out_sgpr);
6024
6025 if (param_size == 1)
6026 arg = out[out_idx];
6027 else
6028 arg = lp_build_gather_values(gallivm, &out[out_idx], param_size);
6029
6030 if (LLVMTypeOf(arg) != param_type) {
6031 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6032 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
6033 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6034 } else {
6035 arg = LLVMBuildBitCast(builder, arg, param_type, "");
6036 }
6037 }
6038
6039 in[param_idx] = arg;
6040 out_idx += param_size;
6041 }
6042
6043 ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
6044
6045 if (is_merged_shader(ctx->shader) &&
6046 (part + 1 == next_shader_first_part ||
6047 part + 1 == num_parts)) {
6048 lp_build_endif(&if_state);
6049
6050 if (part + 1 == next_shader_first_part) {
6051 /* A barrier is required between 2 merged shaders. */
6052 si_llvm_emit_barrier(NULL, &ctx->bld_base, NULL);
6053
6054 /* The second half of the merged shader should use
6055 * the inputs from the toplevel (wrapper) function,
6056 * not the return value from the last call.
6057 *
6058 * That's because the last call was executed condi-
6059 * tionally, so we can't consume it in the main
6060 * block.
6061 */
6062 memcpy(out, initial, sizeof(initial));
6063 num_out = initial_num_out;
6064 num_out_sgpr = initial_num_out_sgpr;
6065 }
6066 continue;
6067 }
6068
6069 /* Extract the returned GPRs. */
6070 ret_type = LLVMTypeOf(ret);
6071 num_out = 0;
6072 num_out_sgpr = 0;
6073
6074 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
6075 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
6076
6077 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
6078
6079 for (unsigned i = 0; i < ret_size; ++i) {
6080 LLVMValueRef val =
6081 LLVMBuildExtractValue(builder, ret, i, "");
6082
6083 out[num_out++] = val;
6084
6085 if (LLVMTypeOf(val) == ctx->i32) {
6086 assert(num_out_sgpr + 1 == num_out);
6087 num_out_sgpr = num_out;
6088 }
6089 }
6090 }
6091 }
6092
6093 LLVMBuildRetVoid(builder);
6094 }
6095
/**
 * Compile a TGSI shader to hardware bytecode.
 *
 * Compiles the main part, and for monolithic shaders also the prologs and
 * epilogs, then links them with a wrapper function, runs LLVM optimization,
 * and produces the final binary in shader->binary/shader->config.
 *
 * \param sscreen       screen
 * \param tm            LLVM target machine
 * \param shader        the shader to compile (selector must be set)
 * \param is_monolithic whether prologs/epilogs are compiled in
 * \param debug         debug callback
 * \return 0 on success, negative on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
	    !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	ctx.load_system_value = declare_system_value;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS: optional prolog + main part. */
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->b.chip_class >= GFX9) {
			/* GFX9 merged LS+HS:
			 * [VS prolog], VS-as-LS main, TCS main, TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (ls->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part: compile the LS selector's TGSI
			 * in this same context with the as_ls key bit set. */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !ls->vs_needs_prolog,
						  4 - !ls->vs_needs_prolog, 0,
						  ls->vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9 TCS: main + epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->b.chip_class >= GFX9) {
			/* GFX9 merged ES+GS:
			 * [ES prolog], ES main, GS prolog, GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				/* NOTE(review): this GS path reads the TCS-side
				 * prolog bits (tcs.ls_prolog) — presumably the
				 * two key unions alias, but verify against the
				 * si_shader_key layout; a GS-specific field
				 * would read less surprisingly. */
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part: compile the ES selector's TGSI in this
			 * same context with the as_es key bit set. */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9 GS: prolog + main. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS: [prolog], main, epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    r600_can_dump_shader(&sscreen->b, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Per-wave limits shrink as more waves must fit per SIMD. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;

		/* Each enabled SPI PS input contributes a fixed number of
		 * VGPRs; sum them in hardware order. */
		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	return 0;
}
6404
6405 /**
6406 * Create, compile and return a shader part (prolog or epilog).
6407 *
6408 * \param sscreen screen
6409 * \param list list of shader parts of the same category
6410 * \param type shader type
6411 * \param key shader part key
6412 * \param prolog whether the part being requested is a prolog
6413 * \param tm LLVM target machine
6414 * \param debug debug callback
6415 * \param build the callback responsible for building the main function
6416 * \return non-NULL on success
6417 */
6418 static struct si_shader_part *
6419 si_get_shader_part(struct si_screen *sscreen,
6420 struct si_shader_part **list,
6421 enum pipe_shader_type type,
6422 bool prolog,
6423 union si_shader_part_key *key,
6424 LLVMTargetMachineRef tm,
6425 struct pipe_debug_callback *debug,
6426 void (*build)(struct si_shader_context *,
6427 union si_shader_part_key *),
6428 const char *name)
6429 {
6430 struct si_shader_part *result;
6431
6432 mtx_lock(&sscreen->shader_parts_mutex);
6433
6434 /* Find existing. */
6435 for (result = *list; result; result = result->next) {
6436 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6437 mtx_unlock(&sscreen->shader_parts_mutex);
6438 return result;
6439 }
6440 }
6441
6442 /* Compile a new one. */
6443 result = CALLOC_STRUCT(si_shader_part);
6444 result->key = *key;
6445
6446 struct si_shader shader = {};
6447 struct si_shader_context ctx;
6448 struct gallivm_state *gallivm = &ctx.gallivm;
6449
6450 si_init_shader_ctx(&ctx, sscreen, tm);
6451 ctx.shader = &shader;
6452 ctx.type = type;
6453
6454 switch (type) {
6455 case PIPE_SHADER_VERTEX:
6456 break;
6457 case PIPE_SHADER_TESS_CTRL:
6458 assert(!prolog);
6459 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6460 break;
6461 case PIPE_SHADER_GEOMETRY:
6462 assert(prolog);
6463 break;
6464 case PIPE_SHADER_FRAGMENT:
6465 if (prolog)
6466 shader.key.part.ps.prolog = key->ps_prolog.states;
6467 else
6468 shader.key.part.ps.epilog = key->ps_epilog.states;
6469 break;
6470 default:
6471 unreachable("bad shader part");
6472 }
6473
6474 build(&ctx, key);
6475
6476 /* Compile. */
6477 si_llvm_optimize_module(&ctx);
6478
6479 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
6480 gallivm->module, debug, ctx.type, name)) {
6481 FREE(result);
6482 result = NULL;
6483 goto out;
6484 }
6485
6486 result->next = *list;
6487 *list = result;
6488
6489 out:
6490 si_llvm_dispose(&ctx);
6491 mtx_unlock(&sscreen->shader_parts_mutex);
6492 return result;
6493 }
6494
6495 /**
6496 * Build the vertex shader prolog function.
6497 *
6498 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6499 * All inputs are returned unmodified. The vertex load indices are
6500 * stored after them, which will be used by the API VS for fetching inputs.
6501 *
6502 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6503 * input_v0,
6504 * input_v1,
6505 * input_v2,
6506 * input_v3,
6507 * (VertexID + BaseVertex),
6508 * (InstanceID + StartInstance),
6509 * (InstanceID / 2 + StartInstance)
6510 */
6511 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
6512 union si_shader_part_key *key)
6513 {
6514 struct gallivm_state *gallivm = &ctx->gallivm;
6515 LLVMTypeRef *params, *returns;
6516 LLVMValueRef ret, func;
6517 int last_sgpr, num_params, num_returns, i;
6518 unsigned first_vs_vgpr = key->vs_prolog.num_input_sgprs +
6519 key->vs_prolog.num_merged_next_stage_vgprs;
6520 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
6521 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
6522 num_input_vgprs;
6523 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
6524
6525 ctx->param_vertex_id = first_vs_vgpr;
6526 ctx->param_instance_id = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
6527
6528 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6529 params = alloca(num_all_input_regs * sizeof(LLVMTypeRef));
6530 returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
6531 sizeof(LLVMTypeRef));
6532 num_params = 0;
6533 num_returns = 0;
6534
6535 /* Declare input and output SGPRs. */
6536 num_params = 0;
6537 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6538 params[num_params++] = ctx->i32;
6539 returns[num_returns++] = ctx->i32;
6540 }
6541 last_sgpr = num_params - 1;
6542
6543 /* Preloaded VGPRs (outputs must be floats) */
6544 for (i = 0; i < num_input_vgprs; i++) {
6545 params[num_params++] = ctx->i32;
6546 returns[num_returns++] = ctx->f32;
6547 }
6548
6549 /* Vertex load indices. */
6550 for (i = 0; i <= key->vs_prolog.last_input; i++)
6551 returns[num_returns++] = ctx->f32;
6552
6553 /* Create the function. */
6554 si_create_function(ctx, "vs_prolog", returns, num_returns, params,
6555 num_params, last_sgpr, 0);
6556 func = ctx->main_fn;
6557
6558 if (key->vs_prolog.num_merged_next_stage_vgprs &&
6559 !key->vs_prolog.is_monolithic)
6560 si_init_exec_from_input(ctx, 3, 0);
6561
6562 /* Copy inputs to outputs. This should be no-op, as the registers match,
6563 * but it will prevent the compiler from overwriting them unintentionally.
6564 */
6565 ret = ctx->return_value;
6566 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6567 LLVMValueRef p = LLVMGetParam(func, i);
6568 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6569 }
6570 for (; i < num_params; i++) {
6571 LLVMValueRef p = LLVMGetParam(func, i);
6572 p = LLVMBuildBitCast(gallivm->builder, p, ctx->f32, "");
6573 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6574 }
6575
6576 /* Compute vertex load indices from instance divisors. */
6577 for (i = 0; i <= key->vs_prolog.last_input; i++) {
6578 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
6579 LLVMValueRef index;
6580
6581 if (divisor) {
6582 /* InstanceID / Divisor + StartInstance */
6583 index = get_instance_index_for_fetch(ctx,
6584 user_sgpr_base +
6585 SI_SGPR_START_INSTANCE,
6586 divisor);
6587 } else {
6588 /* VertexID + BaseVertex */
6589 index = LLVMBuildAdd(gallivm->builder,
6590 LLVMGetParam(func, ctx->param_vertex_id),
6591 LLVMGetParam(func, user_sgpr_base +
6592 SI_SGPR_BASE_VERTEX), "");
6593 }
6594
6595 index = LLVMBuildBitCast(gallivm->builder, index, ctx->f32, "");
6596 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
6597 num_params++, "");
6598 }
6599
6600 si_llvm_build_ret(ctx, ret);
6601 }
6602
6603 static bool si_get_vs_prolog(struct si_screen *sscreen,
6604 LLVMTargetMachineRef tm,
6605 struct si_shader *shader,
6606 struct pipe_debug_callback *debug,
6607 struct si_shader *main_part,
6608 const struct si_vs_prolog_bits *key)
6609 {
6610 struct si_shader_selector *vs = main_part->selector;
6611
6612 /* The prolog is a no-op if there are no inputs. */
6613 if (!vs->vs_needs_prolog)
6614 return true;
6615
6616 /* Get the prolog. */
6617 union si_shader_part_key prolog_key;
6618 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
6619 key, shader, &prolog_key);
6620
6621 shader->prolog =
6622 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6623 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
6624 debug, si_build_vs_prolog_function,
6625 "Vertex Shader Prolog");
6626 return shader->prolog != NULL;
6627 }
6628
6629 /**
6630 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6631 */
static bool si_shader_select_vs_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	/* A VS only has a prolog (vertex load indices); there is no VS
	 * epilog, so selecting the prolog is all that's needed here. */
	return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
				&shader->key.part.vs.prolog);
}
6640
6641 /**
6642 * Compile the TCS epilog function. This writes tesselation factors to memory
6643 * based on the output primitive type of the tesselator (determined by TES).
6644 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMTypeRef params[32];
	LLVMValueRef func;
	int last_sgpr, num_params = 0;

	/* Declare input SGPRs. Only the parameters captured into ctx->param_*
	 * are actually read by the epilog; the rest are declared solely so
	 * the register layout matches the main TCS function's ABI.
	 * NOTE(review): meanings of the unnamed SGPRs are assumed from that
	 * ABI — confirm against the main-function declarations. */
	if (ctx->screen->b.chip_class >= GFX9) {
		/* GFX9 merged LS-HS layout. */
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* wave info */
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
	} else {
		/* Pre-GFX9 standalone TCS layout. */
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
	}
	last_sgpr = num_params - 1;

	params[num_params++] = ctx->i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx->i32; /* invocation ID within the patch */
	params[num_params++] = ctx->i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, params, num_params, last_sgpr,
			   ctx->screen->b.chip_class >= CIK ? 128 : 64);
	/* Tess factors are read back from LDS before being stored. */
	declare_lds_as_pointer(ctx);
	func = ctx->main_fn;

	/* Write the tess factors using the three VGPRs declared above. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	LLVMBuildRetVoid(gallivm->builder);
}
6707
6708 /**
6709 * Select and compile (or reuse) TCS parts (epilog).
6710 */
6711 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6712 LLVMTargetMachineRef tm,
6713 struct si_shader *shader,
6714 struct pipe_debug_callback *debug)
6715 {
6716 if (sscreen->b.chip_class >= GFX9) {
6717 struct si_shader *ls_main_part =
6718 shader->key.part.tcs.ls->main_shader_part_ls;
6719
6720 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
6721 &shader->key.part.tcs.ls_prolog))
6722 return false;
6723
6724 shader->previous_stage = ls_main_part;
6725 }
6726
6727 /* Get the epilog. */
6728 union si_shader_part_key epilog_key;
6729 memset(&epilog_key, 0, sizeof(epilog_key));
6730 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
6731
6732 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6733 PIPE_SHADER_TESS_CTRL, false,
6734 &epilog_key, tm, debug,
6735 si_build_tcs_epilog_function,
6736 "Tessellation Control Shader Epilog");
6737 return shader->epilog != NULL;
6738 }
6739
6740 /**
6741 * Select and compile (or reuse) GS parts (prolog).
6742 */
6743 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
6744 LLVMTargetMachineRef tm,
6745 struct si_shader *shader,
6746 struct pipe_debug_callback *debug)
6747 {
6748 if (sscreen->b.chip_class >= GFX9) {
6749 struct si_shader *es_main_part =
6750 shader->key.part.gs.es->main_shader_part_es;
6751
6752 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
6753 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
6754 &shader->key.part.gs.vs_prolog))
6755 return false;
6756
6757 shader->previous_stage = es_main_part;
6758 }
6759
6760 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
6761 return true;
6762
6763 union si_shader_part_key prolog_key;
6764 memset(&prolog_key, 0, sizeof(prolog_key));
6765 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
6766
6767 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
6768 PIPE_SHADER_GEOMETRY, true,
6769 &prolog_key, tm, debug,
6770 si_build_gs_prolog_function,
6771 "Geometry Shader Prolog");
6772 return shader->prolog2 != NULL;
6773 }
6774
6775 /**
6776 * Build the pixel shader prolog function. This handles:
6777 * - two-side color selection and interpolation
6778 * - overriding interpolation parameters for the API PS
6779 * - polygon stippling
6780 *
6781 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
6782 * overriden by other states. (e.g. per-sample interpolation)
6783 * Interpolated colors are stored after the preloaded VGPRs.
6784 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx->i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx->f32;

	/* Declare outputs (same as inputs + add colors if needed).
	 * "params" is deliberately reused as the return-type array: the
	 * returns are the inputs followed by one f32 per interpolated color
	 * channel, and the array was allocated with 8 extra slots above. */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", params, num_returns, params,
			   num_params, last_sgpr, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx->i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx->i1, "");

		/* VGPR offsets below (base + 2/4/8/10) index the fixed PS
		 * barycentric layout: PERSP_SAMPLE, PERSP_CENTER,
		 * PERSP_CENTROID, then the LINEAR equivalents, 2 VGPRs each. */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors: one iteration per two-color set (COLOR0/1). */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx->i32, "");
		}

		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append the enabled color channels after the passed-through
		 * inputs in the return value. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7018
7019 /**
7020 * Build the pixel shader epilog function. This handles everything that must be
7021 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
7022 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	/* Max: SGPRs + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params = 0, i;
	struct si_ps_exports exp = {};

	/* Declare input SGPRs. */
	params[ctx->param_rw_buffers = num_params++] = ctx->i64;
	params[ctx->param_const_and_shader_buffers = num_params++] = ctx->i64;
	params[ctx->param_samplers_and_images = num_params++] = ctx->i64;
	assert(num_params == SI_PARAM_ALPHA_REF);
	params[SI_PARAM_ALPHA_REF] = ctx->f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 per written color buffer, plus one each for
	 * Z, stencil and samplemask when written. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Ensure the samplemask VGPR location exists even when fewer outputs
	 * are written (the main part always passes it through). */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, params, num_params,
			   last_sgpr, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so it can be marked "done". A color
	 * export can only be last when no Z/stencil/samplemask export will
	 * follow it. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written MRT, consuming 4 color VGPRs per MRT. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	/* The hw requires at least one export; emit a null export when
	 * nothing else was exported. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
}
7119
7120 /**
7121 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
7122 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. Unlike the prolog, a PS always has one. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* The rest of this function patches spi_ps_input_ena to match what
	 * the selected prolog/epilog actually need. spi_ps_input_addr (the
	 * superset of inputs the whole pipeline can supply) must already
	 * contain every bit that gets enabled here — hence the asserts. */

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed. */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7216
7217 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7218 unsigned *lds_size)
7219 {
7220 /* SPI barrier management bug:
7221 * Make sure we have at least 4k of LDS in use to avoid the bug.
7222 * It applies to workgroup sizes of more than one wavefront.
7223 */
7224 if (sscreen->b.family == CHIP_BONAIRE ||
7225 sscreen->b.family == CHIP_KABINI ||
7226 sscreen->b.family == CHIP_MULLINS)
7227 *lds_size = MAX2(*lds_size, 8);
7228 }
7229
7230 static void si_fix_resource_usage(struct si_screen *sscreen,
7231 struct si_shader *shader)
7232 {
7233 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7234
7235 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7236
7237 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7238 si_get_max_workgroup_size(shader) > 64) {
7239 si_multiwave_lds_size_workaround(sscreen,
7240 &shader->config.lds_size);
7241 }
7242 }
7243
/* Create a complete shader variant: either compile it monolithically, or
 * assemble it from a precompiled main part plus selected prologs/epilogs,
 * then upload the binary. Returns 0 on success, negative on failure. */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over. The binary itself
		 * stays owned by the main part (is_binary_shared). */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			/* TES has no prolog/epilog. */
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the assembled shader must be
		 * able to hold the maximum usage of any of its parts. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			/* The merged-in previous stage (GFX9) contributes its
			 * full resource usage, including spills and scratch. */
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
7373
7374 void si_shader_destroy(struct si_shader *shader)
7375 {
7376 if (shader->scratch_bo)
7377 r600_resource_reference(&shader->scratch_bo, NULL);
7378
7379 r600_resource_reference(&shader->bo, NULL);
7380
7381 if (!shader->is_binary_shared)
7382 radeon_shader_binary_clean(&shader->binary);
7383
7384 free(shader->shader_log);
7385 }