radeonsi: use uint32_t to declare si_shader_key.opt.kill_outputs
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "util/u_memory.h"
37 #include "util/u_string.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_util.h"
40 #include "tgsi/tgsi_dump.h"
41
42 #include "ac_binary.h"
43 #include "ac_llvm_util.h"
44 #include "ac_exp_param.h"
45 #include "si_shader_internal.h"
46 #include "si_pipe.h"
47 #include "sid.h"
48
49
/* Names of the symbols that stand for the two dwords of the scratch buffer
 * resource descriptor; they are resolved elsewhere (NOTE(review): confirm
 * the patching mechanism against the ELF upload path). */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";

/* One output register's values and semantics, gathered before exporting. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* one value per component (x, y, z, w) */
	unsigned semantic_name;	/* TGSI_SEMANTIC_* */
	unsigned semantic_index;
	ubyte vertex_stream[4];	/* per-component stream — presumably GS vertex streams; verify against callers */
};
63
/* Forward declarations of helpers defined later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f);

static unsigned llvm_get_type_size(LLVMTypeRef type);

/* Builders for the prolog/epilog shader parts. */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);

/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* LLVM address-space numbers used for constant and local (LDS) memory. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};
95
96 static bool is_merged_shader(struct si_shader *shader)
97 {
98 if (shader->selector->screen->b.chip_class <= VI)
99 return false;
100
101 return shader->key.as_ls ||
102 shader->key.as_es ||
103 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
104 shader->selector->type == PIPE_SHADER_GEOMETRY;
105 }
106
107 /**
108 * Returns a unique index for a per-patch semantic name and index. The index
109 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
110 * can be calculated.
111 */
112 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
113 {
114 switch (semantic_name) {
115 case TGSI_SEMANTIC_TESSOUTER:
116 return 0;
117 case TGSI_SEMANTIC_TESSINNER:
118 return 1;
119 case TGSI_SEMANTIC_PATCH:
120 assert(index < 30);
121 return 2 + index;
122
123 default:
124 assert(!"invalid semantic name");
125 return 0;
126 }
127 }
128
129 /**
130 * Returns a unique index for a semantic name and index. The index must be
131 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
132 * calculated.
133 */
134 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
135 {
136 switch (semantic_name) {
137 case TGSI_SEMANTIC_POSITION:
138 return 0;
139 case TGSI_SEMANTIC_GENERIC:
140 /* Since some shader stages use the the highest used IO index
141 * to determine the size to allocate for inputs/outputs
142 * (in LDS, tess and GS rings). GENERIC should be placed right
143 * after POSITION to make that size as small as possible.
144 */
145 if (index < SI_MAX_IO_GENERIC)
146 return 1 + index;
147
148 assert(!"invalid generic index");
149 return 0;
150 case TGSI_SEMANTIC_PSIZE:
151 return SI_MAX_IO_GENERIC + 1;
152 case TGSI_SEMANTIC_CLIPDIST:
153 assert(index <= 1);
154 return SI_MAX_IO_GENERIC + 2 + index;
155 case TGSI_SEMANTIC_FOG:
156 return SI_MAX_IO_GENERIC + 4;
157 case TGSI_SEMANTIC_LAYER:
158 return SI_MAX_IO_GENERIC + 5;
159 case TGSI_SEMANTIC_VIEWPORT_INDEX:
160 return SI_MAX_IO_GENERIC + 6;
161 case TGSI_SEMANTIC_PRIMID:
162 return SI_MAX_IO_GENERIC + 7;
163 case TGSI_SEMANTIC_COLOR: /* these alias */
164 case TGSI_SEMANTIC_BCOLOR:
165 assert(index < 2);
166 return SI_MAX_IO_GENERIC + 8 + index;
167 case TGSI_SEMANTIC_TEXCOORD:
168 assert(index < 8);
169 assert(SI_MAX_IO_GENERIC + 10 + index < 64);
170 return SI_MAX_IO_GENERIC + 10 + index;
171 default:
172 assert(!"invalid semantic name");
173 return 0;
174 }
175 }
176
177 /**
178 * Get the value of a shader input parameter and extract a bitfield.
179 */
180 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
181 unsigned param, unsigned rshift,
182 unsigned bitwidth)
183 {
184 struct gallivm_state *gallivm = &ctx->gallivm;
185 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
186 param);
187
188 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
189 value = bitcast(&ctx->bld_base,
190 TGSI_TYPE_UNSIGNED, value);
191
192 if (rshift)
193 value = LLVMBuildLShr(gallivm->builder, value,
194 LLVMConstInt(ctx->i32, rshift, 0), "");
195
196 if (rshift + bitwidth < 32) {
197 unsigned mask = (1 << bitwidth) - 1;
198 value = LLVMBuildAnd(gallivm->builder, value,
199 LLVMConstInt(ctx->i32, mask, 0), "");
200 }
201
202 return value;
203 }
204
205 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
206 {
207 switch (ctx->type) {
208 case PIPE_SHADER_TESS_CTRL:
209 return unpack_param(ctx, ctx->param_tcs_rel_ids, 0, 8);
210
211 case PIPE_SHADER_TESS_EVAL:
212 return LLVMGetParam(ctx->main_fn,
213 ctx->param_tes_rel_patch_id);
214
215 default:
216 assert(0);
217 return NULL;
218 }
219 }
220
221 /* Tessellation shaders pass outputs to the next shader using LDS.
222 *
223 * LS outputs = TCS inputs
224 * TCS outputs = TES inputs
225 *
226 * The LDS layout is:
227 * - TCS inputs for patch 0
228 * - TCS inputs for patch 1
229 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
230 * - ...
231 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
232 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
233 * - TCS outputs for patch 1
234 * - Per-patch TCS outputs for patch 1
235 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
236 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
237 * - ...
238 *
239 * All three shaders VS(LS), TCS, TES share the same LDS space.
240 */
241
/* Stride in dwords of one patch's TCS inputs in LDS
 * (packed in bits [8..20] of the VS state bits). */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
247
/* Stride in dwords of one patch's TCS outputs in LDS
 * (packed in bits [0..12] of the TCS output LDS layout). */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
}
253
/* LDS offset (in dwords) where patch 0's TCS outputs start.
 * The parameter stores the offset in units of 4 dwords, hence the multiply. */
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     0, 16),
				4);
}
263
/* LDS offset (in dwords) where patch 0's per-patch TCS outputs start.
 * Stored in the upper 16 bits, also in units of 4 dwords. */
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     16, 16),
				4);
}
273
274 static LLVMValueRef
275 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
276 {
277 struct gallivm_state *gallivm = &ctx->gallivm;
278 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
279 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
280
281 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
282 }
283
284 static LLVMValueRef
285 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
286 {
287 struct gallivm_state *gallivm = &ctx->gallivm;
288 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
289 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
290 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
291
292 return LLVMBuildAdd(gallivm->builder, patch0_offset,
293 LLVMBuildMul(gallivm->builder, patch_stride,
294 rel_patch_id, ""),
295 "");
296 }
297
298 static LLVMValueRef
299 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
300 {
301 struct gallivm_state *gallivm = &ctx->gallivm;
302 LLVMValueRef patch0_patch_data_offset =
303 get_tcs_out_patch0_patch_data_offset(ctx);
304 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
305 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
306
307 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
308 LLVMBuildMul(gallivm->builder, patch_stride,
309 rel_patch_id, ""),
310 "");
311 }
312
313 static LLVMValueRef get_instance_index_for_fetch(
314 struct si_shader_context *ctx,
315 unsigned param_start_instance, unsigned divisor)
316 {
317 struct gallivm_state *gallivm = &ctx->gallivm;
318
319 LLVMValueRef result = LLVMGetParam(ctx->main_fn,
320 ctx->param_instance_id);
321
322 /* The division must be done before START_INSTANCE is added. */
323 if (divisor > 1)
324 result = LLVMBuildUDiv(gallivm->builder, result,
325 LLVMConstInt(ctx->i32, divisor, 0), "");
326
327 return LLVMBuildAdd(gallivm->builder, result,
328 LLVMGetParam(ctx->main_fn, param_start_instance), "");
329 }
330
331 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
332 * to float. */
333 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
334 LLVMValueRef vec4,
335 unsigned double_index)
336 {
337 LLVMBuilderRef builder = ctx->gallivm.builder;
338 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->gallivm.context);
339 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
340 LLVMVectorType(f64, 2), "");
341 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
342 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
343 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
344 }
345
/**
 * Load a vertex shader input and produce its 4 float-typed components.
 *
 * Fetches the attribute through the vertex-buffer descriptor (T#) list and
 * then applies the shader key's "fix_fetch" workaround for formats the
 * hardware cannot fetch directly.
 *
 * \param input_index  index of the VS input
 * \param decl         TGSI declaration (unused here)
 * \param out          receives the 4 components; integer results are carried
 *                     bitcast to f32
 */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       true);
	}

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input[0], llvm_chan, "");
	}

	/* Fix up formats the hardware misfetches. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(gallivm->builder, tmp, ctx->i32, "");
		else
			tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->i32, "");

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(gallivm->builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(gallivm->builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
			/* Clamp to [-1, 1]: the 2-bit signed encoding can
			 * produce -2. */
			clamp = LLVMBuildFCmp(gallivm->builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(gallivm->builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(gallivm->builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		/* Raw unsigned integers were fetched; normalize to [0, 1]. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000; /* FIXED is 16.16 fixed point */
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		/* Convert raw unsigned integers to float, no normalization. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildUIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		/* Convert raw signed integers to float, no normalization. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
						     ctx->i32, "");
			out[chan] = LLVMBuildSIToFP(gallivm->builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		/* One load carries both doubles; demote each to float. */
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* Three 2-dword loads, one double each. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two 4-dword loads, two doubles each. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* One load per component; take .x of each fetch. */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(gallivm->builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			/* Float formats: alpha = 1.0. */
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			/* Integer formats: alpha = integer 1, carried as f32
			 * bits like the other integer components. */
			out[3] = LLVMBuildBitCast(gallivm->builder, ctx->i32_1,
						  ctx->f32, "");
		}
		break;
	}
}
551
552 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
553 unsigned swizzle)
554 {
555 struct si_shader_context *ctx = si_shader_context(bld_base);
556
557 if (swizzle > 0)
558 return ctx->i32_0;
559
560 switch (ctx->type) {
561 case PIPE_SHADER_VERTEX:
562 return LLVMGetParam(ctx->main_fn,
563 ctx->param_vs_prim_id);
564 case PIPE_SHADER_TESS_CTRL:
565 return LLVMGetParam(ctx->main_fn,
566 ctx->param_tcs_patch_id);
567 case PIPE_SHADER_TESS_EVAL:
568 return LLVMGetParam(ctx->main_fn,
569 ctx->param_tes_patch_id);
570 case PIPE_SHADER_GEOMETRY:
571 return LLVMGetParam(ctx->main_fn,
572 ctx->param_gs_prim_id);
573 default:
574 assert(0);
575 return ctx->i32_0;
576 }
577 }
578
579 /**
580 * Return the value of tgsi_ind_register for indexing.
581 * This is the indirect index with the constant offset added to it.
582 */
583 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
584 const struct tgsi_ind_register *ind,
585 int rel_index)
586 {
587 struct gallivm_state *gallivm = &ctx->gallivm;
588 LLVMValueRef result;
589
590 result = ctx->addrs[ind->Index][ind->Swizzle];
591 result = LLVMBuildLoad(gallivm->builder, result, "");
592 result = LLVMBuildAdd(gallivm->builder, result,
593 LLVMConstInt(ctx->i32, rel_index, 0), "");
594 return result;
595 }
596
597 /**
598 * Like get_indirect_index, but restricts the return value to a (possibly
599 * undefined) value inside [0..num).
600 */
601 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
602 const struct tgsi_ind_register *ind,
603 int rel_index, unsigned num)
604 {
605 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
606
607 return si_llvm_bound_index(ctx, result, num);
608 }
609
610
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst               destination register (used when \p src is NULL)
 * \param src               source register; takes precedence over \p dst
 * \param vertex_dw_stride  dword stride between vertices, for 2-D registers
 * \param base_addr         starting dword address to offset from
 * \return dword address of the register's first component
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element.
		 * The semantic is looked up at the array's first element;
		 * the runtime index covers the rest. */
		LLVMValueRef ind_index;

		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each IO slot occupies 4 dwords. */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");

		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[first], index[first]) :
			si_shader_io_get_unique_index_patch(name[first], index[first]);
	} else {
		/* 2-D registers are per-vertex; 1-D ones are per-patch. */
		param = reg.Register.Dimension ?
			si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]) :
			si_shader_io_get_unique_index_patch(name[reg.Register.Index],
							    index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
700
701 /* The offchip buffer layout for TCS->TES is
702 *
703 * - attribute 0 of patch 0 vertex 0
704 * - attribute 0 of patch 0 vertex 1
705 * - attribute 0 of patch 0 vertex 2
706 * ...
707 * - attribute 0 of patch 1 vertex 0
708 * - attribute 0 of patch 1 vertex 1
709 * ...
710 * - attribute 1 of patch 0 vertex 0
711 * - attribute 1 of patch 0 vertex 1
712 * ...
713 * - per patch attribute 0 of patch 0
714 * - per patch attribute 0 of patch 1
715 * ...
716 *
717 * Note that every attribute has 4 components.
718 */
/**
 * Compute the offset into the TCS->TES off-chip buffer (see the layout
 * description above) for one attribute.
 *
 * \param rel_patch_id  relative patch id
 * \param vertex_index  vertex within the patch, or NULL for per-patch attributes
 * \param param_index   attribute slot index
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
				      num_patches, "");

	/* Every attribute is 4 components of 4 bytes each. */
	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: linearize (patch, vertex) into one index. */
		base_addr = LLVMBuildMul(gallivm->builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: indexed by patch only. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
				 LLVMBuildMul(gallivm->builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch attributes live after all per-vertex attributes
		 * (offset packed into the offchip layout param). */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
762
/**
 * Compute the TCS->TES off-chip buffer address for a TGSI register by
 * decoding its (possibly indirect) vertex and attribute indices, then
 * delegating to get_tcs_tes_buffer_address.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
			struct si_shader_context *ctx,
			const struct tgsi_full_dst_register *dst,
			const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	if (reg.Register.Dimension) {
		/* 2-D register: the extra dimension selects the vertex. */
		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
							  reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Look up the semantic at the array's first element; the
		 * runtime index covers the offset within the array. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
						 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = ctx->i32_0;
	}

	/* 2-D registers are per-vertex; 1-D ones are per-patch. */
	param_index_base = reg.Register.Dimension ?
		si_shader_io_get_unique_index(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
				   LLVMConstInt(ctx->i32, param_index_base, 0),
				   "");

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}
826
/**
 * Load from a buffer resource, handling vec4 loads and 64-bit types.
 *
 * \param type		destination TGSI type
 * \param swizzle	component to load (0..3), or ~0 to load a whole vec4
 * \param buffer	buffer resource descriptor
 * \param offset	offset forwarded to ac_build_buffer_load
 *			(NOTE(review): confirm soffset/voffset roles against
 *			ac_llvm_build)
 * \param base		second offset forwarded to ac_build_buffer_load
 * \param can_speculate	true if the load may be speculated (invariant data)
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
				enum tgsi_opcode_type type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value, value2;
	LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
	LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);

	if (swizzle == ~0) {
		/* Load all 4 components at once. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
	}

	if (!tgsi_type_is_64bit(type)) {
		/* 32-bit scalar: load the vec4, then extract one component. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
		return LLVMBuildExtractElement(gallivm->builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit scalar: load two consecutive dwords and combine them. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
862
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value;

	if (swizzle == ~0) {
		/* Gather a vec4 by loading each channel separately. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit values occupy two consecutive dwords. */
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       ctx->i32_1);
		value2 = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
903
/**
 * Store to LDS.
 *
 * \param dw_offset_imm	offset (typically 0..3)
 * \param dw_addr	address in dwords
 * \param value		value to store
 */
static void lds_store(struct lp_build_tgsi_context *bld_base,
		      unsigned dw_offset_imm, LLVMValueRef dw_addr,
		      LLVMValueRef value)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0));

	/* LDS is addressed as i32 dwords; reinterpret the value's bits. */
	value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
	ac_build_indexed_store(&ctx->ac, ctx->lds,
			       dw_addr, value);
}
925
/**
 * Build a v4i32 buffer resource descriptor whose base address comes from a
 * shader parameter holding the address in 64KB units (i.e. bits [47:16]).
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
					   unsigned param)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;

	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	/* The parameter stores address >> 16; shift it back into place. */
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	/* Descriptor dword 2: 0xffffffff (NUM_RECORDS, effectively unbounded);
	 * dword 3: identity swizzle, 32-bit float format. */
	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as <2 x i64> (addr fills dwords 0-1), then view as v4i32. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
949
950 static LLVMValueRef fetch_input_tcs(
951 struct lp_build_tgsi_context *bld_base,
952 const struct tgsi_full_src_register *reg,
953 enum tgsi_opcode_type type, unsigned swizzle)
954 {
955 struct si_shader_context *ctx = si_shader_context(bld_base);
956 LLVMValueRef dw_addr, stride;
957
958 stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
959 dw_addr = get_tcs_in_current_patch_offset(ctx);
960 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
961
962 return lds_load(bld_base, type, swizzle, dw_addr);
963 }
964
965 static LLVMValueRef fetch_output_tcs(
966 struct lp_build_tgsi_context *bld_base,
967 const struct tgsi_full_src_register *reg,
968 enum tgsi_opcode_type type, unsigned swizzle)
969 {
970 struct si_shader_context *ctx = si_shader_context(bld_base);
971 LLVMValueRef dw_addr, stride;
972
973 if (reg->Register.Dimension) {
974 stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
975 dw_addr = get_tcs_out_current_patch_offset(ctx);
976 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
977 } else {
978 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
979 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
980 }
981
982 return lds_load(bld_base, type, swizzle, dw_addr);
983 }
984
985 static LLVMValueRef fetch_input_tes(
986 struct lp_build_tgsi_context *bld_base,
987 const struct tgsi_full_src_register *reg,
988 enum tgsi_opcode_type type, unsigned swizzle)
989 {
990 struct si_shader_context *ctx = si_shader_context(bld_base);
991 LLVMValueRef buffer, base, addr;
992
993 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
994
995 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
996 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
997
998 return buffer_load(bld_base, type, swizzle, buffer, base, addr, true);
999 }
1000
/**
 * Store a TCS output, both to LDS (so it can be read back by the TCS or
 * its epilog) and to the off-chip TCS->TES buffer (so the TES can read it).
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output: address it with the per-vertex stride
		 * taken from the TCS output LDS layout bits. */
		stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		/* The LDS copy is only needed when some invocation reads it. */
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				skip_lds_store = false;
				is_tess_factor = true;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: emit one dword store per enabled channel.
		 * Tess factors are stored by the TCS epilog instead. */
		if (inst->Dst[0].Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}
	}

	/* Full writemask: emit a single vec4 store of all channels. */
	if (inst->Dst[0].Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1081
/**
 * Fetch a GS input: from LDS on GFX9 (ESGS ring in LDS) or from the ESGS
 * ring buffer in memory on older chips. Recurses on itself per channel
 * when all four channels are requested (swizzle == ~0).
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	/* The primitive ID is a system value, not stored in the ring. */
	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	if (!reg->Register.Dimension)
		return NULL;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->b.chip_class >= GFX9) {
		unsigned index = reg->Dimension.Index;

		/* Vertex offsets are packed two per SGPR (16 bits each). */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		/* Address the requested attribute within the vertex. */
		vtx_offset = LLVMBuildAdd(gallivm->builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Gather all four channels by fetching them one at a time. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	unsigned vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += ctx->param_gs_vtx0_offset;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += ctx->param_gs_vtx2_offset - 2;
	}
	/* Ring offsets are in dwords; convert to bytes. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit values occupy two consecutive channel slots. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1176
1177 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1178 {
1179 switch (interpolate) {
1180 case TGSI_INTERPOLATE_CONSTANT:
1181 return 0;
1182
1183 case TGSI_INTERPOLATE_LINEAR:
1184 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1185 return SI_PARAM_LINEAR_SAMPLE;
1186 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1187 return SI_PARAM_LINEAR_CENTROID;
1188 else
1189 return SI_PARAM_LINEAR_CENTER;
1190 break;
1191 case TGSI_INTERPOLATE_COLOR:
1192 case TGSI_INTERPOLATE_PERSPECTIVE:
1193 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1194 return SI_PARAM_PERSP_SAMPLE;
1195 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1196 return SI_PARAM_PERSP_CENTROID;
1197 else
1198 return SI_PARAM_PERSP_CENTER;
1199 break;
1200 default:
1201 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1202 return -1;
1203 }
1204 }
1205
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j); NULL selects flat
 *				(constant) interpolation via fs.constant/interp.mov
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef attr_number;
	LLVMValueRef i, j;

	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (interp) {
		/* Split the (i, j) barycentric weights out of the packed
		 * two-element vector. */
		interp_param = LLVMBuildBitCast(gallivm->builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(gallivm->builder, interp_param,
						ctx->i32_0, "");
		j = LLVMBuildExtractElement(gallivm->builder, interp_param,
						ctx->i32_1, "");
	}

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.part.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and back
		 * color and select per-channel based on the facedness. */
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = LLVMConstInt(ctx->i32, back_attr_offset, 0);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
			LLVMValueRef front, back;

			if (interp) {
				front = ac_build_fs_interp(&ctx->ac, llvm_chan,
							attr_number, prim_mask,
							i, j);
				back = ac_build_fs_interp(&ctx->ac, llvm_chan,
							back_attr_number, prim_mask,
							i, j);
			} else {
				front = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
				back = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, back_attr_number, prim_mask);
			}

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only the X channel is interpolated; YZ are 0, W is 1. */
		if (interp) {
			result[0] = ac_build_fs_interp(&ctx->ac, ctx->i32_0,
						       attr_number, prim_mask, i, j);
		} else {
			result[0] = ac_build_fs_interp_mov(&ctx->ac, ctx->i32_0,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				attr_number, prim_mask);
		}
		result[1] =
		result[2] = LLVMConstReal(ctx->f32, 0.0f);
		result[3] = LLVMConstReal(ctx->f32, 1.0f);
	} else {
		/* Generic input: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);

			if (interp) {
				result[chan] = ac_build_fs_interp(&ctx->ac,
					llvm_chan, attr_number, prim_mask, i, j);
			} else {
				result[chan] = ac_build_fs_interp_mov(&ctx->ac,
					LLVMConstInt(ctx->i32, 2, 0), /* P0 */
					llvm_chan, attr_number, prim_mask);
			}
		}
	}
}
1334
1335 static void declare_input_fs(
1336 struct si_shader_context *ctx,
1337 unsigned input_index,
1338 const struct tgsi_full_declaration *decl,
1339 LLVMValueRef out[4])
1340 {
1341 struct lp_build_context *base = &ctx->bld_base.base;
1342 struct si_shader *shader = ctx->shader;
1343 LLVMValueRef main_fn = ctx->main_fn;
1344 LLVMValueRef interp_param = NULL;
1345 int interp_param_idx;
1346
1347 /* Get colors from input VGPRs (set by the prolog). */
1348 if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
1349 unsigned i = decl->Semantic.Index;
1350 unsigned colors_read = shader->selector->info.colors_read;
1351 unsigned mask = colors_read >> (i * 4);
1352 unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
1353 (i ? util_bitcount(colors_read & 0xf) : 0);
1354
1355 out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
1356 out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
1357 out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
1358 out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
1359 return;
1360 }
1361
1362 interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
1363 decl->Interp.Location);
1364 if (interp_param_idx == -1)
1365 return;
1366 else if (interp_param_idx) {
1367 interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
1368 }
1369
1370 interp_fs_input(ctx, input_index, decl->Semantic.Name,
1371 decl->Semantic.Index, shader->selector->info.num_inputs,
1372 shader->selector->info.colors_read, interp_param,
1373 LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
1374 LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
1375 &out[0]);
1376 }
1377
1378 static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
1379 {
1380 return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
1381 }
1382
1383
/**
 * Load a dword from a constant buffer.
 *
 * @param resource	v4i32 buffer resource descriptor
 * @param offset	byte offset (i32)
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	/* No voffset/soffset; can_speculate and readonly memory flags set. */
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1394
1395 static LLVMValueRef load_sample_position(struct si_shader_context *ctx, LLVMValueRef sample_id)
1396 {
1397 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1398 struct gallivm_state *gallivm = &ctx->gallivm;
1399 LLVMBuilderRef builder = gallivm->builder;
1400 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1401 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1402 LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
1403
1404 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1405 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1406 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1407
1408 LLVMValueRef pos[4] = {
1409 buffer_load_const(ctx, resource, offset0),
1410 buffer_load_const(ctx, resource, offset1),
1411 LLVMConstReal(ctx->f32, 0),
1412 LLVMConstReal(ctx->f32, 0)
1413 };
1414
1415 return lp_build_gather_values(gallivm, pos, 4);
1416 }
1417
/**
 * Build the LLVM value for one TGSI system value declaration and cache it
 * in ctx->system_values[index].
 */
static void declare_system_value(struct si_shader_context *ctx,
				 unsigned index,
				 const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *bld = &ctx->bld_base.base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(ctx->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* The hardware vertex ID is relative; add the base vertex. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(ctx->main_fn,
						  ctx->param_base_vertex), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of the VS state bits says whether the draw is indexed. */
		indexed = LLVMBuildLShr(gallivm->builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(gallivm->builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(gallivm->builder, indexed,
					LLVMGetParam(ctx->main_fn, ctx->param_base_vertex),
					ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_start_instance);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_draw_id);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS: bits [12:8] of the relative IDs; GS: its own SGPR. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(ctx->main_fn,
					     ctx->param_gs_instance_id);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; W is delivered as 1/W, so reciprocate it. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position = fractional part of the fragment position. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
			LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count, packed into different layout SGPRs
		 * for TCS and TES. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess levels are read back from the off-chip TCS->TES buffer. */
		LLVMValueRef buffer, base, addr;
		int param = si_shader_io_get_unique_index_patch(decl->Semantic.Name, 0);

		buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
		addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
						  LLVMConstInt(ctx->i32, param, 0));

		value = buffer_load(&ctx->bld_base, TGSI_TYPE_FLOAT,
				    ~0, buffer, base, addr, true);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from a driver constant buffer:
		 * outer levels at dwords 0-3, inner levels at dwords 4-7. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&ctx->bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* Prefer the compile-time block size when declared. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Unused dimensions have no SGPR assigned; report 0 for them. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* A helper invocation is a lane that is NOT "live" per
		 * llvm.amdgcn.ps.live; return it sign-extended to i32. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* Wavefront size. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* 64-bit mask with only this lane's bit set, as v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* GE = -1 << id, GT = -2 << id; LE/LT are their complements. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
		value = LLVMBuildShl(gallivm->builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
1698
1699 static void declare_compute_memory(struct si_shader_context *ctx,
1700 const struct tgsi_full_declaration *decl)
1701 {
1702 struct si_shader_selector *sel = ctx->shader->selector;
1703 struct gallivm_state *gallivm = &ctx->gallivm;
1704
1705 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1706 LLVMValueRef var;
1707
1708 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1709 assert(decl->Range.First == decl->Range.Last);
1710 assert(!ctx->shared_memory);
1711
1712 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1713 LLVMArrayType(ctx->i8, sel->local_size),
1714 "compute_lds",
1715 LOCAL_ADDR_SPACE);
1716 LLVMSetAlignment(var, 4);
1717
1718 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1719 }
1720
1721 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1722 {
1723 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1724 ctx->param_const_and_shader_buffers);
1725
1726 return ac_build_indexed_load_const(&ctx->ac, list_ptr,
1727 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
1728 }
1729
/**
 * Fetch one channel (or all four, when swizzle == LP_CHAN_ALL) of a TGSI
 * constant-register read, handling direct and indirect buffer/element
 * addressing as well as 64-bit types.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Fetch all channels individually and gather them. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		/* Indirectly indexed constant buffer: clamp the index and
		 * offset it past the shader-buffer slots in the list. */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		index = LLVMBuildAdd(ctx->gallivm.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_indexed_load_const(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	if (reg->Register.Indirect) {
		/* Indirect element index: addr-reg * 16 + constant byte offset. */
		addr = ctx->addrs[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    LLVMConstInt(ctx->i32, idx * 4, 0));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit type: load the second dword and combine. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1794
1795 /* Upper 16 bits must be zero. */
1796 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
1797 LLVMValueRef val[2])
1798 {
1799 return LLVMBuildOr(ctx->gallivm.builder, val[0],
1800 LLVMBuildShl(ctx->gallivm.builder, val[1],
1801 LLVMConstInt(ctx->i32, 16, 0),
1802 ""), "");
1803 }
1804
1805 /* Upper 16 bits are ignored and will be dropped. */
1806 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
1807 LLVMValueRef val[2])
1808 {
1809 LLVMValueRef v[2] = {
1810 LLVMBuildAnd(ctx->gallivm.builder, val[0],
1811 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
1812 val[1],
1813 };
1814 return si_llvm_pack_two_int16(ctx, v);
1815 }
1816
1817 /* Initialize arguments for the shader export intrinsic */
1818 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1819 LLVMValueRef *values,
1820 unsigned target,
1821 struct ac_export_args *args)
1822 {
1823 struct si_shader_context *ctx = si_shader_context(bld_base);
1824 struct lp_build_context *base = &bld_base->base;
1825 LLVMBuilderRef builder = ctx->gallivm.builder;
1826 LLVMValueRef val[4];
1827 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1828 unsigned chan;
1829 bool is_int8, is_int10;
1830
1831 /* Default is 0xf. Adjusted below depending on the format. */
1832 args->enabled_channels = 0xf; /* writemask */
1833
1834 /* Specify whether the EXEC mask represents the valid mask */
1835 args->valid_mask = 0;
1836
1837 /* Specify whether this is the last export */
1838 args->done = 0;
1839
1840 /* Specify the target we are exporting */
1841 args->target = target;
1842
1843 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1844 const struct si_shader_key *key = &ctx->shader->key;
1845 unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
1846 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1847
1848 assert(cbuf >= 0 && cbuf < 8);
1849 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1850 is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
1851 is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
1852 }
1853
1854 args->compr = false;
1855 args->out[0] = base->undef;
1856 args->out[1] = base->undef;
1857 args->out[2] = base->undef;
1858 args->out[3] = base->undef;
1859
1860 switch (spi_shader_col_format) {
1861 case V_028714_SPI_SHADER_ZERO:
1862 args->enabled_channels = 0; /* writemask */
1863 args->target = V_008DFC_SQ_EXP_NULL;
1864 break;
1865
1866 case V_028714_SPI_SHADER_32_R:
1867 args->enabled_channels = 1; /* writemask */
1868 args->out[0] = values[0];
1869 break;
1870
1871 case V_028714_SPI_SHADER_32_GR:
1872 args->enabled_channels = 0x3; /* writemask */
1873 args->out[0] = values[0];
1874 args->out[1] = values[1];
1875 break;
1876
1877 case V_028714_SPI_SHADER_32_AR:
1878 args->enabled_channels = 0x9; /* writemask */
1879 args->out[0] = values[0];
1880 args->out[3] = values[3];
1881 break;
1882
1883 case V_028714_SPI_SHADER_FP16_ABGR:
1884 args->compr = 1; /* COMPR flag */
1885
1886 for (chan = 0; chan < 2; chan++) {
1887 LLVMValueRef pack_args[2] = {
1888 values[2 * chan],
1889 values[2 * chan + 1]
1890 };
1891 LLVMValueRef packed;
1892
1893 packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
1894 args->out[chan] =
1895 LLVMBuildBitCast(ctx->gallivm.builder,
1896 packed, ctx->f32, "");
1897 }
1898 break;
1899
1900 case V_028714_SPI_SHADER_UNORM16_ABGR:
1901 for (chan = 0; chan < 4; chan++) {
1902 val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
1903 val[chan] = LLVMBuildFMul(builder, val[chan],
1904 LLVMConstReal(ctx->f32, 65535), "");
1905 val[chan] = LLVMBuildFAdd(builder, val[chan],
1906 LLVMConstReal(ctx->f32, 0.5), "");
1907 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1908 ctx->i32, "");
1909 }
1910
1911 args->compr = 1; /* COMPR flag */
1912 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1913 si_llvm_pack_two_int16(ctx, val));
1914 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1915 si_llvm_pack_two_int16(ctx, val+2));
1916 break;
1917
1918 case V_028714_SPI_SHADER_SNORM16_ABGR:
1919 for (chan = 0; chan < 4; chan++) {
1920 /* Clamp between [-1, 1]. */
1921 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1922 values[chan],
1923 LLVMConstReal(ctx->f32, 1));
1924 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1925 val[chan],
1926 LLVMConstReal(ctx->f32, -1));
1927 /* Convert to a signed integer in [-32767, 32767]. */
1928 val[chan] = LLVMBuildFMul(builder, val[chan],
1929 LLVMConstReal(ctx->f32, 32767), "");
1930 /* If positive, add 0.5, else add -0.5. */
1931 val[chan] = LLVMBuildFAdd(builder, val[chan],
1932 LLVMBuildSelect(builder,
1933 LLVMBuildFCmp(builder, LLVMRealOGE,
1934 val[chan], base->zero, ""),
1935 LLVMConstReal(ctx->f32, 0.5),
1936 LLVMConstReal(ctx->f32, -0.5), ""), "");
1937 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1938 }
1939
1940 args->compr = 1; /* COMPR flag */
1941 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1942 si_llvm_pack_two_int32_as_int16(ctx, val));
1943 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1944 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1945 break;
1946
1947 case V_028714_SPI_SHADER_UINT16_ABGR: {
1948 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1949 is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
1950 LLVMValueRef max_alpha =
1951 !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1952
1953 /* Clamp. */
1954 for (chan = 0; chan < 4; chan++) {
1955 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1956 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1957 val[chan],
1958 chan == 3 ? max_alpha : max_rgb);
1959 }
1960
1961 args->compr = 1; /* COMPR flag */
1962 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1963 si_llvm_pack_two_int16(ctx, val));
1964 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1965 si_llvm_pack_two_int16(ctx, val+2));
1966 break;
1967 }
1968
1969 case V_028714_SPI_SHADER_SINT16_ABGR: {
1970 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1971 is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
1972 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1973 is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
1974 LLVMValueRef max_alpha =
1975 !is_int10 ? max_rgb : ctx->i32_1;
1976 LLVMValueRef min_alpha =
1977 !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1978
1979 /* Clamp. */
1980 for (chan = 0; chan < 4; chan++) {
1981 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1982 val[chan] = lp_build_emit_llvm_binary(bld_base,
1983 TGSI_OPCODE_IMIN,
1984 val[chan], chan == 3 ? max_alpha : max_rgb);
1985 val[chan] = lp_build_emit_llvm_binary(bld_base,
1986 TGSI_OPCODE_IMAX,
1987 val[chan], chan == 3 ? min_alpha : min_rgb);
1988 }
1989
1990 args->compr = 1; /* COMPR flag */
1991 args->out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1992 si_llvm_pack_two_int32_as_int16(ctx, val));
1993 args->out[1] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1994 si_llvm_pack_two_int32_as_int16(ctx, val+2));
1995 break;
1996 }
1997
1998 case V_028714_SPI_SHADER_32_ABGR:
1999 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2000 break;
2001 }
2002 }
2003
2004 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2005 LLVMValueRef alpha)
2006 {
2007 struct si_shader_context *ctx = si_shader_context(bld_base);
2008
2009 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2010 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2011 SI_PARAM_ALPHA_REF);
2012
2013 LLVMValueRef alpha_pass =
2014 lp_build_cmp(&bld_base->base,
2015 ctx->shader->key.part.ps.epilog.alpha_func,
2016 alpha, alpha_ref);
2017 LLVMValueRef arg =
2018 lp_build_select(&bld_base->base,
2019 alpha_pass,
2020 LLVMConstReal(ctx->f32, 1.0f),
2021 LLVMConstReal(ctx->f32, -1.0f));
2022
2023 ac_build_kill(&ctx->ac, arg);
2024 } else {
2025 ac_build_kill(&ctx->ac, NULL);
2026 }
2027 }
2028
2029 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2030 LLVMValueRef alpha,
2031 unsigned samplemask_param)
2032 {
2033 struct si_shader_context *ctx = si_shader_context(bld_base);
2034 struct gallivm_state *gallivm = &ctx->gallivm;
2035 LLVMValueRef coverage;
2036
2037 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2038 coverage = LLVMGetParam(ctx->main_fn,
2039 samplemask_param);
2040 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2041
2042 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2043 ctx->i32,
2044 &coverage, 1, LP_FUNC_ATTR_READNONE);
2045
2046 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2047 ctx->f32, "");
2048
2049 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2050 LLVMConstReal(ctx->f32,
2051 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2052
2053 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2054 }
2055
/* Compute user clip distances from the clip-vertex output and fill the two
 * clip-distance position exports (pos[2] and pos[3]).
 *
 * The eight user clip planes are read from the SI_VS_CONST_CLIP_PLANES
 * constant buffer; each distance is the dot product of the clip vertex
 * with one plane equation.
 *
 * \param pos       array of position export args; this writes pos[2..3]
 * \param out_elts  the 4 components of the TGSI_SEMANTIC_CLIPVERTEX output
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);

	/* Two exports of 4 clip distances each (CLIPDIST0/CLIPDIST1). */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start each distance at 0 before accumulating the dot product. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane[reg_index*4 + chan].comp[const_chan]
				 * in the constant buffer (4 bytes per float). */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(base, args->out[chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Export all 4 channels to the clip-distance position slot. */
		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2100
2101 static void si_dump_streamout(struct pipe_stream_output_info *so)
2102 {
2103 unsigned i;
2104
2105 if (so->num_outputs)
2106 fprintf(stderr, "STREAMOUT\n");
2107
2108 for (i = 0; i < so->num_outputs; i++) {
2109 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2110 so->output[i].start_component;
2111 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2112 i, so->output[i].output_buffer,
2113 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2114 so->output[i].register_index,
2115 mask & 1 ? "x" : "",
2116 mask & 2 ? "y" : "",
2117 mask & 4 ? "z" : "",
2118 mask & 8 ? "w" : "");
2119 }
2120 }
2121
2122 static void emit_streamout_output(struct si_shader_context *ctx,
2123 LLVMValueRef const *so_buffers,
2124 LLVMValueRef const *so_write_offsets,
2125 struct pipe_stream_output *stream_out,
2126 struct si_shader_output_values *shader_out)
2127 {
2128 struct gallivm_state *gallivm = &ctx->gallivm;
2129 LLVMBuilderRef builder = gallivm->builder;
2130 unsigned buf_idx = stream_out->output_buffer;
2131 unsigned start = stream_out->start_component;
2132 unsigned num_comps = stream_out->num_components;
2133 LLVMValueRef out[4];
2134
2135 assert(num_comps && num_comps <= 4);
2136 if (!num_comps || num_comps > 4)
2137 return;
2138
2139 /* Load the output as int. */
2140 for (int j = 0; j < num_comps; j++) {
2141 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
2142
2143 out[j] = LLVMBuildBitCast(builder,
2144 shader_out->values[start + j],
2145 ctx->i32, "");
2146 }
2147
2148 /* Pack the output. */
2149 LLVMValueRef vdata = NULL;
2150
2151 switch (num_comps) {
2152 case 1: /* as i32 */
2153 vdata = out[0];
2154 break;
2155 case 2: /* as v2i32 */
2156 case 3: /* as v4i32 (aligned to 4) */
2157 case 4: /* as v4i32 */
2158 vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
2159 for (int j = 0; j < num_comps; j++) {
2160 vdata = LLVMBuildInsertElement(builder, vdata, out[j],
2161 LLVMConstInt(ctx->i32, j, 0), "");
2162 }
2163 break;
2164 }
2165
2166 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
2167 vdata, num_comps,
2168 so_write_offsets[buf_idx],
2169 ctx->i32_0,
2170 stream_out->dst_offset * 4, 1, 1, true, false);
2171 }
2172
/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 *
 * \param outputs  the shader outputs, indexed by output register
 * \param noutput  number of entries in @p outputs
 * \param stream   vertex stream to emit (0 for non-GS shaders)
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
                 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
						    ctx->param_rw_buffers);

		for (i = 0; i < 4; i++) {
			/* A stride of 0 means the buffer is unused. */
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);

			/* The offset SGPR counts dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			/* Skip registers the shader doesn't actually write. */
			if (reg >= noutput)
				continue;

			/* Only emit outputs belonging to the requested stream. */
			if (stream != so->output[i].stream)
				continue;

			emit_streamout_output(ctx, so_buffers, so_write_offset,
					      &so->output[i], &outputs[reg]);
		}
	}
	lp_build_endif(&if_ctx);
}
2257
2258
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits parameter exports for generic outputs and buffers position
 * exports (position, clip distances, and the point-size/edgeflag/layer/
 * viewport "misc" vector) so they can be emitted last, with the final
 * one marked "done".
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].semantic_name;
		semantic_index = outputs[i].semantic_index;
		bool export_param = true;
		unsigned id;

		/* Determine whether the shader key asks us to kill this
		 * parameter export (e.g. because the next stage never
		 * reads it). */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION: /* ignore these */
		case TGSI_SEMANTIC_PSIZE:
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_EDGEFLAG:
			break;
		case TGSI_SEMANTIC_GENERIC:
			/* don't process indices the function can't handle */
			if (semantic_index >= SI_MAX_IO_GENERIC)
				break;
			/* fall through */
		default:
			/* kill_outputs is a bitmask indexed by unique I/O slot. */
			id = si_shader_io_get_unique_index(semantic_name, semantic_index);
			if (shader->key.opt.kill_outputs[id / 32] & (1u << (id % 32)))
				export_param = false;
		}

		/* If the output only feeds non-zero vertex streams, it is
		 * streamout-only and needs no parameter export. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			export_param = false;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc vector (pos_args[1]) below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc vector (pos_args[1]) below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Save for the misc vector, then also export the
			 * value as a generic parameter. */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			/* Same dual handling as LAYER. */
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (shader->key.opt.clip_disable) {
				/* Clipping disabled: demote to a plain
				 * parameter export. */
				semantic_name = TGSI_SEMANTIC_GENERIC;
				goto handle_semantic;
			}
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (shader->key.opt.clip_disable)
				continue;
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			if (!export_param)
				continue;
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, &args);

		/* Position exports are buffered and emitted at the end so
		 * the last one can carry the "done" bit. */
		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		} else {
			ac_build_export(&ctx->ac, &args);
		}

		/* Clip distances are exported twice: as positions above and
		 * as parameters for the next stage to read. */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = base->zero; /* X */
		pos_args[0].out[1] = base->zero; /* Y */
		pos_args[0].out[2] = base->zero; /* Z */
		pos_args[0].out[3] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = base->zero; /* X */
		pos_args[1].out[1] = base->zero; /* Y */
		pos_args[1].out[2] = base->zero; /* Z */
		pos_args[1].out[3] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->gallivm.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = LLVMBuildBitCast(ctx->gallivm.builder,
							      edgeflag_value,
							      ctx->f32, "");
		}

		if (ctx->screen->b.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* Pack the viewport index into bits [19:16]
				 * of the Z channel, ORed with the layer. */
				v = bitcast(bld_base, TGSI_TYPE_UNSIGNED, v);
				v = LLVMBuildShl(ctx->gallivm.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->gallivm.builder, v,
						bitcast(bld_base, TGSI_TYPE_UNSIGNED,
							pos_args[1].out[2]), "");
				pos_args[1].out[2] = bitcast(bld_base, TGSI_TYPE_FLOAT, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the buffered position exports. */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}
}
2473
/**
 * Forward all outputs from the vertex shader to the TES. This is only used
 * for the fixed function TCS.
 *
 * Reads the VS outputs of the current invocation from LDS (where the LS
 * stage stored them) and stores them into the offchip TCS/TES buffer.
 * The set of inputs to copy comes from the mono shader key
 * (ff_tcs_inputs_to_copy bitmask).
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* invocation_id = bits [12:8] of tcs_rel_ids. */
	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS address of this invocation's vertex within the current patch:
	 * vertex stride comes from bits [31:24] of vs_state_bits. */
	lds_vertex_stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
					 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	/* Iterate over the set bits: each one is an input slot to copy. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
					LLVMConstInt(ctx->i32, 4 * i, 0),
					 "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					get_rel_patch_id(ctx),
					  invocation_id,
					  LLVMConstInt(ctx->i32, i, 0));

		/* Load all 4 components of the output from LDS... */
		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
					      lds_ptr);

		/* ...and store them to the offchip buffer for the TES. */
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
2516
/* Write the tessellation factors for the current patch.
 *
 * Loads the per-patch TESSINNER/TESSOUTER values from LDS and stores them
 * into the tess-factor ring buffer that the tessellator reads; optionally
 * also mirrors them into the offchip buffer when the TES reads them.
 * Only invocation 0 of each patch does the work.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations have finished writing tess levels to LDS. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_inner_index * 4, 0), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 LLVMConstInt(ctx->i32,
					      tess_outer_index * 4, 0), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		outer[0] = out[1] = lds_load(bld_base, TGSI_TYPE_SIGNED, 0, lds_outer);
		outer[1] = out[0] = lds_load(bld_base, TGSI_TYPE_SIGNED, 1, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	/* Quads need a second vec2 store for the remaining 2 dwords. */
	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch writes the control word. */
	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->b.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = lp_build_gather_values(gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
2679
2680 static LLVMValueRef
2681 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2682 unsigned param, unsigned return_index)
2683 {
2684 return LLVMBuildInsertValue(ctx->gallivm.builder, ret,
2685 LLVMGetParam(ctx->main_fn, param),
2686 return_index, "");
2687 }
2688
2689 static LLVMValueRef
2690 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2691 unsigned param, unsigned return_index)
2692 {
2693 LLVMBuilderRef builder = ctx->gallivm.builder;
2694 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
2695
2696 return LLVMBuildInsertValue(builder, ret,
2697 LLVMBuildBitCast(builder, p, ctx->f32, ""),
2698 return_index, "");
2699 }
2700
2701 static LLVMValueRef
2702 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
2703 unsigned param, unsigned return_index)
2704 {
2705 LLVMBuilderRef builder = ctx->gallivm.builder;
2706 LLVMValueRef ptr, lo, hi;
2707
2708 ptr = LLVMGetParam(ctx->main_fn, param);
2709 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
2710 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
2711 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
2712 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
2713 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
2714 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
2715 }
2716
/* This only writes the tessellation factor levels.
 *
 * Epilogue of the TCS main part: copies fixed-function inputs if needed
 * and builds the return value that passes the SGPR/VGPR arguments on to
 * the TCS epilog part. The slot indices encode the epilog's input ABI
 * (GFX9 merged-shader layout vs. the GFX6-8 layout).
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	/* invocation_id = bits [12:8] of tcs_rel_ids. */
	invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	/* Return epilog parameters from this function. */
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->b.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
	invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
	tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	ctx->return_value = ret;
}
2770
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * On GFX9, LS and HS are merged into one shader; the LS part returns its
 * SGPR/VGPR inputs so the TCS part receives them in the expected slots.
 * Slot indices follow the GFX9 merged-shader input ABI (user SGPRs start
 * at offset 8).
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs at fixed positions. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* User SGPRs. */
	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	/* Descriptor pointers, each split into two i32 slots. */
	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs follow the user SGPRs. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_patch_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret,
					ctx->param_tcs_rel_ids, vgpr++);
	ctx->return_value = ret;
}
2808
/* Pass GS inputs from ES to GS on GFX9.
 *
 * On GFX9, ES and GS are merged into one shader; the ES part returns its
 * SGPR/VGPR inputs so the GS part receives them in the expected slots.
 * Slot indices follow the GFX9 merged-shader input ABI (user SGPRs start
 * at offset 8).
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs at fixed positions. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers, 0);
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);

	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Descriptor pointers, each split into two i32 slots. */
	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* The 5 GS vertex-offset/primitive VGPRs follow the user SGPRs. */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
2833
/* VS-as-LS epilogue: write all vertex outputs to LDS so the TCS (HS)
 * stage can read them as its inputs.  Each vertex gets a contiguous
 * region of vertex_dw_stride dwords, addressed by its relative id.
 * On GFX9 the merged LS+HS shader additionally forwards the TCS inputs
 * through the return value. */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords, packed in bits [31:24] of the
	 * VS state SGPR. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
	/* LDS dword address where this vertex's outputs begin. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output occupies 4 dwords at a slot determined by its
		 * semantic, shared with the TCS input addressing. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
2887
/* VS/TES-as-ES epilogue: store all ES outputs into the ESGS ring so the
 * GS stage can read them.  Pre-GFX9 the ring is a buffer in memory
 * addressed with the es2gs offset; on GFX9 it lives in LDS and the
 * merged ES+GS shader also forwards the GS inputs via the return value. */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	/* GFX9: compute this thread's base dword address in the LDS-resident
	 * ESGS ring (thread_id * itemsize in dwords). */
	if (ctx->screen->b.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		lds_base = LLVMBuildMul(gallivm->builder, ac_get_thread_id(&ctx->ac),
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];
		int param;

		/* Ignored in VS/TES-as-ES; see the ARB_shader_viewport_layer_array
		 * resolution quoted in si_llvm_emit_ls_epilogue. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->b.chip_class >= GFX9) {
				lds_store(bld_base, param * 4 + chan, lds_base, out_val);
				continue;
			}

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->b.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
2938
2939 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2940 {
2941 if (ctx->screen->b.chip_class >= GFX9)
2942 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
2943 else
2944 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
2945 }
2946
2947 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2948 {
2949 struct si_shader_context *ctx = si_shader_context(bld_base);
2950
2951 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2952 si_get_gs_wave_id(ctx));
2953 }
2954
/* VS/TES epilogue for the non-ES/LS case: gather all outputs, optionally
 * clamp vertex colors, emit streamout stores, optionally append the
 * exported PrimitiveID, and finally emit the parameter/position exports. */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);

	/* +1 leaves room for an appended PrimitiveID output below. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				/* Open the IF once, on the first color found;
				 * all colors are clamped inside it. */
				lp_build_if(&if_ctx, gallivm, cond);
			}

			for (j = 0; j < 4; j++) {
				addr = ctx->outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Read back all outputs into the export array, including the
	 * per-channel vertex stream (2 bits per channel). */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->outputs[i][j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
					       get_primitive_id(bld_base, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
3039
/* Queue of pixel-shader export instructions, collected first and emitted
 * together by si_emit_ps_exports(). */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args[] */
	struct ac_export_args args[10];	/* queued color and MRTZ exports */
};
3044
3045 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
3046 bool writes_samplemask)
3047 {
3048 if (writes_z) {
3049 /* Z needs 32 bits. */
3050 if (writes_samplemask)
3051 return V_028710_SPI_SHADER_32_ABGR;
3052 else if (writes_stencil)
3053 return V_028710_SPI_SHADER_32_GR;
3054 else
3055 return V_028710_SPI_SHADER_32_R;
3056 } else if (writes_stencil || writes_samplemask) {
3057 /* Both stencil and sample mask need only 16 bits. */
3058 return V_028710_SPI_SHADER_UINT16_ABGR;
3059 } else {
3060 return V_028710_SPI_SHADER_ZERO;
3061 }
3062 }
3063
/* Queue the MRTZ (depth/stencil/samplemask) export.  At least one of
 * depth, stencil or samplemask must be non-NULL.  The channel layout
 * depends on the Z format chosen by si_get_spi_shader_z_format(). */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args;
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args.valid_mask = 1; /* whether the EXEC mask is valid */
	args.done = 1; /* DONE bit */

	/* Specify the target we are exporting */
	args.target = V_008DFC_SQ_EXP_MRTZ;

	args.compr = 0; /* COMPR flag */
	args.out[0] = base->undef; /* R, depth */
	args.out[1] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args.out[2] = base->undef; /* B, sample mask */
	args.out[3] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* 16-bit compressed layout: stencil and sample mask only. */
		assert(!depth);
		args.compr = 1; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(ctx->gallivm.builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args.out[0] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args.out[1] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit layout: one value per channel. */
		if (depth) {
			args.out[0] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args.out[1] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args.out[2] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND and HAINAN) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND &&
	    ctx->screen->b.family != CHIP_HAINAN)
		mask |= 0x1;

	/* Specify which components to enable */
	args.enabled_channels = mask;

	memcpy(&exp->args[exp->num++], &args, sizeof(args));
}
3134
/* Queue color export(s) for one color output.
 *
 * Applies the epilog key's color fixups (clamping, alpha-to-one, alpha
 * test, line/polygon smoothing), then queues either a single MRT export
 * or — when FS_COLOR0_WRITES_ALL_CBUFS is in effect (last_cbuf > 0) — a
 * broadcast of color0 to all bound color buffers.  is_last marks the
 * final export of the shader so the DONE/VM bits can be set on it. */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3201
3202 static void si_emit_ps_exports(struct si_shader_context *ctx,
3203 struct si_ps_exports *exp)
3204 {
3205 for (unsigned i = 0; i < exp->num; i++)
3206 ac_build_export(&ctx->ac, &exp->args[i]);
3207 }
3208
3209 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3210 {
3211 struct si_shader_context *ctx = si_shader_context(bld_base);
3212 struct lp_build_context *base = &bld_base->base;
3213 struct ac_export_args args;
3214
3215 args.enabled_channels = 0x0; /* enabled channels */
3216 args.valid_mask = 1; /* whether the EXEC mask is valid */
3217 args.done = 1; /* DONE bit */
3218 args.target = V_008DFC_SQ_EXP_NULL;
3219 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3220 args.out[0] = base->undef; /* R */
3221 args.out[1] = base->undef; /* G */
3222 args.out[2] = base->undef; /* B */
3223 args.out[3] = base->undef; /* A */
3224
3225 ac_build_export(&ctx->ac, &args);
3226 }
3227
3228 /**
3229 * Return PS outputs in this order:
3230 *
3231 * v[0:3] = color0.xyzw
3232 * v[4:7] = color1.xyzw
3233 * ...
3234 * vN+0 = Depth
3235 * vN+1 = Stencil
3236 * vN+2 = SampleMask
3237 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3238 *
3239 * The alpha-ref SGPR is returned via its original location.
3240 */
3241 static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
3242 {
3243 struct si_shader_context *ctx = si_shader_context(bld_base);
3244 struct si_shader *shader = ctx->shader;
3245 struct tgsi_shader_info *info = &shader->selector->info;
3246 LLVMBuilderRef builder = ctx->gallivm.builder;
3247 unsigned i, j, first_vgpr, vgpr;
3248
3249 LLVMValueRef color[8][4] = {};
3250 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
3251 LLVMValueRef ret;
3252
3253 /* Read the output values. */
3254 for (i = 0; i < info->num_outputs; i++) {
3255 unsigned semantic_name = info->output_semantic_name[i];
3256 unsigned semantic_index = info->output_semantic_index[i];
3257
3258 switch (semantic_name) {
3259 case TGSI_SEMANTIC_COLOR:
3260 assert(semantic_index < 8);
3261 for (j = 0; j < 4; j++) {
3262 LLVMValueRef ptr = ctx->outputs[i][j];
3263 LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
3264 color[semantic_index][j] = result;
3265 }
3266 break;
3267 case TGSI_SEMANTIC_POSITION:
3268 depth = LLVMBuildLoad(builder,
3269 ctx->outputs[i][2], "");
3270 break;
3271 case TGSI_SEMANTIC_STENCIL:
3272 stencil = LLVMBuildLoad(builder,
3273 ctx->outputs[i][1], "");
3274 break;
3275 case TGSI_SEMANTIC_SAMPLEMASK:
3276 samplemask = LLVMBuildLoad(builder,
3277 ctx->outputs[i][0], "");
3278 break;
3279 default:
3280 fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
3281 semantic_name);
3282 }
3283 }
3284
3285 /* Fill the return structure. */
3286 ret = ctx->return_value;
3287
3288 /* Set SGPRs. */
3289 ret = LLVMBuildInsertValue(builder, ret,
3290 bitcast(bld_base, TGSI_TYPE_SIGNED,
3291 LLVMGetParam(ctx->main_fn,
3292 SI_PARAM_ALPHA_REF)),
3293 SI_SGPR_ALPHA_REF, "");
3294
3295 /* Set VGPRs */
3296 first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
3297 for (i = 0; i < ARRAY_SIZE(color); i++) {
3298 if (!color[i][0])
3299 continue;
3300
3301 for (j = 0; j < 4; j++)
3302 ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
3303 }
3304 if (depth)
3305 ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
3306 if (stencil)
3307 ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
3308 if (samplemask)
3309 ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");
3310
3311 /* Add the input sample mask for smoothing at the end. */
3312 if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
3313 vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
3314 ret = LLVMBuildInsertValue(builder, ret,
3315 LLVMGetParam(ctx->main_fn,
3316 SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");
3317
3318 ctx->return_value = ret;
3319 }
3320
/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 *
 * \param pvgpr  if non-NULL, the pointed-to value is routed through the
 *               asm (first 32 bits only) and replaced by the result.
 */
static void emit_optimization_barrier(struct si_shader_context *ctx,
				      LLVMValueRef *pvgpr)
{
	/* A unique comment per barrier so LLVM cannot merge them. */
	static int counter = 0;

	LLVMBuilderRef builder = ctx->gallivm.builder;
	char code[16];

	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		/* Pure barrier: void asm with side effects, no operands. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		/* Tie the value's first dword to the asm ("=v,0": output in a
		 * VGPR, input in the same register), so the value appears to
		 * be produced by the side-effecting asm. */
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = llvm_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		/* View the value as a vector of i32, pass element 0 through
		 * the asm, then reassemble the original type. */
		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}
3361
3362 void si_emit_waitcnt(struct si_shader_context *ctx, unsigned simm16)
3363 {
3364 struct gallivm_state *gallivm = &ctx->gallivm;
3365 LLVMBuilderRef builder = gallivm->builder;
3366 LLVMValueRef args[1] = {
3367 LLVMConstInt(ctx->i32, simm16, 0)
3368 };
3369 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3370 ctx->voidt, args, 1, 0);
3371 }
3372
/* TGSI MEMBAR: wait for outstanding memory operations selected by the
 * barrier flags.  Starting from the no-op waitcnt value, ANDing in
 * VM_CNT and/or LGKM_CNT clears the corresponding counter field, which
 * makes the emitted s_waitcnt wait for that class of traffic. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	/* The flags operand must be an immediate (constant). */
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	/* A full thread-group barrier waits for both memory and LDS. */
	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	/* Buffer/image traffic goes through the vector-memory counter. */
	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	/* Shared (LDS) traffic goes through the LGKM counter. */
	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	/* Skip the s_waitcnt entirely if no flags requested a wait. */
	if (waitcnt != NOOP_WAITCNT)
		si_emit_waitcnt(ctx, waitcnt);
}
3397
3398 static void clock_emit(
3399 const struct lp_build_tgsi_action *action,
3400 struct lp_build_tgsi_context *bld_base,
3401 struct lp_build_emit_data *emit_data)
3402 {
3403 struct si_shader_context *ctx = si_shader_context(bld_base);
3404 struct gallivm_state *gallivm = &ctx->gallivm;
3405 LLVMValueRef tmp;
3406
3407 tmp = lp_build_intrinsic(gallivm->builder, "llvm.readcyclecounter",
3408 ctx->i64, NULL, 0, 0);
3409 tmp = LLVMBuildBitCast(gallivm->builder, tmp, ctx->v2i32, "");
3410
3411 emit_data->output[0] =
3412 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_0, "");
3413 emit_data->output[1] =
3414 LLVMBuildExtractElement(gallivm->builder, tmp, ctx->i32_1, "");
3415 }
3416
3417 LLVMTypeRef si_const_array(LLVMTypeRef elem_type, int num_elements)
3418 {
3419 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
3420 CONST_ADDR_SPACE);
3421 }
3422
3423 static void si_llvm_emit_ddxy(
3424 const struct lp_build_tgsi_action *action,
3425 struct lp_build_tgsi_context *bld_base,
3426 struct lp_build_emit_data *emit_data)
3427 {
3428 struct si_shader_context *ctx = si_shader_context(bld_base);
3429 struct gallivm_state *gallivm = &ctx->gallivm;
3430 unsigned opcode = emit_data->info->opcode;
3431 LLVMValueRef val;
3432 int idx;
3433 unsigned mask;
3434
3435 if (opcode == TGSI_OPCODE_DDX_FINE)
3436 mask = AC_TID_MASK_LEFT;
3437 else if (opcode == TGSI_OPCODE_DDY_FINE)
3438 mask = AC_TID_MASK_TOP;
3439 else
3440 mask = AC_TID_MASK_TOP_LEFT;
3441
3442 /* for DDX we want to next X pixel, DDY next Y pixel. */
3443 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3444
3445 val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");
3446 val = ac_build_ddxy(&ctx->ac, ctx->screen->has_ds_bpermute,
3447 mask, idx, ctx->lds, val);
3448 emit_data->output[emit_data->chan] = val;
3449 }
3450
3451 /*
3452 * this takes an I,J coordinate pair,
3453 * and works out the X and Y derivatives.
3454 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3455 */
3456 static LLVMValueRef si_llvm_emit_ddxy_interp(
3457 struct lp_build_tgsi_context *bld_base,
3458 LLVMValueRef interp_ij)
3459 {
3460 struct si_shader_context *ctx = si_shader_context(bld_base);
3461 struct gallivm_state *gallivm = &ctx->gallivm;
3462 LLVMValueRef result[4], a;
3463 unsigned i;
3464
3465 for (i = 0; i < 2; i++) {
3466 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
3467 LLVMConstInt(ctx->i32, i, 0), "");
3468 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3469 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3470 }
3471
3472 return lp_build_gather_values(gallivm, result, 4);
3473 }
3474
/* Fetch the extra arguments of TGSI INTERP_* opcodes.
 *
 * For INTERP_OFFSET, args[0..1] are the X/Y pixel offsets from src 1.
 * For INTERP_SAMPLE, args[0..1] are the selected sample's position
 * relative to the pixel center (sample position minus 0.5).
 * INTERP_CENTROID takes no extra arguments. */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
					     ctx->i32, "");
		sample_position = load_sample_position(ctx, sample_id);

		emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_0, "");

		/* Subtract 0.5 to get the offset from the pixel center. */
		emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
3518
/* Emit TGSI INTERP_CENTROID / INTERP_OFFSET / INTERP_SAMPLE.
 *
 * Picks the barycentric (I, J) pair for the requested location, adjusts
 * it for offset/sample interpolation using screen-space derivatives, and
 * interpolates the selected input attribute for all four channels. */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* Offset/sample interpolation starts from the center barycentrics
	 * and adjusts them below; centroid uses its own pair directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* -1: unsupported; 0: constant interpolation (no (I, J) needed). */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = LLVMConstInt(ctx->i32, input_index, 0);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(gallivm, ij_out, 2);
	}

	/* Interpolate (or broadcast, for constant interpolation) each
	 * destination channel, honoring the source swizzle. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = LLVMConstInt(ctx->i32, schan, 0);

		if (interp_param) {
			interp_param = LLVMBuildBitCast(gallivm->builder,
				interp_param, LLVMVectorType(ctx->f32, 2), "");
			LLVMValueRef i = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_0, "");
			LLVMValueRef j = LLVMBuildExtractElement(
				gallivm->builder, interp_param, ctx->i32_1, "");
			emit_data->output[chan] = ac_build_fs_interp(&ctx->ac,
				llvm_chan, attr_number, params,
				i, j);
		} else {
			emit_data->output[chan] = ac_build_fs_interp_mov(&ctx->ac,
				LLVMConstInt(ctx->i32, 2, 0), /* P0 */
				llvm_chan, attr_number, params);
		}
	}
}
3617
/* Return an i64 mask with one bit per lane, set iff that lane's "value"
 * is non-zero, using the llvm.amdgcn.icmp intrinsic (value != 0). */
static LLVMValueRef si_emit_ballot(struct si_shader_context *ctx,
				   LLVMValueRef value)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &args[0]);

	/* The intrinsic compares i32 operands; coerce other 32-bit types. */
	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(gallivm->builder, args[0], ctx->i32, "");

	return lp_build_intrinsic(gallivm->builder,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  LP_FUNC_ATTR_NOUNWIND |
				  LP_FUNC_ATTR_READNONE |
				  LP_FUNC_ATTR_CONVERGENT);
}
3643
3644 static void vote_all_emit(
3645 const struct lp_build_tgsi_action *action,
3646 struct lp_build_tgsi_context *bld_base,
3647 struct lp_build_emit_data *emit_data)
3648 {
3649 struct si_shader_context *ctx = si_shader_context(bld_base);
3650 struct gallivm_state *gallivm = &ctx->gallivm;
3651 LLVMValueRef active_set, vote_set;
3652 LLVMValueRef tmp;
3653
3654 active_set = si_emit_ballot(ctx, ctx->i32_1);
3655 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3656
3657 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3658 emit_data->output[emit_data->chan] =
3659 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3660 }
3661
3662 static void vote_any_emit(
3663 const struct lp_build_tgsi_action *action,
3664 struct lp_build_tgsi_context *bld_base,
3665 struct lp_build_emit_data *emit_data)
3666 {
3667 struct si_shader_context *ctx = si_shader_context(bld_base);
3668 struct gallivm_state *gallivm = &ctx->gallivm;
3669 LLVMValueRef vote_set;
3670 LLVMValueRef tmp;
3671
3672 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3673
3674 tmp = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
3675 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3676 emit_data->output[emit_data->chan] =
3677 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3678 }
3679
3680 static void vote_eq_emit(
3681 const struct lp_build_tgsi_action *action,
3682 struct lp_build_tgsi_context *bld_base,
3683 struct lp_build_emit_data *emit_data)
3684 {
3685 struct si_shader_context *ctx = si_shader_context(bld_base);
3686 struct gallivm_state *gallivm = &ctx->gallivm;
3687 LLVMValueRef active_set, vote_set;
3688 LLVMValueRef all, none, tmp;
3689
3690 active_set = si_emit_ballot(ctx, ctx->i32_1);
3691 vote_set = si_emit_ballot(ctx, emit_data->args[0]);
3692
3693 all = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, vote_set, active_set, "");
3694 none = LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
3695 vote_set, LLVMConstInt(ctx->i64, 0, 0), "");
3696 tmp = LLVMBuildOr(gallivm->builder, all, none, "");
3697 emit_data->output[emit_data->chan] =
3698 LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
3699 }
3700
3701 static void ballot_emit(
3702 const struct lp_build_tgsi_action *action,
3703 struct lp_build_tgsi_context *bld_base,
3704 struct lp_build_emit_data *emit_data)
3705 {
3706 struct si_shader_context *ctx = si_shader_context(bld_base);
3707 LLVMBuilderRef builder = ctx->gallivm.builder;
3708 LLVMValueRef tmp;
3709
3710 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
3711 tmp = si_emit_ballot(ctx, tmp);
3712 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
3713
3714 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
3715 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
3716 }
3717
3718 static void read_invoc_fetch_args(
3719 struct lp_build_tgsi_context *bld_base,
3720 struct lp_build_emit_data *emit_data)
3721 {
3722 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
3723 0, emit_data->src_chan);
3724
3725 /* Always read the source invocation (= lane) from the X channel. */
3726 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
3727 1, TGSI_CHAN_X);
3728 emit_data->arg_count = 2;
3729 }
3730
/* Emit READ_FIRST_INVOC / READ_INVOC by calling the intrinsic named in
 * the action (e.g. a readlane/readfirstlane variant — determined by the
 * action table, not visible here).  All operands are coerced to i32. */
static void read_lane_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->gallivm.builder;

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	emit_optimization_barrier(ctx, &emit_data->args[0]);

	/* The intrinsic takes i32 operands; bitcast everything. */
	for (unsigned i = 0; i < emit_data->arg_count; ++i) {
		emit_data->args[i] = LLVMBuildBitCast(builder, emit_data->args[i],
						      ctx->i32, "");
	}

	emit_data->output[emit_data->chan] =
		ac_build_intrinsic(&ctx->ac, action->intr_name,
				   ctx->i32, emit_data->args, emit_data->arg_count,
				   AC_FUNC_ATTR_READNONE |
				   AC_FUNC_ATTR_CONVERGENT);
}
3755
3756 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3757 struct lp_build_emit_data *emit_data)
3758 {
3759 struct si_shader_context *ctx = si_shader_context(bld_base);
3760 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3761 LLVMValueRef imm;
3762 unsigned stream;
3763
3764 assert(src0.File == TGSI_FILE_IMMEDIATE);
3765
3766 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
3767 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
3768 return stream;
3769 }
3770
/* Emit one vertex from the geometry shader:
 * store all enabled outputs of the current stream to the GSVS ring,
 * bump the per-stream vertex counter, and send the EMIT message.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	unsigned chan, offset;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		/* ac_build_kill terminates the thread when its argument is
		 * negative, so map can_emit to +1.0 / -1.0. */
		kill = lp_build_select(&bld_base->base, can_emit,
				       LLVMConstReal(ctx->f32, 1.0f),
				       LLVMConstReal(ctx->f32, -1.0f));

		ac_build_kill(&ctx->ac, kill);
	} else {
		lp_build_if(&if_state, gallivm, can_emit);
	}

	/* Store every enabled component that belongs to this stream.
	 * Each component occupies a gs_max_out_vertices-sized slice of the
	 * ring, indexed by the running 'offset' counter.
	 */
	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->outputs[i];

		for (chan = 0; chan < 4; chan++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			/* Byte offset = (slice base + vertex index) * 4. */
			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Advance the per-stream vertex counter for the next EMIT. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
3860
3861 /* Cut one primitive from the geometry shader */
3862 static void si_llvm_emit_primitive(
3863 const struct lp_build_tgsi_action *action,
3864 struct lp_build_tgsi_context *bld_base,
3865 struct lp_build_emit_data *emit_data)
3866 {
3867 struct si_shader_context *ctx = si_shader_context(bld_base);
3868 unsigned stream;
3869
3870 /* Signal primitive cut */
3871 stream = si_llvm_get_stream(bld_base, emit_data);
3872 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
3873 si_get_gs_wave_id(ctx));
3874 }
3875
/* TGSI_OPCODE_BARRIER: workgroup execution barrier. */
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = &ctx->gallivm;

	/* SI only (thanks to a hw bug workaround):
	 * The real barrier instruction isn’t needed, because an entire patch
	 * always fits into a single wave.
	 */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->type == PIPE_SHADER_TESS_CTRL) {
		/* NOTE(review): combining the waitcnt masks with '&' looks
		 * odd but appears intentional — presumably the bits of each
		 * counter to wait on are cleared in its mask, so '&' waits on
		 * both. Confirm against the LGKM_CNT/VM_CNT macro
		 * definitions before changing this.
		 */
		si_emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
		return;
	}

	lp_build_intrinsic(gallivm->builder,
			   "llvm.amdgcn.s.barrier",
			   ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
}
3897
/* Shared TGSI action for the interpolation opcodes; argument fetching and
 * emission are implemented by interp_fetch_args / build_interp_intrinsic.
 */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3902
/**
 * Create the LLVM function for a shader part and set up its parameter and
 * function attributes.
 *
 * \param returns/num_returns   return value types of the function
 * \param params/num_params     parameter types
 * \param last_sgpr             index of the last SGPR parameter; every
 *                              parameter up to and including it is marked
 *                              uniform (inreg, or byval for pointers)
 * \param max_workgroup_size    if non-zero, emitted as the
 *                              "amdgpu-max-work-group-size" attribute
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr, unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    params, num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			/* Attribute index i + 1: LLVM parameter attribute
			 * indices are 1-based (0 is the return value). */
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_BYVAL);
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
3957
3958 static void declare_streamout_params(struct si_shader_context *ctx,
3959 struct pipe_stream_output_info *so,
3960 LLVMTypeRef *params, LLVMTypeRef i32,
3961 unsigned *num_params)
3962 {
3963 int i;
3964
3965 /* Streamout SGPRs. */
3966 if (so->num_outputs) {
3967 if (ctx->type != PIPE_SHADER_TESS_EVAL)
3968 params[ctx->param_streamout_config = (*num_params)++] = i32;
3969 else
3970 ctx->param_streamout_config = *num_params - 1;
3971
3972 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
3973 }
3974 /* A streamout buffer offset is loaded if the stride is non-zero. */
3975 for (i = 0; i < 4; i++) {
3976 if (!so->stride[i])
3977 continue;
3978
3979 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
3980 }
3981 }
3982
3983 static unsigned llvm_get_type_size(LLVMTypeRef type)
3984 {
3985 LLVMTypeKind kind = LLVMGetTypeKind(type);
3986
3987 switch (kind) {
3988 case LLVMIntegerTypeKind:
3989 return LLVMGetIntTypeWidth(type) / 8;
3990 case LLVMFloatTypeKind:
3991 return 4;
3992 case LLVMPointerTypeKind:
3993 return 8;
3994 case LLVMVectorTypeKind:
3995 return LLVMGetVectorSize(type) *
3996 llvm_get_type_size(LLVMGetElementType(type));
3997 case LLVMArrayTypeKind:
3998 return LLVMGetArrayLength(type) *
3999 llvm_get_type_size(LLVMGetElementType(type));
4000 default:
4001 assert(0);
4002 return 0;
4003 }
4004 }
4005
4006 static void declare_lds_as_pointer(struct si_shader_context *ctx)
4007 {
4008 struct gallivm_state *gallivm = &ctx->gallivm;
4009
4010 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
4011 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, ctx->i32_0,
4012 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
4013 "lds");
4014 }
4015
4016 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4017 {
4018 switch (shader->selector->type) {
4019 case PIPE_SHADER_TESS_CTRL:
4020 /* Return this so that LLVM doesn't remove s_barrier
4021 * instructions on chips where we use s_barrier. */
4022 return shader->selector->screen->b.chip_class >= CIK ? 128 : 64;
4023
4024 case PIPE_SHADER_GEOMETRY:
4025 return shader->selector->screen->b.chip_class >= GFX9 ? 128 : 64;
4026
4027 case PIPE_SHADER_COMPUTE:
4028 break; /* see below */
4029
4030 default:
4031 return 0;
4032 }
4033
4034 const unsigned *properties = shader->selector->info.properties;
4035 unsigned max_work_group_size =
4036 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4037 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4038 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4039
4040 if (!max_work_group_size) {
4041 /* This is a variable group size compute shader,
4042 * compile it for the maximum possible group size.
4043 */
4044 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4045 }
4046 return max_work_group_size;
4047 }
4048
4049 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4050 LLVMTypeRef *params,
4051 unsigned *num_params,
4052 bool assign_params)
4053 {
4054 params[(*num_params)++] = si_const_array(ctx->v4i32,
4055 SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS);
4056 params[(*num_params)++] = si_const_array(ctx->v8i32,
4057 SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2);
4058
4059 if (assign_params) {
4060 ctx->param_const_and_shader_buffers = *num_params - 2;
4061 ctx->param_samplers_and_images = *num_params - 1;
4062 }
4063 }
4064
4065 static void declare_default_desc_pointers(struct si_shader_context *ctx,
4066 LLVMTypeRef *params,
4067 unsigned *num_params)
4068 {
4069 params[ctx->param_rw_buffers = (*num_params)++] =
4070 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
4071 declare_per_stage_desc_pointers(ctx, params, num_params, true);
4072 }
4073
4074 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
4075 LLVMTypeRef *params,
4076 unsigned *num_params)
4077 {
4078 params[ctx->param_vertex_buffers = (*num_params)++] =
4079 si_const_array(ctx->v4i32, SI_NUM_VERTEX_BUFFERS);
4080 params[ctx->param_base_vertex = (*num_params)++] = ctx->i32;
4081 params[ctx->param_start_instance = (*num_params)++] = ctx->i32;
4082 params[ctx->param_draw_id = (*num_params)++] = ctx->i32;
4083 params[ctx->param_vs_state_bits = (*num_params)++] = ctx->i32;
4084 }
4085
4086 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
4087 LLVMTypeRef *params, unsigned *num_params,
4088 unsigned *num_prolog_vgprs)
4089 {
4090 struct si_shader *shader = ctx->shader;
4091
4092 params[ctx->param_vertex_id = (*num_params)++] = ctx->i32;
4093 if (shader->key.as_ls) {
4094 params[ctx->param_rel_auto_id = (*num_params)++] = ctx->i32;
4095 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4096 } else {
4097 params[ctx->param_instance_id = (*num_params)++] = ctx->i32;
4098 params[ctx->param_vs_prim_id = (*num_params)++] = ctx->i32;
4099 }
4100 params[(*num_params)++] = ctx->i32; /* unused */
4101
4102 if (!shader->is_gs_copy_shader) {
4103 /* Vertex load indices. */
4104 ctx->param_vertex_index0 = (*num_params);
4105 for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
4106 params[(*num_params)++] = ctx->i32;
4107 *num_prolog_vgprs += shader->selector->info.num_inputs;
4108 }
4109 }
4110
4111 static void declare_tes_input_vgprs(struct si_shader_context *ctx,
4112 LLVMTypeRef *params, unsigned *num_params)
4113 {
4114 params[ctx->param_tes_u = (*num_params)++] = ctx->f32;
4115 params[ctx->param_tes_v = (*num_params)++] = ctx->f32;
4116 params[ctx->param_tes_rel_patch_id = (*num_params)++] = ctx->i32;
4117 params[ctx->param_tes_patch_id = (*num_params)++] = ctx->i32;
4118 }
4119
/* Internal shader-type values for the GFX9 merged stages. They start at
 * PIPE_SHADER_TYPES so they can never collide with the PIPE_SHADER_* enums.
 */
enum {
	/* Convenient merged shader definitions. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4125
/**
 * Declare all inputs (SGPRs then VGPRs) and return values of the shader
 * "main" function according to the register conventions of each stage,
 * create the LLVM function, and record the input SGPR/VGPR counts.
 *
 * The statement order inside each case defines the hardware input ABI —
 * do not reorder the declarations.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[100]; /* just make it large enough */
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_sgpr, num_params = 0, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;

	/* Set MERGED shaders. */
	if (ctx->screen->b.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_default_desc_pointers(ctx, params, &num_params);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		if (shader->key.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			if (shader->is_gs_copy_shader)
				num_params = ctx->param_rw_buffers + 1;

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		declare_vs_input_vgprs(ctx, params, &num_params,
				       &num_prolog_vgprs);
		break;

	case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 3; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx, params, &num_params);

		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
		params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused */

		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_TESS_CTRL);
		last_sgpr = num_params - 1;

		/* VGPRs (first TCS, then VS) */
		params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		params[ctx->param_rw_buffers = num_params++] = /* SPI_SHADER_USER_DATA_ADDR_LO_GS */
			si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_wave_info = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_merged_scratch_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		params[num_params++] = ctx->i32; /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		params[num_params++] = ctx->i32; /* unused */
		params[num_params++] = ctx->i32; /* unused */
		declare_per_stage_desc_pointers(ctx, params, &num_params,
						(ctx->type == PIPE_SHADER_VERTEX ||
						 ctx->type == PIPE_SHADER_TESS_EVAL));
		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_specific_input_sgprs(ctx, params, &num_params);
		} else {
			/* TESS_EVAL (and also GEOMETRY):
			 * Declare as many input SGPRs as the VS has. */
			params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
			params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[num_params++] = ctx->i32; /* unused */
			params[ctx->param_vs_state_bits = num_params++] = ctx->i32; /* unused */
		}

		declare_per_stage_desc_pointers(ctx, params, &num_params,
						ctx->type == PIPE_SHADER_GEOMETRY);
		last_sgpr = num_params - 1;

		/* VGPRs (first GS, then VS/TES) */
		params[ctx->param_gs_vtx01_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx23_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx45_offset = num_params++] = ctx->i32;

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, params, &num_params,
					       &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx, params, &num_params);
		}

		if (ctx->type == PIPE_SHADER_VERTEX ||
		    ctx->type == PIPE_SHADER_TESS_EVAL) {
			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;

		if (shader->key.as_es) {
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
			params[num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		declare_tes_input_vgprs(ctx, params, &num_params);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_default_desc_pointers(ctx, params, &num_params);
		params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_wave_id = num_params++] = ctx->i32;
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_gs_vtx0_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx1_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx2_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx3_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx4_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_vtx5_offset = num_params++] = ctx->i32;
		params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
		break;

	case PIPE_SHADER_FRAGMENT:
		/* PS uses fixed SI_PARAM_* indices instead of running
		 * num_params, so the layout matches the prolog/epilog parts.
		 */
		declare_default_desc_pointers(ctx, params, &num_params);
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		shader->info.face_vgpr_index = 20;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			assert(num_params + num_color_elements <= ARRAY_SIZE(params));
			for (i = 0; i < num_color_elements; i++)
				params[num_params++] = ctx->f32;

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_default_desc_pointers(ctx, params, &num_params);
		if (shader->selector->info.uses_grid_size)
			params[ctx->param_grid_size = num_params++] = v3i32;
		if (shader->selector->info.uses_block_size)
			params[ctx->param_block_size = num_params++] = v3i32;

		for (i = 0; i < 3; i++) {
			ctx->param_block_id[i] = -1;
			if (shader->selector->info.uses_block_id[i])
				params[ctx->param_block_id[i] = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		params[ctx->param_thread_id = num_params++] = v3i32;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, "main", returns, num_returns, params,
			   num_params, last_sgpr,
			   si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    ctx->separate_prolog) {
		si_llvm_add_attribute(ctx->main_fn,
				      "InitialPSInputAddr",
				      S_0286D0_PERSP_SAMPLE_ENA(1) |
				      S_0286D0_PERSP_CENTER_ENA(1) |
				      S_0286D0_PERSP_CENTROID_ENA(1) |
				      S_0286D0_LINEAR_SAMPLE_ENA(1) |
				      S_0286D0_LINEAR_CENTER_ENA(1) |
				      S_0286D0_LINEAR_CENTROID_ENA(1) |
				      S_0286D0_FRONT_FACE_ENA(1) |
				      S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers in units of dwords. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	for (; i < num_params; ++i)
		shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Prolog-provided VGPRs are not inputs of the main part itself. */
	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	/* Without ds_bpermute, a small LDS scratch area is allocated,
	 * presumably used by the DDX/DDY/INTERP_* lowering — see the
	 * corresponding opcode handlers.
	 */
	if (!ctx->screen->has_ds_bpermute &&
	    bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if (shader->key.as_ls ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (ctx->screen->b.chip_class >= GFX9 &&
	     (shader->key.as_es ||
	      ctx->type == PIPE_SHADER_GEOMETRY)))
		declare_lds_as_pointer(ctx);
}
4480
/**
 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
 * for later use.
 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;

	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	/* ESGS ring descriptor — only on SI-VI; GFX9 keeps the ESGS ring
	 * in LDS instead (see create_function). */
	if (ctx->screen->b.chip_class <= VI &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							  : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The GS copy shader reads the GSVS ring linearly; the
		 * descriptor can be used as-is. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Build one patched descriptor per active vertex stream. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* Add this stream's byte offset to the 48-bit base
			 * address in dwords 0-1 of the descriptor. */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			/* Patch stride and enable swizzled addressing in
			 * dword 1, then set num_records and the format/
			 * swizzle fields in dwords 2-3. */
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			ring = LLVMBuildInsertElement(builder, ring,
					LLVMConstInt(ctx->i32, num_records, 0),
					LLVMConstInt(ctx->i32, 2, 0), "");
			ring = LLVMBuildInsertElement(builder, ring,
				LLVMConstInt(ctx->i32,
					     S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
					     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
					     S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
					     S_008F0C_ADD_TID_ENABLE(1),
					     0),
				LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}
}
4580
/* Emit code that discards (kills) fragments masked out by the current
 * polygon-stipple pattern.
 *
 * \param param_rw_buffers  function param holding the RW-buffer descriptor array
 * \param param_pos_fixed_pt  index of the fixed-point gl_FragCoord input param
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
	desc = ac_build_indexed_load_const(&ctx->ac, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	/* Byte offset of the row = y * 4 (one dword per row). */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = LLVMBuildBitCast(builder, row, ctx->i32, "");
	/* Select bit x of the row; non-zero means the fragment survives. */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	ac_build_kill(&ctx->ac, bit);
}
4613
4614 void si_shader_binary_read_config(struct ac_shader_binary *binary,
4615 struct si_shader_config *conf,
4616 unsigned symbol_offset)
4617 {
4618 unsigned i;
4619 const unsigned char *config =
4620 ac_shader_binary_config_start(binary, symbol_offset);
4621 bool really_needs_scratch = false;
4622
4623 /* LLVM adds SGPR spills to the scratch size.
4624 * Find out if we really need the scratch buffer.
4625 */
4626 for (i = 0; i < binary->reloc_count; i++) {
4627 const struct ac_shader_reloc *reloc = &binary->relocs[i];
4628
4629 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
4630 !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4631 really_needs_scratch = true;
4632 break;
4633 }
4634 }
4635
4636 /* XXX: We may be able to emit some of these values directly rather than
4637 * extracting fields to be emitted later.
4638 */
4639
4640 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
4641 unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
4642 unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
4643 switch (reg) {
4644 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
4645 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
4646 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
4647 case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
4648 case R_00B848_COMPUTE_PGM_RSRC1:
4649 conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
4650 conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
4651 conf->float_mode = G_00B028_FLOAT_MODE(value);
4652 conf->rsrc1 = value;
4653 break;
4654 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
4655 conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
4656 break;
4657 case R_00B84C_COMPUTE_PGM_RSRC2:
4658 conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
4659 conf->rsrc2 = value;
4660 break;
4661 case R_0286CC_SPI_PS_INPUT_ENA:
4662 conf->spi_ps_input_ena = value;
4663 break;
4664 case R_0286D0_SPI_PS_INPUT_ADDR:
4665 conf->spi_ps_input_addr = value;
4666 break;
4667 case R_0286E8_SPI_TMPRING_SIZE:
4668 case R_00B860_COMPUTE_TMPRING_SIZE:
4669 /* WAVESIZE is in units of 256 dwords. */
4670 if (really_needs_scratch)
4671 conf->scratch_bytes_per_wave =
4672 G_00B860_WAVESIZE(value) * 256 * 4;
4673 break;
4674 case 0x4: /* SPILLED_SGPRS */
4675 conf->spilled_sgprs = value;
4676 break;
4677 case 0x8: /* SPILLED_VGPRS */
4678 conf->spilled_vgprs = value;
4679 break;
4680 default:
4681 {
4682 static bool printed;
4683
4684 if (!printed) {
4685 fprintf(stderr, "Warning: LLVM emitted unknown "
4686 "config register: 0x%x\n", reg);
4687 printed = true;
4688 }
4689 }
4690 break;
4691 }
4692 }
4693
4694 if (!conf->spi_ps_input_addr)
4695 conf->spi_ps_input_addr = conf->spi_ps_input_ena;
4696 }
4697
4698 void si_shader_apply_scratch_relocs(struct si_shader *shader,
4699 uint64_t scratch_va)
4700 {
4701 unsigned i;
4702 uint32_t scratch_rsrc_dword0 = scratch_va;
4703 uint32_t scratch_rsrc_dword1 =
4704 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
4705
4706 /* Enable scratch coalescing. */
4707 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
4708
4709 for (i = 0 ; i < shader->binary.reloc_count; i++) {
4710 const struct ac_shader_reloc *reloc =
4711 &shader->binary.relocs[i];
4712 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
4713 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4714 &scratch_rsrc_dword0, 4);
4715 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4716 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4717 &scratch_rsrc_dword1, 4);
4718 }
4719 }
4720 }
4721
4722 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
4723 {
4724 unsigned size = shader->binary.code_size;
4725
4726 if (shader->prolog)
4727 size += shader->prolog->binary.code_size;
4728 if (shader->previous_stage)
4729 size += shader->previous_stage->binary.code_size;
4730 if (shader->prolog2)
4731 size += shader->prolog2->binary.code_size;
4732 if (shader->epilog)
4733 size += shader->epilog->binary.code_size;
4734 return size;
4735 }
4736
4737 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
4738 {
4739 const struct ac_shader_binary *prolog =
4740 shader->prolog ? &shader->prolog->binary : NULL;
4741 const struct ac_shader_binary *previous_stage =
4742 shader->previous_stage ? &shader->previous_stage->binary : NULL;
4743 const struct ac_shader_binary *prolog2 =
4744 shader->prolog2 ? &shader->prolog2->binary : NULL;
4745 const struct ac_shader_binary *epilog =
4746 shader->epilog ? &shader->epilog->binary : NULL;
4747 const struct ac_shader_binary *mainb = &shader->binary;
4748 unsigned bo_size = si_get_shader_binary_size(shader) +
4749 (!epilog ? mainb->rodata_size : 0);
4750 unsigned char *ptr;
4751
4752 assert(!prolog || !prolog->rodata_size);
4753 assert(!previous_stage || !previous_stage->rodata_size);
4754 assert(!prolog2 || !prolog2->rodata_size);
4755 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
4756 !mainb->rodata_size);
4757 assert(!epilog || !epilog->rodata_size);
4758
4759 /* GFX9 can fetch at most 128 bytes past the end of the shader.
4760 * Prevent VM faults.
4761 */
4762 if (sscreen->b.chip_class >= GFX9)
4763 bo_size += 128;
4764
4765 r600_resource_reference(&shader->bo, NULL);
4766 shader->bo = (struct r600_resource*)
4767 pipe_buffer_create(&sscreen->b.b, 0,
4768 PIPE_USAGE_IMMUTABLE,
4769 align(bo_size, SI_CPDMA_ALIGNMENT));
4770 if (!shader->bo)
4771 return -ENOMEM;
4772
4773 /* Upload. */
4774 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
4775 PIPE_TRANSFER_READ_WRITE |
4776 PIPE_TRANSFER_UNSYNCHRONIZED);
4777
4778 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
4779 * endian-independent. */
4780 if (prolog) {
4781 memcpy(ptr, prolog->code, prolog->code_size);
4782 ptr += prolog->code_size;
4783 }
4784 if (previous_stage) {
4785 memcpy(ptr, previous_stage->code, previous_stage->code_size);
4786 ptr += previous_stage->code_size;
4787 }
4788 if (prolog2) {
4789 memcpy(ptr, prolog2->code, prolog2->code_size);
4790 ptr += prolog2->code_size;
4791 }
4792
4793 memcpy(ptr, mainb->code, mainb->code_size);
4794 ptr += mainb->code_size;
4795
4796 if (epilog)
4797 memcpy(ptr, epilog->code, epilog->code_size);
4798 else if (mainb->rodata_size > 0)
4799 memcpy(ptr, mainb->rodata, mainb->rodata_size);
4800
4801 sscreen->b.ws->buffer_unmap(shader->bo->buf);
4802 return 0;
4803 }
4804
4805 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
4806 struct pipe_debug_callback *debug,
4807 const char *name, FILE *file)
4808 {
4809 char *line, *p;
4810 unsigned i, count;
4811
4812 if (binary->disasm_string) {
4813 fprintf(file, "Shader %s disassembly:\n", name);
4814 fprintf(file, "%s", binary->disasm_string);
4815
4816 if (debug && debug->debug_message) {
4817 /* Very long debug messages are cut off, so send the
4818 * disassembly one line at a time. This causes more
4819 * overhead, but on the plus side it simplifies
4820 * parsing of resulting logs.
4821 */
4822 pipe_debug_message(debug, SHADER_INFO,
4823 "Shader Disassembly Begin");
4824
4825 line = binary->disasm_string;
4826 while (*line) {
4827 p = util_strchrnul(line, '\n');
4828 count = p - line;
4829
4830 if (count) {
4831 pipe_debug_message(debug, SHADER_INFO,
4832 "%.*s", count, line);
4833 }
4834
4835 if (!*p)
4836 break;
4837 line = p + 1;
4838 }
4839
4840 pipe_debug_message(debug, SHADER_INFO,
4841 "Shader Disassembly End");
4842 }
4843 } else {
4844 fprintf(file, "Shader %s binary:\n", name);
4845 for (i = 0; i < binary->code_size; i += 4) {
4846 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
4847 binary->code[i + 3], binary->code[i + 2],
4848 binary->code[i + 1], binary->code[i]);
4849 }
4850 }
4851 }
4852
/* Compute and print shader resource statistics (register usage, LDS,
 * scratch, estimated max waves per SIMD) to "file" and to the debug
 * callback. With check_debug_option=true, printing to "file" is gated
 * on the R600_DEBUG shader-dump flags. */
static void si_shader_dump_stats(struct si_screen *sscreen,
			         const struct si_shader *shader,
			         struct pipe_debug_callback *debug,
			         unsigned processor,
				 FILE *file,
				 bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
	unsigned code_size = si_get_shader_binary_size(shader);
	/* LDS allocation granularity: 512 bytes on CIK+, 256 before. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	/* 10 waves per SIMD is the hardware maximum. */
	unsigned max_simd_waves = 10;

	/* Compute LDS usage for PS. */
	switch (processor) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			/* LDS is allocated per thread group; divide it among
			 * the waves of the group (64 threads per wave). */
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* SGPR file per SIMD: 800 usable on VI+, 512 before. */
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* 256 VGPRs per SIMD. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	/* Always sent (not gated on the debug option), so tools can collect
	 * stats even without dumps enabled. */
	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
4945
4946 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
4947 {
4948 switch (processor) {
4949 case PIPE_SHADER_VERTEX:
4950 if (shader->key.as_es)
4951 return "Vertex Shader as ES";
4952 else if (shader->key.as_ls)
4953 return "Vertex Shader as LS";
4954 else
4955 return "Vertex Shader as VS";
4956 case PIPE_SHADER_TESS_CTRL:
4957 return "Tessellation Control Shader";
4958 case PIPE_SHADER_TESS_EVAL:
4959 if (shader->key.as_es)
4960 return "Tessellation Evaluation Shader as ES";
4961 else
4962 return "Tessellation Evaluation Shader as VS";
4963 case PIPE_SHADER_GEOMETRY:
4964 if (shader->is_gs_copy_shader)
4965 return "GS Copy Shader as VS";
4966 else
4967 return "Geometry Shader";
4968 case PIPE_SHADER_FRAGMENT:
4969 return "Pixel Shader";
4970 case PIPE_SHADER_COMPUTE:
4971 return "Compute Shader";
4972 default:
4973 return "Unknown Shader";
4974 }
4975 }
4976
/* Dump everything about the shader in this order: key, LLVM IR (if
 * recorded), disassembly of all parts, then statistics. With
 * check_debug_option=true, each section is gated on the corresponding
 * R600_DEBUG flags. */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    r600_can_dump_shader(&sscreen->b, processor))
		si_dump_shader_key(processor, shader, file);

	/* llvm_ir_string is only recorded when sscreen->record_llvm_ir is set. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (r600_can_dump_shader(&sscreen->b, processor) &&
	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the order they are concatenated in the BO. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, debug, processor, file,
			     check_debug_option);
}
5017
5018 static int si_compile_llvm(struct si_screen *sscreen,
5019 struct ac_shader_binary *binary,
5020 struct si_shader_config *conf,
5021 LLVMTargetMachineRef tm,
5022 LLVMModuleRef mod,
5023 struct pipe_debug_callback *debug,
5024 unsigned processor,
5025 const char *name)
5026 {
5027 int r = 0;
5028 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
5029
5030 if (r600_can_dump_shader(&sscreen->b, processor)) {
5031 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
5032
5033 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
5034 fprintf(stderr, "%s LLVM IR:\n\n", name);
5035 ac_dump_module(mod);
5036 fprintf(stderr, "\n");
5037 }
5038 }
5039
5040 if (sscreen->record_llvm_ir) {
5041 char *ir = LLVMPrintModuleToString(mod);
5042 binary->llvm_ir_string = strdup(ir);
5043 LLVMDisposeMessage(ir);
5044 }
5045
5046 if (!si_replace_shader(count, binary)) {
5047 r = si_llvm_compile(mod, binary, tm, debug);
5048 if (r)
5049 return r;
5050 }
5051
5052 si_shader_binary_read_config(binary, conf, 0);
5053
5054 /* Enable 64-bit and 16-bit denormals, because there is no performance
5055 * cost.
5056 *
5057 * If denormals are enabled, all floating-point output modifiers are
5058 * ignored.
5059 *
5060 * Don't enable denormals for 32-bit floats, because:
5061 * - Floating-point output modifiers would be ignored by the hw.
5062 * - Some opcodes don't support denormals, such as v_mad_f32. We would
5063 * have to stop using those.
5064 * - SI & CI would be very slow.
5065 */
5066 conf->float_mode |= V_00B028_FP_64_DENORMS;
5067
5068 FREE(binary->config);
5069 FREE(binary->global_symbol_offsets);
5070 binary->config = NULL;
5071 binary->global_symbol_offsets = NULL;
5072
5073 /* Some shaders can't have rodata because their binaries can be
5074 * concatenated.
5075 */
5076 if (binary->rodata_size &&
5077 (processor == PIPE_SHADER_VERTEX ||
5078 processor == PIPE_SHADER_TESS_CTRL ||
5079 processor == PIPE_SHADER_TESS_EVAL ||
5080 processor == PIPE_SHADER_FRAGMENT)) {
5081 fprintf(stderr, "radeonsi: The shader can't have rodata.");
5082 return -EINVAL;
5083 }
5084
5085 return r;
5086 }
5087
5088 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5089 {
5090 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5091 LLVMBuildRetVoid(ctx->gallivm.builder);
5092 else
5093 LLVMBuildRet(ctx->gallivm.builder, ret);
5094 }
5095
/* Generate code for the hardware VS shader stage to go with a geometry shader */
/* The copy shader reads GS output vertices from the GSVS ring buffer,
 * performs streamout for the selected vertex stream, and exports stream-0
 * outputs as a regular hardware VS. Returns NULL on failure; the caller
 * owns the returned si_shader. */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	/* Note: only the address is taken here; ctx.gallivm is initialized
	 * later by si_init_shader_ctx(). */
	struct gallivm_state *gallivm = &ctx.gallivm;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}


	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	/* The copy shader runs as a hardware VS. */
	ctx.type = PIPE_SHADER_VERTEX;

	builder = gallivm->builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Ring offset of this vertex: vertex_id * 4 bytes per dword. */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, LLVMGetParam(ctx.main_fn,
						    ctx.param_vertex_id), 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* output_streams packs one 2-bit stream index per channel. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* Branch on the stream ID; each used stream gets its own block. */
	end_bb = LLVMAppendBasicBlockInContext(gallivm->context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams > 0 only matter for transform feedback. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(gallivm->context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels that are unused or belong to a
				 * different stream are left undefined. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 can be rasterized, so only it exports. */
		if (stream == 0)
			si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(gallivm->builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5246
5247 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5248 const struct si_vs_prolog_bits *prolog,
5249 const char *prefix, FILE *f)
5250 {
5251 fprintf(f, " %s.instance_divisors = {", prefix);
5252 for (int i = 0; i < ARRAY_SIZE(prolog->instance_divisors); i++) {
5253 fprintf(f, !i ? "%u" : ", %u",
5254 prolog->instance_divisors[i]);
5255 }
5256 fprintf(f, "}\n");
5257
5258 fprintf(f, " mono.vs.fix_fetch = {");
5259 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5260 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5261 fprintf(f, "}\n");
5262 }
5263
/* Print the full shader key for the given processor type to "f".
 * Only fields relevant to each stage are printed. */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the LS prolog is merged into the TCS. */
		if (shader->selector->screen->b.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The copy shader's key is irrelevant; nothing else to print. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the ES prolog is merged into the GS. */
		if (shader->selector->screen->b.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Fields only used by shaders that run as a hardware VS. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.kill_outputs[0] = 0x%x\n", key->opt.kill_outputs[0]);
		fprintf(f, " opt.kill_outputs[1] = 0x%x\n", key->opt.kill_outputs[1]);
		fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5344
/* Initialize the shader context and register radeonsi's TGSI opcode
 * emit callbacks on top of the common gallivm defaults. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;

	si_llvm_context_init(ctx, sscreen, tm);

	bld_base = &ctx->bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes (PS only). */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;

	/* Derivatives. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Subgroup (cross-lane) operations. */
	bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
	bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;

	/* Geometry shader vertex emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
5383
5384 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5385 {
5386 struct si_shader *shader = ctx->shader;
5387 struct tgsi_shader_info *info = &shader->selector->info;
5388
5389 if ((ctx->type != PIPE_SHADER_VERTEX &&
5390 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5391 shader->key.as_ls ||
5392 shader->key.as_es)
5393 return;
5394
5395 ac_optimize_vs_outputs(&ctx->ac,
5396 ctx->main_fn,
5397 shader->info.vs_output_param_offset,
5398 info->num_outputs,
5399 &shader->info.nr_param_exports);
5400 }
5401
5402 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5403 {
5404 ctx->shader->config.private_mem_vgprs = 0;
5405
5406 /* Process all LLVM instructions. */
5407 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5408 while (bb) {
5409 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5410
5411 while (next) {
5412 LLVMValueRef inst = next;
5413 next = LLVMGetNextInstruction(next);
5414
5415 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5416 continue;
5417
5418 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5419 /* No idea why LLVM aligns allocas to 4 elements. */
5420 unsigned alignment = LLVMGetAlignment(inst);
5421 unsigned dw_size = align(llvm_get_type_size(type) / 4, alignment);
5422 ctx->shader->config.private_mem_vgprs += dw_size;
5423 }
5424 bb = LLVMGetNextBasicBlock(bb);
5425 }
5426 }
5427
5428 static void si_init_exec_full_mask(struct si_shader_context *ctx)
5429 {
5430 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
5431 lp_build_intrinsic(ctx->gallivm.builder,
5432 "llvm.amdgcn.init.exec", ctx->voidt,
5433 &full_mask, 1, LP_FUNC_ATTR_CONVERGENT);
5434 }
5435
5436 static void si_init_exec_from_input(struct si_shader_context *ctx,
5437 unsigned param, unsigned bitoffset)
5438 {
5439 LLVMValueRef args[] = {
5440 LLVMGetParam(ctx->main_fn, param),
5441 LLVMConstInt(ctx->i32, bitoffset, 0),
5442 };
5443 lp_build_intrinsic(ctx->gallivm.builder,
5444 "llvm.amdgcn.init.exec.from.input",
5445 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5446 }
5447
/* Translate the selector's TGSI tokens into the main LLVM function,
 * wiring up per-stage input-fetch and epilogue callbacks first.
 * Returns false if the TGSI->LLVM translation fails. */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	/* Pick stage-specific callbacks. The epilogue choice depends on which
	 * hardware stage the shader actually runs as (LS/ES/VS). */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.as_ls)
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC. If the prolog is present, set EXEC there instead.
	 * - Add a barrier before the second shader.
	 *
	 * The same thing for monolithic shaders is done in
	 * si_build_wrapper_function.
	 */
	if (ctx->screen->b.chip_class >= GFX9 && !is_monolithic) {
		if (sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !sel->vs_needs_prolog))) {
			/* First-half shader: wave info at bit 0. */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			/* Second-half shader: wave info at bit 8, and wait for
			 * the first half before reading its outputs. */
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 8);
			si_llvm_emit_barrier(NULL, bld_base, NULL);
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* Per-stream counters for emitted vertices. */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
5537
5538 /**
5539 * Compute the VS prolog key, which contains all the information needed to
5540 * build the VS prolog function, and set shader->info bits where needed.
5541 *
5542 * \param info Shader info of the vertex shader.
5543 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
5544 * \param prolog_key Key of the VS prolog
5545 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
5546 * \param key Output shader part key.
5547 */
5548 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
5549 unsigned num_input_sgprs,
5550 const struct si_vs_prolog_bits *prolog_key,
5551 struct si_shader *shader_out,
5552 union si_shader_part_key *key)
5553 {
5554 memset(key, 0, sizeof(*key));
5555 key->vs_prolog.states = *prolog_key;
5556 key->vs_prolog.num_input_sgprs = num_input_sgprs;
5557 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
5558 key->vs_prolog.as_ls = shader_out->key.as_ls;
5559
5560 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
5561 key->vs_prolog.as_ls = 1;
5562 key->vs_prolog.num_merged_next_stage_vgprs = 2;
5563 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
5564 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5565 }
5566
5567 /* Set the instanceID flag. */
5568 for (unsigned i = 0; i < info->num_inputs; i++)
5569 if (key->vs_prolog.states.instance_divisors[i])
5570 shader_out->info.uses_instanceid = true;
5571 }
5572
5573 /**
5574 * Compute the PS prolog key, which contains all the information needed to
5575 * build the PS prolog function, and set related bits in shader->config.
5576 */
5577 static void si_get_ps_prolog_key(struct si_shader *shader,
5578 union si_shader_part_key *key,
5579 bool separate_prolog)
5580 {
5581 struct tgsi_shader_info *info = &shader->selector->info;
5582
5583 memset(key, 0, sizeof(*key));
5584 key->ps_prolog.states = shader->key.part.ps.prolog;
5585 key->ps_prolog.colors_read = info->colors_read;
5586 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5587 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5588 key->ps_prolog.wqm = info->uses_derivatives &&
5589 (key->ps_prolog.colors_read ||
5590 key->ps_prolog.states.force_persp_sample_interp ||
5591 key->ps_prolog.states.force_linear_sample_interp ||
5592 key->ps_prolog.states.force_persp_center_interp ||
5593 key->ps_prolog.states.force_linear_center_interp ||
5594 key->ps_prolog.states.bc_optimize_for_persp ||
5595 key->ps_prolog.states.bc_optimize_for_linear);
5596
5597 if (info->colors_read) {
5598 unsigned *color = shader->selector->color_attr_index;
5599
5600 if (shader->key.part.ps.prolog.color_two_side) {
5601 /* BCOLORs are stored after the last input. */
5602 key->ps_prolog.num_interp_inputs = info->num_inputs;
5603 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5604 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5605 }
5606
5607 for (unsigned i = 0; i < 2; i++) {
5608 unsigned interp = info->input_interpolate[color[i]];
5609 unsigned location = info->input_interpolate_loc[color[i]];
5610
5611 if (!(info->colors_read & (0xf << i*4)))
5612 continue;
5613
5614 key->ps_prolog.color_attr_index[i] = color[i];
5615
5616 if (shader->key.part.ps.prolog.flatshade_colors &&
5617 interp == TGSI_INTERPOLATE_COLOR)
5618 interp = TGSI_INTERPOLATE_CONSTANT;
5619
5620 switch (interp) {
5621 case TGSI_INTERPOLATE_CONSTANT:
5622 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5623 break;
5624 case TGSI_INTERPOLATE_PERSPECTIVE:
5625 case TGSI_INTERPOLATE_COLOR:
5626 /* Force the interpolation location for colors here. */
5627 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5628 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5629 if (shader->key.part.ps.prolog.force_persp_center_interp)
5630 location = TGSI_INTERPOLATE_LOC_CENTER;
5631
5632 switch (location) {
5633 case TGSI_INTERPOLATE_LOC_SAMPLE:
5634 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5635 shader->config.spi_ps_input_ena |=
5636 S_0286CC_PERSP_SAMPLE_ENA(1);
5637 break;
5638 case TGSI_INTERPOLATE_LOC_CENTER:
5639 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5640 shader->config.spi_ps_input_ena |=
5641 S_0286CC_PERSP_CENTER_ENA(1);
5642 break;
5643 case TGSI_INTERPOLATE_LOC_CENTROID:
5644 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5645 shader->config.spi_ps_input_ena |=
5646 S_0286CC_PERSP_CENTROID_ENA(1);
5647 break;
5648 default:
5649 assert(0);
5650 }
5651 break;
5652 case TGSI_INTERPOLATE_LINEAR:
5653 /* Force the interpolation location for colors here. */
5654 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5655 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5656 if (shader->key.part.ps.prolog.force_linear_center_interp)
5657 location = TGSI_INTERPOLATE_LOC_CENTER;
5658
5659 /* The VGPR assignment for non-monolithic shaders
5660 * works because InitialPSInputAddr is set on the
5661 * main shader and PERSP_PULL_MODEL is never used.
5662 */
5663 switch (location) {
5664 case TGSI_INTERPOLATE_LOC_SAMPLE:
5665 key->ps_prolog.color_interp_vgpr_index[i] =
5666 separate_prolog ? 6 : 9;
5667 shader->config.spi_ps_input_ena |=
5668 S_0286CC_LINEAR_SAMPLE_ENA(1);
5669 break;
5670 case TGSI_INTERPOLATE_LOC_CENTER:
5671 key->ps_prolog.color_interp_vgpr_index[i] =
5672 separate_prolog ? 8 : 11;
5673 shader->config.spi_ps_input_ena |=
5674 S_0286CC_LINEAR_CENTER_ENA(1);
5675 break;
5676 case TGSI_INTERPOLATE_LOC_CENTROID:
5677 key->ps_prolog.color_interp_vgpr_index[i] =
5678 separate_prolog ? 10 : 13;
5679 shader->config.spi_ps_input_ena |=
5680 S_0286CC_LINEAR_CENTROID_ENA(1);
5681 break;
5682 default:
5683 assert(0);
5684 }
5685 break;
5686 default:
5687 assert(0);
5688 }
5689 }
5690 }
5691 }
5692
5693 /**
5694 * Check whether a PS prolog is required based on the key.
5695 */
5696 static bool si_need_ps_prolog(const union si_shader_part_key *key)
5697 {
5698 return key->ps_prolog.colors_read ||
5699 key->ps_prolog.states.force_persp_sample_interp ||
5700 key->ps_prolog.states.force_linear_sample_interp ||
5701 key->ps_prolog.states.force_persp_center_interp ||
5702 key->ps_prolog.states.force_linear_center_interp ||
5703 key->ps_prolog.states.bc_optimize_for_persp ||
5704 key->ps_prolog.states.bc_optimize_for_linear ||
5705 key->ps_prolog.states.poly_stipple;
5706 }
5707
5708 /**
5709 * Compute the PS epilog key, which contains all the information needed to
5710 * build the PS epilog function.
5711 */
5712 static void si_get_ps_epilog_key(struct si_shader *shader,
5713 union si_shader_part_key *key)
5714 {
5715 struct tgsi_shader_info *info = &shader->selector->info;
5716 memset(key, 0, sizeof(*key));
5717 key->ps_epilog.colors_written = info->colors_written;
5718 key->ps_epilog.writes_z = info->writes_z;
5719 key->ps_epilog.writes_stencil = info->writes_stencil;
5720 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5721 key->ps_epilog.states = shader->key.part.ps.epilog;
5722 }
5723
5724 /**
5725 * Build the GS prolog function. Rotate the input vertices for triangle strips
5726 * with adjacency.
5727 */
5728 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
5729 union si_shader_part_key *key)
5730 {
5731 unsigned num_sgprs, num_vgprs;
5732 struct gallivm_state *gallivm = &ctx->gallivm;
5733 LLVMBuilderRef builder = gallivm->builder;
5734 LLVMTypeRef params[48]; /* 40 SGPRs (maximum) + some VGPRs */
5735 LLVMTypeRef returns[48];
5736 LLVMValueRef func, ret;
5737
5738 if (ctx->screen->b.chip_class >= GFX9) {
5739 num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
5740 num_vgprs = 5; /* ES inputs are not needed by GS */
5741 } else {
5742 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
5743 num_vgprs = 8;
5744 }
5745
5746 for (unsigned i = 0; i < num_sgprs; ++i) {
5747 params[i] = ctx->i32;
5748 returns[i] = ctx->i32;
5749 }
5750
5751 for (unsigned i = 0; i < num_vgprs; ++i) {
5752 params[num_sgprs + i] = ctx->i32;
5753 returns[num_sgprs + i] = ctx->f32;
5754 }
5755
5756 /* Create the function. */
5757 si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
5758 params, num_sgprs + num_vgprs, num_sgprs - 1, 0);
5759 func = ctx->main_fn;
5760
5761 /* Set the full EXEC mask for the prolog, because we are only fiddling
5762 * with registers here. The main shader part will set the correct EXEC
5763 * mask.
5764 */
5765 if (ctx->screen->b.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
5766 si_init_exec_full_mask(ctx);
5767
5768 /* Copy inputs to outputs. This should be no-op, as the registers match,
5769 * but it will prevent the compiler from overwriting them unintentionally.
5770 */
5771 ret = ctx->return_value;
5772 for (unsigned i = 0; i < num_sgprs; i++) {
5773 LLVMValueRef p = LLVMGetParam(func, i);
5774 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
5775 }
5776 for (unsigned i = 0; i < num_vgprs; i++) {
5777 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
5778 p = LLVMBuildBitCast(builder, p, ctx->f32, "");
5779 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
5780 }
5781
5782 if (key->gs_prolog.states.tri_strip_adj_fix) {
5783 /* Remap the input vertices for every other primitive. */
5784 const unsigned gfx6_vtx_params[6] = {
5785 num_sgprs,
5786 num_sgprs + 1,
5787 num_sgprs + 3,
5788 num_sgprs + 4,
5789 num_sgprs + 5,
5790 num_sgprs + 6
5791 };
5792 const unsigned gfx9_vtx_params[3] = {
5793 num_sgprs,
5794 num_sgprs + 1,
5795 num_sgprs + 4,
5796 };
5797 LLVMValueRef vtx_in[6], vtx_out[6];
5798 LLVMValueRef prim_id, rotate;
5799
5800 if (ctx->screen->b.chip_class >= GFX9) {
5801 for (unsigned i = 0; i < 3; i++) {
5802 vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
5803 vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
5804 }
5805 } else {
5806 for (unsigned i = 0; i < 6; i++)
5807 vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
5808 }
5809
5810 prim_id = LLVMGetParam(func, num_sgprs + 2);
5811 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
5812
5813 for (unsigned i = 0; i < 6; ++i) {
5814 LLVMValueRef base, rotated;
5815 base = vtx_in[i];
5816 rotated = vtx_in[(i + 4) % 6];
5817 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
5818 }
5819
5820 if (ctx->screen->b.chip_class >= GFX9) {
5821 for (unsigned i = 0; i < 3; i++) {
5822 LLVMValueRef hi, out;
5823
5824 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
5825 LLVMConstInt(ctx->i32, 16, 0), "");
5826 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
5827 out = LLVMBuildBitCast(builder, out, ctx->f32, "");
5828 ret = LLVMBuildInsertValue(builder, ret, out,
5829 gfx9_vtx_params[i], "");
5830 }
5831 } else {
5832 for (unsigned i = 0; i < 6; i++) {
5833 LLVMValueRef out;
5834
5835 out = LLVMBuildBitCast(builder, vtx_out[i], ctx->f32, "");
5836 ret = LLVMBuildInsertValue(builder, ret, out,
5837 gfx6_vtx_params[i], "");
5838 }
5839 }
5840 }
5841
5842 LLVMBuildRet(builder, ret);
5843 }
5844
5845 /**
5846 * Given a list of shader part functions, build a wrapper function that
5847 * runs them in sequence to form a monolithic shader.
5848 */
5849 static void si_build_wrapper_function(struct si_shader_context *ctx,
5850 LLVMValueRef *parts,
5851 unsigned num_parts,
5852 unsigned main_part,
5853 unsigned next_shader_first_part)
5854 {
5855 struct gallivm_state *gallivm = &ctx->gallivm;
5856 LLVMBuilderRef builder = ctx->gallivm.builder;
5857 /* PS epilog has one arg per color component */
5858 LLVMTypeRef param_types[48];
5859 LLVMValueRef initial[48], out[48];
5860 LLVMTypeRef function_type;
5861 unsigned num_params;
5862 unsigned num_out, initial_num_out;
5863 MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
5864 MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
5865 unsigned num_sgprs, num_vgprs;
5866 unsigned last_sgpr_param;
5867 unsigned gprs;
5868 struct lp_build_if_state if_state;
5869
5870 for (unsigned i = 0; i < num_parts; ++i) {
5871 lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
5872 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
5873 }
5874
5875 /* The parameters of the wrapper function correspond to those of the
5876 * first part in terms of SGPRs and VGPRs, but we use the types of the
5877 * main part to get the right types. This is relevant for the
5878 * dereferenceable attribute on descriptor table pointers.
5879 */
5880 num_sgprs = 0;
5881 num_vgprs = 0;
5882
5883 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
5884 num_params = LLVMCountParamTypes(function_type);
5885
5886 for (unsigned i = 0; i < num_params; ++i) {
5887 LLVMValueRef param = LLVMGetParam(parts[0], i);
5888
5889 if (ac_is_sgpr_param(param)) {
5890 assert(num_vgprs == 0);
5891 num_sgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5892 } else {
5893 num_vgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
5894 }
5895 }
5896 assert(num_vgprs + num_sgprs <= ARRAY_SIZE(param_types));
5897
5898 num_params = 0;
5899 last_sgpr_param = 0;
5900 gprs = 0;
5901 while (gprs < num_sgprs + num_vgprs) {
5902 LLVMValueRef param = LLVMGetParam(parts[main_part], num_params);
5903 unsigned size;
5904
5905 param_types[num_params] = LLVMTypeOf(param);
5906 if (gprs < num_sgprs)
5907 last_sgpr_param = num_params;
5908 size = llvm_get_type_size(param_types[num_params]) / 4;
5909 num_params++;
5910
5911 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
5912 assert(gprs + size <= num_sgprs + num_vgprs &&
5913 (gprs >= num_sgprs || gprs + size <= num_sgprs));
5914
5915 gprs += size;
5916 }
5917
5918 si_create_function(ctx, "wrapper", NULL, 0, param_types, num_params,
5919 last_sgpr_param,
5920 si_get_max_workgroup_size(ctx->shader));
5921
5922 if (is_merged_shader(ctx->shader))
5923 si_init_exec_full_mask(ctx);
5924
5925 /* Record the arguments of the function as if they were an output of
5926 * a previous part.
5927 */
5928 num_out = 0;
5929 num_out_sgpr = 0;
5930
5931 for (unsigned i = 0; i < num_params; ++i) {
5932 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
5933 LLVMTypeRef param_type = LLVMTypeOf(param);
5934 LLVMTypeRef out_type = i <= last_sgpr_param ? ctx->i32 : ctx->f32;
5935 unsigned size = llvm_get_type_size(param_type) / 4;
5936
5937 if (size == 1) {
5938 if (param_type != out_type)
5939 param = LLVMBuildBitCast(builder, param, out_type, "");
5940 out[num_out++] = param;
5941 } else {
5942 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
5943
5944 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
5945 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
5946 param_type = ctx->i64;
5947 }
5948
5949 if (param_type != vector_type)
5950 param = LLVMBuildBitCast(builder, param, vector_type, "");
5951
5952 for (unsigned j = 0; j < size; ++j)
5953 out[num_out++] = LLVMBuildExtractElement(
5954 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
5955 }
5956
5957 if (i <= last_sgpr_param)
5958 num_out_sgpr = num_out;
5959 }
5960
5961 memcpy(initial, out, sizeof(out));
5962 initial_num_out = num_out;
5963 initial_num_out_sgpr = num_out_sgpr;
5964
5965 /* Now chain the parts. */
5966 for (unsigned part = 0; part < num_parts; ++part) {
5967 LLVMValueRef in[48];
5968 LLVMValueRef ret;
5969 LLVMTypeRef ret_type;
5970 unsigned out_idx = 0;
5971
5972 num_params = LLVMCountParams(parts[part]);
5973 assert(num_params <= ARRAY_SIZE(param_types));
5974
5975 /* Merged shaders are executed conditionally depending
5976 * on the number of enabled threads passed in the input SGPRs. */
5977 if (is_merged_shader(ctx->shader) &&
5978 (part == 0 || part == next_shader_first_part)) {
5979 LLVMValueRef ena, count = initial[3];
5980
5981 /* The thread count for the 2nd shader is at bit-offset 8. */
5982 if (part == next_shader_first_part) {
5983 count = LLVMBuildLShr(builder, count,
5984 LLVMConstInt(ctx->i32, 8, 0), "");
5985 }
5986 count = LLVMBuildAnd(builder, count,
5987 LLVMConstInt(ctx->i32, 0x7f, 0), "");
5988 ena = LLVMBuildICmp(builder, LLVMIntULT,
5989 ac_get_thread_id(&ctx->ac), count, "");
5990 lp_build_if(&if_state, &ctx->gallivm, ena);
5991 }
5992
5993 /* Derive arguments for the next part from outputs of the
5994 * previous one.
5995 */
5996 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
5997 LLVMValueRef param;
5998 LLVMTypeRef param_type;
5999 bool is_sgpr;
6000 unsigned param_size;
6001 LLVMValueRef arg = NULL;
6002
6003 param = LLVMGetParam(parts[part], param_idx);
6004 param_type = LLVMTypeOf(param);
6005 param_size = llvm_get_type_size(param_type) / 4;
6006 is_sgpr = ac_is_sgpr_param(param);
6007
6008 if (is_sgpr) {
6009 #if HAVE_LLVM < 0x0400
6010 LLVMRemoveAttribute(param, LLVMByValAttribute);
6011 #else
6012 unsigned kind_id = LLVMGetEnumAttributeKindForName("byval", 5);
6013 LLVMRemoveEnumAttributeAtIndex(parts[part], param_idx + 1, kind_id);
6014 #endif
6015 lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
6016 }
6017
6018 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
6019 assert(is_sgpr || out_idx >= num_out_sgpr);
6020
6021 if (param_size == 1)
6022 arg = out[out_idx];
6023 else
6024 arg = lp_build_gather_values(gallivm, &out[out_idx], param_size);
6025
6026 if (LLVMTypeOf(arg) != param_type) {
6027 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
6028 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
6029 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
6030 } else {
6031 arg = LLVMBuildBitCast(builder, arg, param_type, "");
6032 }
6033 }
6034
6035 in[param_idx] = arg;
6036 out_idx += param_size;
6037 }
6038
6039 ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
6040
6041 if (is_merged_shader(ctx->shader) &&
6042 (part + 1 == next_shader_first_part ||
6043 part + 1 == num_parts)) {
6044 lp_build_endif(&if_state);
6045
6046 if (part + 1 == next_shader_first_part) {
6047 /* A barrier is required between 2 merged shaders. */
6048 si_llvm_emit_barrier(NULL, &ctx->bld_base, NULL);
6049
6050 /* The second half of the merged shader should use
6051 * the inputs from the toplevel (wrapper) function,
6052 * not the return value from the last call.
6053 *
6054 * That's because the last call was executed condi-
6055 * tionally, so we can't consume it in the main
6056 * block.
6057 */
6058 memcpy(out, initial, sizeof(initial));
6059 num_out = initial_num_out;
6060 num_out_sgpr = initial_num_out_sgpr;
6061 }
6062 continue;
6063 }
6064
6065 /* Extract the returned GPRs. */
6066 ret_type = LLVMTypeOf(ret);
6067 num_out = 0;
6068 num_out_sgpr = 0;
6069
6070 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
6071 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
6072
6073 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
6074
6075 for (unsigned i = 0; i < ret_size; ++i) {
6076 LLVMValueRef val =
6077 LLVMBuildExtractValue(builder, ret, i, "");
6078
6079 out[num_out++] = val;
6080
6081 if (LLVMTypeOf(val) == ctx->i32) {
6082 assert(num_out_sgpr + 1 == num_out);
6083 num_out_sgpr = num_out;
6084 }
6085 }
6086 }
6087 }
6088
6089 LLVMBuildRetVoid(builder);
6090 }
6091
/**
 * Compile a TGSI shader to an AMD shader binary.
 *
 * Compiles the main part, then for monolithic shaders builds the required
 * prolog/epilog parts and wraps everything into a single function before
 * running LLVM and producing the final binary and config.
 *
 * \param sscreen        screen
 * \param tm             LLVM target machine
 * \param shader         output shader (binary, config, info are filled)
 * \param is_monolithic  compile prologs/epilogs into one function
 * \param debug          debug callback
 * \return 0 on success, negative on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
	    !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
		tgsi_dump(sel->tokens, 0);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	ctx.load_system_value = declare_system_value;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	/* Monolithic VS: optional VS prolog + main part. */
	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->b.chip_class >= GFX9) {
			/* GFX9 merged LS+HS:
			 * [VS prolog], VS-as-LS main, TCS main, TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (ls->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !ls->vs_needs_prolog,
						  4 - !ls->vs_needs_prolog, 0,
						  ls->vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9 TCS: main part + TCS epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->b.chip_class >= GFX9) {
			/* GFX9 merged ES+GS:
			 * [ES prolog], ES main, GS prolog, GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				/* NOTE(review): this reads tcs.ls_prolog for the
				 * GS's ES prolog — presumably the union aliases
				 * the VS prolog state; confirm against the
				 * si_shader_key layout. */
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9 GS: GS prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS: [prolog], main part, epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    r600_can_dump_shader(&sscreen->b, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;

		/* Each enabled SPI input adds its VGPR count; the order below
		 * matches the order the hardware assigns PS input VGPRs. */
		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	return 0;
}
6400
6401 /**
6402 * Create, compile and return a shader part (prolog or epilog).
6403 *
6404 * \param sscreen screen
6405 * \param list list of shader parts of the same category
6406 * \param type shader type
6407 * \param key shader part key
6408 * \param prolog whether the part being requested is a prolog
6409 * \param tm LLVM target machine
6410 * \param debug debug callback
6411 * \param build the callback responsible for building the main function
6412 * \return non-NULL on success
6413 */
6414 static struct si_shader_part *
6415 si_get_shader_part(struct si_screen *sscreen,
6416 struct si_shader_part **list,
6417 enum pipe_shader_type type,
6418 bool prolog,
6419 union si_shader_part_key *key,
6420 LLVMTargetMachineRef tm,
6421 struct pipe_debug_callback *debug,
6422 void (*build)(struct si_shader_context *,
6423 union si_shader_part_key *),
6424 const char *name)
6425 {
6426 struct si_shader_part *result;
6427
6428 mtx_lock(&sscreen->shader_parts_mutex);
6429
6430 /* Find existing. */
6431 for (result = *list; result; result = result->next) {
6432 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6433 mtx_unlock(&sscreen->shader_parts_mutex);
6434 return result;
6435 }
6436 }
6437
6438 /* Compile a new one. */
6439 result = CALLOC_STRUCT(si_shader_part);
6440 result->key = *key;
6441
6442 struct si_shader shader = {};
6443 struct si_shader_context ctx;
6444 struct gallivm_state *gallivm = &ctx.gallivm;
6445
6446 si_init_shader_ctx(&ctx, sscreen, tm);
6447 ctx.shader = &shader;
6448 ctx.type = type;
6449
6450 switch (type) {
6451 case PIPE_SHADER_VERTEX:
6452 break;
6453 case PIPE_SHADER_TESS_CTRL:
6454 assert(!prolog);
6455 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6456 break;
6457 case PIPE_SHADER_GEOMETRY:
6458 assert(prolog);
6459 break;
6460 case PIPE_SHADER_FRAGMENT:
6461 if (prolog)
6462 shader.key.part.ps.prolog = key->ps_prolog.states;
6463 else
6464 shader.key.part.ps.epilog = key->ps_epilog.states;
6465 break;
6466 default:
6467 unreachable("bad shader part");
6468 }
6469
6470 build(&ctx, key);
6471
6472 /* Compile. */
6473 si_llvm_optimize_module(&ctx);
6474
6475 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
6476 gallivm->module, debug, ctx.type, name)) {
6477 FREE(result);
6478 result = NULL;
6479 goto out;
6480 }
6481
6482 result->next = *list;
6483 *list = result;
6484
6485 out:
6486 si_llvm_dispose(&ctx);
6487 mtx_unlock(&sscreen->shader_parts_mutex);
6488 return result;
6489 }
6490
6491 /**
6492 * Build the vertex shader prolog function.
6493 *
6494 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6495 * All inputs are returned unmodified. The vertex load indices are
6496 * stored after them, which will be used by the API VS for fetching inputs.
6497 *
6498 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6499 * input_v0,
6500 * input_v1,
6501 * input_v2,
6502 * input_v3,
6503 * (VertexID + BaseVertex),
6504 * (InstanceID + StartInstance),
6505 * (InstanceID / 2 + StartInstance)
6506 */
6507 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
6508 union si_shader_part_key *key)
6509 {
6510 struct gallivm_state *gallivm = &ctx->gallivm;
6511 LLVMTypeRef *params, *returns;
6512 LLVMValueRef ret, func;
6513 int last_sgpr, num_params, num_returns, i;
6514 unsigned first_vs_vgpr = key->vs_prolog.num_input_sgprs +
6515 key->vs_prolog.num_merged_next_stage_vgprs;
6516 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
6517 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
6518 num_input_vgprs;
6519 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
6520
6521 ctx->param_vertex_id = first_vs_vgpr;
6522 ctx->param_instance_id = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
6523
6524 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6525 params = alloca(num_all_input_regs * sizeof(LLVMTypeRef));
6526 returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
6527 sizeof(LLVMTypeRef));
6528 num_params = 0;
6529 num_returns = 0;
6530
6531 /* Declare input and output SGPRs. */
6532 num_params = 0;
6533 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6534 params[num_params++] = ctx->i32;
6535 returns[num_returns++] = ctx->i32;
6536 }
6537 last_sgpr = num_params - 1;
6538
6539 /* Preloaded VGPRs (outputs must be floats) */
6540 for (i = 0; i < num_input_vgprs; i++) {
6541 params[num_params++] = ctx->i32;
6542 returns[num_returns++] = ctx->f32;
6543 }
6544
6545 /* Vertex load indices. */
6546 for (i = 0; i <= key->vs_prolog.last_input; i++)
6547 returns[num_returns++] = ctx->f32;
6548
6549 /* Create the function. */
6550 si_create_function(ctx, "vs_prolog", returns, num_returns, params,
6551 num_params, last_sgpr, 0);
6552 func = ctx->main_fn;
6553
6554 if (key->vs_prolog.num_merged_next_stage_vgprs &&
6555 !key->vs_prolog.is_monolithic)
6556 si_init_exec_from_input(ctx, 3, 0);
6557
6558 /* Copy inputs to outputs. This should be no-op, as the registers match,
6559 * but it will prevent the compiler from overwriting them unintentionally.
6560 */
6561 ret = ctx->return_value;
6562 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6563 LLVMValueRef p = LLVMGetParam(func, i);
6564 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6565 }
6566 for (; i < num_params; i++) {
6567 LLVMValueRef p = LLVMGetParam(func, i);
6568 p = LLVMBuildBitCast(gallivm->builder, p, ctx->f32, "");
6569 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
6570 }
6571
6572 /* Compute vertex load indices from instance divisors. */
6573 for (i = 0; i <= key->vs_prolog.last_input; i++) {
6574 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
6575 LLVMValueRef index;
6576
6577 if (divisor) {
6578 /* InstanceID / Divisor + StartInstance */
6579 index = get_instance_index_for_fetch(ctx,
6580 user_sgpr_base +
6581 SI_SGPR_START_INSTANCE,
6582 divisor);
6583 } else {
6584 /* VertexID + BaseVertex */
6585 index = LLVMBuildAdd(gallivm->builder,
6586 LLVMGetParam(func, ctx->param_vertex_id),
6587 LLVMGetParam(func, user_sgpr_base +
6588 SI_SGPR_BASE_VERTEX), "");
6589 }
6590
6591 index = LLVMBuildBitCast(gallivm->builder, index, ctx->f32, "");
6592 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
6593 num_params++, "");
6594 }
6595
6596 si_llvm_build_ret(ctx, ret);
6597 }
6598
6599 static bool si_get_vs_prolog(struct si_screen *sscreen,
6600 LLVMTargetMachineRef tm,
6601 struct si_shader *shader,
6602 struct pipe_debug_callback *debug,
6603 struct si_shader *main_part,
6604 const struct si_vs_prolog_bits *key)
6605 {
6606 struct si_shader_selector *vs = main_part->selector;
6607
6608 /* The prolog is a no-op if there are no inputs. */
6609 if (!vs->vs_needs_prolog)
6610 return true;
6611
6612 /* Get the prolog. */
6613 union si_shader_part_key prolog_key;
6614 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
6615 key, shader, &prolog_key);
6616
6617 shader->prolog =
6618 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6619 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
6620 debug, si_build_vs_prolog_function,
6621 "Vertex Shader Prolog");
6622 return shader->prolog != NULL;
6623 }
6624
6625 /**
6626 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6627 */
6628 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
6629 LLVMTargetMachineRef tm,
6630 struct si_shader *shader,
6631 struct pipe_debug_callback *debug)
6632 {
6633 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
6634 &shader->key.part.vs.prolog);
6635 }
6636
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 *
 * The epilog's SGPR parameter list mirrors the main TCS function's layout
 * position-for-position; only the slots this epilog actually reads get
 * named ctx->param_* indices, the rest are placeholders. Do not reorder.
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMTypeRef params[32];
	LLVMValueRef func;
	int last_sgpr, num_params = 0;

	if (ctx->screen->b.chip_class >= GFX9) {
		/* Merged LS-HS signature (GFX9): unnamed i32/i64 entries are
		 * placeholder SGPRs belonging to the main function.
		 */
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32; /* wave info */
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
	} else {
		/* Pre-GFX9 (separate TCS) signature. */
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[num_params++] = ctx->i64;
		params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_addr_base64k = num_params++] = ctx->i32;
		params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
		params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
	}
	last_sgpr = num_params - 1;

	/* VGPR inputs follow the SGPRs. */
	params[num_params++] = ctx->i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx->i32; /* invocation ID within the patch */
	params[num_params++] = ctx->i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. No return values; the last argument is the
	 * maximum workgroup size (NOTE(review): presumably — confirm against
	 * si_create_function), which differs for SI vs CIK+.
	 */
	si_create_function(ctx, "tcs_epilog", NULL, 0, params, num_params, last_sgpr,
			   ctx->screen->b.chip_class >= CIK ? 128 : 64);
	declare_lds_as_pointer(ctx);
	func = ctx->main_fn;

	/* The three VGPRs declared above feed the tess-factor store. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	LLVMBuildRetVoid(gallivm->builder);
}
6703
6704 /**
6705 * Select and compile (or reuse) TCS parts (epilog).
6706 */
6707 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6708 LLVMTargetMachineRef tm,
6709 struct si_shader *shader,
6710 struct pipe_debug_callback *debug)
6711 {
6712 if (sscreen->b.chip_class >= GFX9) {
6713 struct si_shader *ls_main_part =
6714 shader->key.part.tcs.ls->main_shader_part_ls;
6715
6716 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
6717 &shader->key.part.tcs.ls_prolog))
6718 return false;
6719
6720 shader->previous_stage = ls_main_part;
6721 }
6722
6723 /* Get the epilog. */
6724 union si_shader_part_key epilog_key;
6725 memset(&epilog_key, 0, sizeof(epilog_key));
6726 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
6727
6728 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6729 PIPE_SHADER_TESS_CTRL, false,
6730 &epilog_key, tm, debug,
6731 si_build_tcs_epilog_function,
6732 "Tessellation Control Shader Epilog");
6733 return shader->epilog != NULL;
6734 }
6735
6736 /**
6737 * Select and compile (or reuse) GS parts (prolog).
6738 */
6739 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
6740 LLVMTargetMachineRef tm,
6741 struct si_shader *shader,
6742 struct pipe_debug_callback *debug)
6743 {
6744 if (sscreen->b.chip_class >= GFX9) {
6745 struct si_shader *es_main_part =
6746 shader->key.part.gs.es->main_shader_part_es;
6747
6748 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
6749 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
6750 &shader->key.part.gs.vs_prolog))
6751 return false;
6752
6753 shader->previous_stage = es_main_part;
6754 }
6755
6756 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
6757 return true;
6758
6759 union si_shader_part_key prolog_key;
6760 memset(&prolog_key, 0, sizeof(prolog_key));
6761 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
6762
6763 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
6764 PIPE_SHADER_GEOMETRY, true,
6765 &prolog_key, tm, debug,
6766 si_build_gs_prolog_function,
6767 "Geometry Shader Prolog");
6768 return shader->prolog2 != NULL;
6769 }
6770
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;

	/* The caller must have decided a prolog is actually needed. */
	assert(si_need_ps_prolog(key));

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx->i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx->f32;

	/* Declare outputs (same as inputs + add colors if needed).
	 * The return-type list deliberately reuses the params array: the
	 * first num_params entries are the passed-through inputs, followed
	 * by one f32 per interpolated color channel.
	 */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", params, num_returns, params,
			   num_params, last_sgpr, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx->i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					  si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx->i1, "");

		/* VGPR offsets below (base + N) index the fixed PS input
		 * layout: persp sample/center/centroid at +0/+2/+4,
		 * linear sample/center/centroid at +6/+8/+10 (i,j pairs).
		 */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors. Up to two COLOR inputs, 4 channels each
	 * (colors_read holds one nibble of channel flags per color).
	 */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx->i32, "");
		}

		/* NOTE(review): color[] appears to be filled by interp_fs_input
		 * for the channels enabled in colors_read — confirm that helper
		 * writes every channel read below.
		 */
		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append the interpolated channels after the pass-through
		 * inputs in the return value.
		 */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7014
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 *
 * Inputs are a few SGPRs (buffer descriptors, alpha-ref) followed by one f32
 * VGPR per written color channel, then optional depth/stencil/samplemask
 * VGPRs. The function returns nothing; it only emits exports.
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	/* Worst case: 16 SGPR slots + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params = 0, i;
	struct si_ps_exports exp = {};

	/* Declare input SGPRs. */
	params[ctx->param_rw_buffers = num_params++] = ctx->i64;
	params[ctx->param_const_and_shader_buffers = num_params++] = ctx->i64;
	params[ctx->param_samplers_and_images = num_params++] = ctx->i64;
	assert(num_params == SI_PARAM_ALPHA_REF);
	params[SI_PARAM_ALPHA_REF] = ctx->f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 per written color buffer, plus one each for
	 * Z, stencil and samplemask when written.
	 */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Guarantee the samplemask VGPR slot exists regardless of what is
	 * written, so its index is stable.
	 */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, params, num_params,
			   last_sgpr, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			/* The last MRT with both a written color and a
			 * non-zero export format is the "done" export.
			 */
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Emit one export per written MRT, consuming 4 VGPRs each. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		/* NOTE(review): num_params - 1 looks like the samplemask VGPR
		 * index guaranteed above by PS_EPILOG_SAMPLEMASK_MIN_LOC —
		 * confirm against si_export_mrt_color.
		 */
		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
}
7115
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also patches shader->config.spi_ps_input_ena to agree with the chosen
 * prolog states; the fixups below are order-dependent (each one may clear
 * bits a later check reads), so do not reorder them.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced interpolation mode replaces the modes the prolog
	 * overwrites with the single mode it actually reads.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7212
7213 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7214 unsigned *lds_size)
7215 {
7216 /* SPI barrier management bug:
7217 * Make sure we have at least 4k of LDS in use to avoid the bug.
7218 * It applies to workgroup sizes of more than one wavefront.
7219 */
7220 if (sscreen->b.family == CHIP_BONAIRE ||
7221 sscreen->b.family == CHIP_KABINI ||
7222 sscreen->b.family == CHIP_MULLINS)
7223 *lds_size = MAX2(*lds_size, 8);
7224 }
7225
7226 static void si_fix_resource_usage(struct si_screen *sscreen,
7227 struct si_shader *shader)
7228 {
7229 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7230
7231 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7232
7233 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7234 si_get_max_workgroup_size(shader) > 64) {
7235 si_multiwave_lds_size_workaround(sscreen,
7236 &shader->config.lds_size);
7237 }
7238 }
7239
/* Create a complete shader variant: either compile it monolithically, or
 * assemble it from the precompiled main part plus selected prolog/epilog
 * parts, then fix up resource usage, dump it, and upload the binary.
 *
 * \return 0 on success, negative on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over.
		 * is_binary_shared marks the binary as owned by the main
		 * part, so si_shader_destroy won't free it twice.
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader must
		 * reserve the maximum each part needs.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			/* Merged shaders (GFX9): also merge spill/scratch usage
			 * and instance-ID usage from the previous stage.
			 */
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
7369
/* Release all resources owned by a shader variant. */
void si_shader_destroy(struct si_shader *shader)
{
	/* Drop the reference to the scratch buffer, if any. */
	if (shader->scratch_bo)
		r600_resource_reference(&shader->scratch_bo, NULL);

	/* Drop the reference to the uploaded shader code buffer. */
	r600_resource_reference(&shader->bo, NULL);

	/* The binary is owned by the main shader part when is_binary_shared
	 * is set (see si_shader_create); only free it when we own it.
	 */
	if (!shader->is_binary_shared)
		radeon_shader_binary_clean(&shader->binary);

	free(shader->shader_log);
}