radeonsi: print shader-db stats for main parts, not final binaries
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "gallivm/lp_bld_const.h"
25 #include "gallivm/lp_bld_gather.h"
26 #include "gallivm/lp_bld_intr.h"
27 #include "gallivm/lp_bld_logic.h"
28 #include "gallivm/lp_bld_arit.h"
29 #include "gallivm/lp_bld_flow.h"
30 #include "gallivm/lp_bld_misc.h"
31 #include "util/u_memory.h"
32 #include "util/u_string.h"
33 #include "tgsi/tgsi_build.h"
34 #include "tgsi/tgsi_util.h"
35 #include "tgsi/tgsi_dump.h"
36
37 #include "ac_binary.h"
38 #include "ac_llvm_util.h"
39 #include "ac_exp_param.h"
40 #include "ac_shader_util.h"
41 #include "si_shader_internal.h"
42 #include "si_pipe.h"
43 #include "sid.h"
44
45 #include "compiler/nir/nir.h"
46
/* Relocation symbol names for the scratch buffer resource descriptor;
 * presumably patched with the real descriptor words at upload time —
 * see the users of these symbols elsewhere in this file. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
52
/* One shader output: its four component values plus semantic info. */
struct si_shader_output_values
{
	LLVMValueRef values[4];		/* one value per component (x, y, z, w) */
	unsigned semantic_name;		/* TGSI_SEMANTIC_* */
	unsigned semantic_index;
	ubyte vertex_stream[4];		/* per-component stream — presumably the GS vertex stream; confirm at users */
};
60
/**
 * Used to collect types and other info about arguments of the LLVM function
 * before the function is created.
 */
struct si_function_info {
	LLVMTypeRef types[100];		/* LLVM type of each argument */
	LLVMValueRef *assign[100];	/* optional out-pointer per argument (NULL if none) */
	unsigned num_sgpr_params;	/* SGPR args occupy indices [0, num_sgpr_params) */
	unsigned num_params;		/* total number of collected arguments */
};
71
/* Which register file a function argument is passed in: scalar (SGPR,
 * uniform across the wave) or vector (VGPR, per-lane). */
enum si_arg_regfile {
	ARG_SGPR,
	ARG_VGPR
};
76
77 static void si_init_shader_ctx(struct si_shader_context *ctx,
78 struct si_screen *sscreen,
79 LLVMTargetMachineRef tm);
80
81 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
82 struct lp_build_tgsi_context *bld_base,
83 struct lp_build_emit_data *emit_data);
84
85 static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
86 FILE *f);
87
88 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
89 union si_shader_part_key *key);
90 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
91 union si_shader_part_key *key);
92 static void si_build_ps_prolog_function(struct si_shader_context *ctx,
93 union si_shader_part_key *key);
94 static void si_build_ps_epilog_function(struct si_shader_context *ctx,
95 union si_shader_part_key *key);
96
97 /* Ideally pass the sample mask input to the PS epilog as v14, which
98 * is its usual location, so that the shader doesn't have to add v_mov.
99 */
100 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 14
101
102 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
103 LLVMTypeRef type)
104 {
105 if (type == ctx->ac.i64 || type == ctx->ac.f64)
106 return true;
107
108 return false;
109 }
110
111 static bool is_merged_shader(struct si_shader *shader)
112 {
113 if (shader->selector->screen->info.chip_class <= VI)
114 return false;
115
116 return shader->key.as_ls ||
117 shader->key.as_es ||
118 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
119 shader->selector->type == PIPE_SHADER_GEOMETRY;
120 }
121
122 static void si_init_function_info(struct si_function_info *fninfo)
123 {
124 fninfo->num_params = 0;
125 fninfo->num_sgpr_params = 0;
126 }
127
/* Append one argument of the given register file and type to the collected
 * signature. If "assign" is non-NULL, it records where the created LLVM
 * parameter value should be stored — presumably consumed when the function
 * is actually built; confirm at si_create_function. Returns the argument's
 * index. */
static unsigned add_arg_assign(struct si_function_info *fninfo,
			       enum si_arg_regfile regfile, LLVMTypeRef type,
			       LLVMValueRef *assign)
{
	/* All SGPR arguments must be added before the first VGPR argument. */
	assert(regfile != ARG_SGPR || fninfo->num_sgpr_params == fninfo->num_params);

	unsigned idx = fninfo->num_params++;
	assert(idx < ARRAY_SIZE(fninfo->types));

	if (regfile == ARG_SGPR)
		fninfo->num_sgpr_params = fninfo->num_params;

	fninfo->types[idx] = type;
	fninfo->assign[idx] = assign;
	return idx;
}
144
145 static unsigned add_arg(struct si_function_info *fninfo,
146 enum si_arg_regfile regfile, LLVMTypeRef type)
147 {
148 return add_arg_assign(fninfo, regfile, type, NULL);
149 }
150
/* Like add_arg_assign, but asserts that the argument lands at the expected
 * index "idx" (used when parameter positions are fixed by an ABI). */
static void add_arg_assign_checked(struct si_function_info *fninfo,
				   enum si_arg_regfile regfile, LLVMTypeRef type,
				   LLVMValueRef *assign, unsigned idx)
{
	MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
	assert(actual == idx);
}
158
159 static void add_arg_checked(struct si_function_info *fninfo,
160 enum si_arg_regfile regfile, LLVMTypeRef type,
161 unsigned idx)
162 {
163 add_arg_assign_checked(fninfo, regfile, type, NULL, idx);
164 }
165
/**
 * Returns a unique index for a per-patch semantic name and index. The index
 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
 * can be calculated.
 *
 * Layout: 0 = TESSOUTER, 1 = TESSINNER, 2.. = generic PATCH attributes.
 */
unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_TESSOUTER:
		return 0;
	case TGSI_SEMANTIC_TESSINNER:
		return 1;
	case TGSI_SEMANTIC_PATCH:
		/* 2 + index must stay below 32 for the bitmask to fit. */
		assert(index < 30);
		return 2 + index;

	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
187
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 *
 * Layout: POSITION first, then all GENERICs, then the remaining fixed
 * semantics at offsets relative to SI_MAX_IO_GENERIC.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings). GENERIC should be placed right
		 * after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_CLIPDIST:
		/* Two vec4 slots hold up to 8 clip distances. */
		assert(index <= 1);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 4;
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 5;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6;
	case TGSI_SEMANTIC_PRIMID:
		return SI_MAX_IO_GENERIC + 7;
	case TGSI_SEMANTIC_COLOR: /* these alias */
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 8 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		/* TEXCOORD occupies the last slots; make sure it still fits
		 * in the 64-bit bitmask. */
		assert(SI_MAX_IO_GENERIC + 10 + index < 64);
		return SI_MAX_IO_GENERIC + 10 + index;
	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
235
/**
 * Get the value of a shader input parameter and extract a bitfield.
 *
 * Returns ((value >> rshift) & ((1 << bitwidth) - 1)) as an i32.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
				      LLVMValueRef value, unsigned rshift,
				      unsigned bitwidth)
{
	/* Bitwise ops need an integer; reinterpret a float input first. */
	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
		value = ac_to_integer(&ctx->ac, value);

	if (rshift)
		value = LLVMBuildLShr(ctx->ac.builder, value,
				      LLVMConstInt(ctx->i32, rshift, 0), "");

	/* If the field reaches bit 31, the logical shift already cleared
	 * the high bits, so no mask is needed (and 1 << 32 would be UB). */
	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->ac.builder, value,
				     LLVMConstInt(ctx->i32, mask, 0), "");
	}

	return value;
}
258
259 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
260 unsigned param, unsigned rshift,
261 unsigned bitwidth)
262 {
263 LLVMValueRef value = LLVMGetParam(ctx->main_fn, param);
264
265 return unpack_llvm_param(ctx, value, rshift, bitwidth);
266 }
267
/* Return the patch ID relative to the current batch of patches.
 * In TCS it lives in bits [0:7] of the tcs_rel_ids system value; in TES
 * it's a dedicated input parameter. Other stages have no rel patch ID. */
static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		return unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}
283
284 /* Tessellation shaders pass outputs to the next shader using LDS.
285 *
286 * LS outputs = TCS inputs
287 * TCS outputs = TES inputs
288 *
289 * The LDS layout is:
290 * - TCS inputs for patch 0
291 * - TCS inputs for patch 1
292 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
293 * - ...
294 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
295 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
296 * - TCS outputs for patch 1
297 * - Per-patch TCS outputs for patch 1
298 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
299 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
300 * - ...
301 *
302 * All three shaders VS(LS), TCS, TES share the same LDS space.
303 */
304
/* Per-patch stride of the TCS input (LS output) area in LDS, read from
 * bits [8:20] of the vs_state_bits SGPR. */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
310
/* Dword stride of one TCS output vertex: 4 dwords (one vec4 slot) per
 * written output. The fixed-func TCS only stores the inputs it has to
 * copy through, so its stride is based on ff_tcs_inputs_to_copy. */
static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
{
	assert(ctx->type == PIPE_SHADER_TESS_CTRL);

	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;

	return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
}
320
321 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
322 {
323 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
324
325 return LLVMConstInt(ctx->i32, stride, 0);
326 }
327
/* Dword stride between consecutive patches in the TCS output area of LDS:
 * per-vertex outputs for all output vertices plus the per-patch outputs. */
static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	/* Fixed-func TCS: the stride is supplied externally in bits [0:12]
	 * of tcs_out_lds_layout instead of being derived from shader info. */
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);

	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	/* vertices * per-vertex stride + 4 dwords per per-patch output. */
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}
341
/* LDS offset of patch 0's TCS outputs. The packed field in bits [0:15] of
 * tcs_out_lds_offsets is scaled by 4 here — apparently stored in 4-dword
 * granularity; confirm against the code that packs tcs_out_lds_offsets. */
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     0, 16),
				4);
}
351
/* LDS offset of patch 0's per-patch TCS outputs, from bits [16:31] of
 * tcs_out_lds_offsets, scaled by 4 like get_tcs_out_patch0_offset. */
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     16, 16),
				4);
}
361
362 static LLVMValueRef
363 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
364 {
365 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
366 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
367
368 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
369 }
370
371 static LLVMValueRef
372 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
373 {
374 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
375 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
376 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
377
378 return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
379 LLVMBuildMul(ctx->ac.builder, patch_stride,
380 rel_patch_id, ""),
381 "");
382 }
383
384 static LLVMValueRef
385 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
386 {
387 LLVMValueRef patch0_patch_data_offset =
388 get_tcs_out_patch0_patch_data_offset(ctx);
389 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
390 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
391
392 return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
393 LLVMBuildMul(ctx->ac.builder, patch_stride,
394 rel_patch_id, ""),
395 "");
396 }
397
/* Number of TCS output vertices per patch, as an i32. Uses the compile-time
 * constant when the TCS declares it; otherwise reads it from bits [6:11] of
 * tcs_offchip_layout. */
static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	return unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
}
410
/* Dword stride of one TCS input vertex (= one LS output vertex).
 * In the VS (LS) it's derived from the VS's own outputs; in a monolithic
 * GFX9 merged LS-HS it comes from the known LS part; otherwise it must be
 * read at runtime from bits [24:31] of vs_state_bits. */
static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		stride = util_last_bit64(ctx->shader->selector->outputs_written);
		return LLVMConstInt(ctx->i32, stride * 4, 0);

	case PIPE_SHADER_TESS_CTRL:
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = util_last_bit64(ctx->shader->key.part.tcs.ls->outputs_written);
			return LLVMConstInt(ctx->i32, stride * 4, 0);
		}
		return unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}
433
/* Compute the instance index used for an instanced vertex fetch:
 * instance_id / divisor + start_instance. */
static LLVMValueRef get_instance_index_for_fetch(
	struct si_shader_context *ctx,
	unsigned param_start_instance, LLVMValueRef divisor)
{
	LLVMValueRef result = ctx->abi.instance_id;

	/* The division must be done before START_INSTANCE is added. */
	if (divisor != ctx->i32_1)
		result = LLVMBuildUDiv(ctx->ac.builder, result, divisor, "");

	return LLVMBuildAdd(ctx->ac.builder, result,
			    LLVMGetParam(ctx->main_fn, param_start_instance), "");
}
447
/* Bitcast <4 x float> to <2 x double>, extract the component, and convert
 * to float. Used to reassemble 64-bit vertex formats that were fetched as
 * raw dwords. "double_index" selects which of the two doubles (0 or 1). */
static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
					    LLVMValueRef vec4,
					    unsigned double_index)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->ac.context);
	LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
					      LLVMVectorType(f64, 2), "");
	LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
	LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
	/* Demote f64 -> f32; precision loss is inherent to this path. */
	return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
}
462
463 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
464 LLVMValueRef i32, unsigned index)
465 {
466 assert(index <= 1);
467
468 if (index == 1)
469 return LLVMBuildAShr(ctx->ac.builder, i32,
470 LLVMConstInt(ctx->i32, 16, 0), "");
471
472 return LLVMBuildSExt(ctx->ac.builder,
473 LLVMBuildTrunc(ctx->ac.builder, i32,
474 ctx->ac.i16, ""),
475 ctx->i32, "");
476 }
477
/**
 * Load vertex shader input "input_index" into out[0..3] as f32 values.
 *
 * Two paths:
 * - Blit shaders (TGSI_PROPERTY_VS_BLIT_SGPRS nonzero): inputs are packed
 *   into user SGPRs rather than fetched from vertex buffers. Input 0 is the
 *   position (x/y picked per vertex from two packed sint16 pairs, constant
 *   z, w = 1); input 1 is either a color (4 SGPRs) or texture coordinates.
 * - Regular shaders: fetch from the vertex buffer descriptor list, with
 *   multiple loads and value fixups for formats the hardware can't fetch
 *   natively (selected by key.mono.vs_fix_fetch).
 */
void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	unsigned vs_blit_property =
		ctx->shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

	if (vs_blit_property) {
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		/* Vertices 0 and 1 use (x1, *), vertex 2 uses (x2, *). */
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			/* SGPR 2 holds the constant depth value. */
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      ctx->param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			/* Texcoords are selected per vertex exactly like the
			 * position x/y above, plus constant z/w in SGPRs 7-8. */
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       4, true);
	}

	/* Break up the vec4 into individual components */
	/* Note: this always unpacks input[0]; the multi-fetch formats below
	 * overwrite out[] with values from input[1..2] as needed. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
						    input[0], llvm_chan, "");
	}

	/* Apply per-format fixups that the fetch hardware can't do. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			/* SNORM range is [-1, 1]; -2 must clamp to -1. */
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000; /* 16.16 fixed point */
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* Three 2-dword loads, one double each. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two 4-dword loads, two doubles each. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* One load per component; take .x of each. */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			/* Integer formats: alpha = integer 1, bitcast to float. */
			out[3] = ac_to_float(&ctx->ac, ctx->i32_1);
		}
		break;
	}
}
746
/* Thin wrapper around si_llvm_load_input_vs; "decl" is unused because the
 * input index alone identifies the attribute. Presumably kept to match a
 * declare-input callback signature — confirm at the caller. */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}
755
/* Return the primitive ID system value for the current stage. Only
 * component 0 carries the ID; any other swizzle reads as 0. */
static LLVMValueRef get_primitive_id(struct si_shader_context *ctx,
				     unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ctx->abi.tcs_patch_id;
	case PIPE_SHADER_TESS_EVAL:
		return ctx->abi.tes_patch_id;
	case PIPE_SHADER_GEOMETRY:
		return ctx->abi.gs_prim_id;
	default:
		assert(0);
		return ctx->i32_0;
	}
}
777
/**
 * Return the value of tgsi_ind_register for indexing.
 * This is the indirect index with the constant offset added to it.
 *
 * Computes: fetch(ind) * addr_mul + rel_index, as an i32.
 */
LLVMValueRef si_get_indirect_index(struct si_shader_context *ctx,
				   const struct tgsi_ind_register *ind,
				   unsigned addr_mul,
				   int rel_index)
{
	LLVMValueRef result;

	if (ind->File == TGSI_FILE_ADDRESS) {
		/* Address registers are kept in allocas; load the value. */
		result = ctx->addrs[ind->Index][ind->Swizzle];
		result = LLVMBuildLoad(ctx->ac.builder, result, "");
	} else {
		/* Any other register file: go through the generic TGSI
		 * fetch callback for that file. */
		struct tgsi_full_src_register src = {};

		src.Register.File = ind->File;
		src.Register.Index = ind->Index;

		/* Set the second index to 0 for constants. */
		if (ind->File == TGSI_FILE_CONSTANT)
			src.Register.Dimension = 1;

		result = ctx->bld_base.emit_fetch_funcs[ind->File](&ctx->bld_base, &src,
								   TGSI_TYPE_SIGNED,
								   ind->Swizzle);
		result = ac_to_integer(&ctx->ac, result);
	}

	if (addr_mul != 1)
		result = LLVMBuildMul(ctx->ac.builder, result,
				      LLVMConstInt(ctx->i32, addr_mul, 0), "");
	result = LLVMBuildAdd(ctx->ac.builder, result,
			      LLVMConstInt(ctx->i32, rel_index, 0), "");
	return result;
}
815
816 /**
817 * Like si_get_indirect_index, but restricts the return value to a (possibly
818 * undefined) value inside [0..num).
819 */
820 LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
821 const struct tgsi_ind_register *ind,
822 int rel_index, unsigned num)
823 {
824 LLVMValueRef result = si_get_indirect_index(ctx, ind, 1, rel_index);
825
826 return si_llvm_bound_index(ctx, result, num);
827 }
828
/* Compute an LDS dword address for an input/output:
 *   base_addr
 *   + vertex_index * vertex_dw_stride   (if per-vertex)
 *   + param_index * 4                   (if indirectly indexed)
 *   + unique_io_index(name, index) * 4
 * "name"/"index" are the semantic arrays from tgsi_shader_info, indexed by
 * input_index. is_patch selects the per-patch unique-index numbering. */
static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							unsigned input_index,
							ubyte *name,
							ubyte *index,
							bool is_patch)
{
	if (vertex_dw_stride) {
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, vertex_index,
						      vertex_dw_stride, ""), "");
	}

	if (param_index) {
		/* Each attribute occupies 4 dwords (one vec4). */
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, param_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");
	}

	int param = is_patch ?
		si_shader_io_get_unique_index_patch(name[input_index],
						    index[input_index]) :
		si_shader_io_get_unique_index(name[input_index],
					      index[input_index]);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
861
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * Exactly one of "dst" and "src" is used; src takes precedence when set.
 * The register's dimension (if any) gives the vertex index, and indirect
 * addressing is resolved relative to the register's array, if it has one.
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int input_index;
	struct tgsi_full_dst_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef ind_index = NULL;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		if (reg.Dimension.Indirect)
			vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
							     1, reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		/* The indirect offset is made relative to the first element
		 * of the register's declared array, if any. */
		if (reg.Indirect.ArrayID)
			input_index = array_first[reg.Indirect.ArrayID];
		else
			input_index = reg.Register.Index;

		ind_index = si_get_indirect_index(ctx, &reg.Indirect,
						  1, reg.Register.Index - input_index);
	} else {
		input_index = reg.Register.Index;
	}

	/* !Dimension means this is a per-patch (or non-arrayed) value. */
	return get_dw_address_from_generic_indices(ctx, vertex_dw_stride,
						   base_addr, vertex_index,
						   ind_index, input_index,
						   name, index,
						   !reg.Register.Dimension);
}
934
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
/* Compute the byte address of one attribute in that layout. A NULL
 * vertex_index selects the per-patch area at the end of the buffer. */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	/* num_patches is in bits [0:5] of tcs_offchip_layout. */
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	/* 16 bytes = one vec4 attribute slot. */
	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: slot = rel_patch_id * verts_per_patch + vertex;
		 * consecutive attributes are total_vertices slots apart. */
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: one slot per patch per attribute. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Skip past the per-vertex area; its size is packed in bits
		 * [12:31] of tcs_offchip_layout. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
995
/* This is a generic helper that can be shared by the NIR and TGSI backends */
/* Translate semantic name/index (at "param_base" in the given arrays) into
 * the unique attribute index, add the optional dynamic param_index, and
 * compute the offchip buffer address for the current patch. */
static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
					struct si_shader_context *ctx,
					LLVMValueRef vertex_index,
					LLVMValueRef param_index,
					unsigned param_base,
					ubyte *name,
					ubyte *index,
					bool is_patch)
{
	unsigned param_index_base;

	param_index_base = is_patch ?
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index(name[param_base], index[param_base]);

	if (param_index) {
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, param_index_base, 0),
					   "");
	} else {
		param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
	}

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}
1023
/* Compute the TCS->TES buffer address for a TGSI register (exactly one
 * of dst/src is non-NULL).  Handles direct and indirect addressing as
 * well as the optional vertex dimension.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
					struct si_shader_context *ctx,
					const struct tgsi_full_dst_register *dst,
					const struct tgsi_full_src_register *src)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	if (reg.Register.Dimension) {
		/* The outer dimension selects the vertex within the patch. */
		if (reg.Dimension.Indirect)
			vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
							     1, reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Base the relative index on the start of the declared
		 * array if there is one, otherwise on the register itself.
		 */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = si_get_indirect_index(ctx, &reg.Indirect,
						    1, reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
	}

	/* A register without a vertex dimension is per-patch. */
	return get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, param_base,
							       name, index, !reg.Register.Dimension);
}
1078
/* Load one channel (or, with swizzle == ~0, all four channels) of the
 * given type from a buffer resource.  64-bit channels are split into
 * two dword loads and reassembled.
 *
 * \param swizzle	channel to load, or ~0 for a whole vec4
 * \param offset	forwarded as the soffset argument of ac_build_buffer_load
 * \param base		forwarded as the voffset argument of ac_build_buffer_load
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
                                LLVMTypeRef type, unsigned swizzle,
                                LLVMValueRef buffer, LLVMValueRef offset,
                                LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		/* Load the full vec4 and extract the requested channel. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit channel: load the two halves separately and combine. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
	                          swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
	                           swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
1112
1113 /**
1114 * Load from LDS.
1115 *
1116 * \param type output value type
1117 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
1118 * \param dw_addr address in dwords
1119 */
1120 static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
1121 LLVMTypeRef type, unsigned swizzle,
1122 LLVMValueRef dw_addr)
1123 {
1124 struct si_shader_context *ctx = si_shader_context(bld_base);
1125 LLVMValueRef value;
1126
1127 if (swizzle == ~0) {
1128 LLVMValueRef values[TGSI_NUM_CHANNELS];
1129
1130 for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
1131 values[chan] = lds_load(bld_base, type, chan, dw_addr);
1132
1133 return lp_build_gather_values(&ctx->gallivm, values,
1134 TGSI_NUM_CHANNELS);
1135 }
1136
1137 /* Split 64-bit loads. */
1138 if (llvm_type_is_64bit(ctx, type)) {
1139 LLVMValueRef lo, hi;
1140
1141 lo = lds_load(bld_base, ctx->i32, swizzle, dw_addr);
1142 hi = lds_load(bld_base, ctx->i32, swizzle + 1, dw_addr);
1143 return si_llvm_emit_fetch_64bit(bld_base, type, lo, hi);
1144 }
1145
1146 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
1147 LLVMConstInt(ctx->i32, swizzle, 0));
1148
1149 value = ac_lds_load(&ctx->ac, dw_addr);
1150
1151 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
1152 }
1153
1154 /**
1155 * Store to LDS.
1156 *
1157 * \param swizzle offset (typically 0..3)
1158 * \param dw_addr address in dwords
1159 * \param value value to store
1160 */
1161 static void lds_store(struct si_shader_context *ctx,
1162 unsigned dw_offset_imm, LLVMValueRef dw_addr,
1163 LLVMValueRef value)
1164 {
1165 dw_addr = lp_build_add(&ctx->bld_base.uint_bld, dw_addr,
1166 LLVMConstInt(ctx->i32, dw_offset_imm, 0));
1167
1168 ac_lds_store(&ctx->ac, dw_addr, value);
1169 }
1170
1171 static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
1172 unsigned param)
1173 {
1174 LLVMBuilderRef builder = ctx->ac.builder;
1175
1176 LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
1177 addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
1178 addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");
1179
1180 uint64_t desc2 = 0xffffffff;
1181 uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1182 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1183 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1184 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1185 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1186 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1187 LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);
1188
1189 LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
1190 desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
1191 desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
1192 return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
1193 }
1194
1195 static LLVMValueRef fetch_input_tcs(
1196 struct lp_build_tgsi_context *bld_base,
1197 const struct tgsi_full_src_register *reg,
1198 enum tgsi_opcode_type type, unsigned swizzle)
1199 {
1200 struct si_shader_context *ctx = si_shader_context(bld_base);
1201 LLVMValueRef dw_addr, stride;
1202
1203 stride = get_tcs_in_vertex_dw_stride(ctx);
1204 dw_addr = get_tcs_in_current_patch_offset(ctx);
1205 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1206
1207 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1208 }
1209
1210 static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
1211 LLVMValueRef vertex_index,
1212 LLVMValueRef param_index,
1213 unsigned const_index,
1214 unsigned location,
1215 unsigned driver_location,
1216 unsigned component,
1217 unsigned num_components,
1218 bool is_patch,
1219 bool is_compact,
1220 bool load_input)
1221 {
1222 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1223 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1224 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
1225 LLVMValueRef dw_addr, stride;
1226
1227 driver_location = driver_location / 4;
1228
1229 if (load_input) {
1230 stride = get_tcs_in_vertex_dw_stride(ctx);
1231 dw_addr = get_tcs_in_current_patch_offset(ctx);
1232 } else {
1233 if (is_patch) {
1234 stride = NULL;
1235 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1236 } else {
1237 stride = get_tcs_out_vertex_dw_stride(ctx);
1238 dw_addr = get_tcs_out_current_patch_offset(ctx);
1239 }
1240 }
1241
1242 if (param_index) {
1243 /* Add the constant index to the indirect index */
1244 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
1245 LLVMConstInt(ctx->i32, const_index, 0), "");
1246 } else {
1247 param_index = LLVMConstInt(ctx->i32, const_index, 0);
1248 }
1249
1250 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
1251 vertex_index, param_index,
1252 driver_location,
1253 info->input_semantic_name,
1254 info->input_semantic_index,
1255 is_patch);
1256
1257 LLVMValueRef value[4];
1258 for (unsigned i = 0; i < num_components + component; i++) {
1259 value[i] = lds_load(bld_base, ctx->i32, i, dw_addr);
1260 }
1261
1262 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1263 }
1264
1265 static LLVMValueRef fetch_output_tcs(
1266 struct lp_build_tgsi_context *bld_base,
1267 const struct tgsi_full_src_register *reg,
1268 enum tgsi_opcode_type type, unsigned swizzle)
1269 {
1270 struct si_shader_context *ctx = si_shader_context(bld_base);
1271 LLVMValueRef dw_addr, stride;
1272
1273 if (reg->Register.Dimension) {
1274 stride = get_tcs_out_vertex_dw_stride(ctx);
1275 dw_addr = get_tcs_out_current_patch_offset(ctx);
1276 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1277 } else {
1278 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1279 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1280 }
1281
1282 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1283 }
1284
1285 static LLVMValueRef fetch_input_tes(
1286 struct lp_build_tgsi_context *bld_base,
1287 const struct tgsi_full_src_register *reg,
1288 enum tgsi_opcode_type type, unsigned swizzle)
1289 {
1290 struct si_shader_context *ctx = si_shader_context(bld_base);
1291 LLVMValueRef buffer, base, addr;
1292
1293 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
1294
1295 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1296 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1297
1298 return buffer_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle,
1299 buffer, base, addr, true);
1300 }
1301
/* NIR callback: load a TES input from the off-chip TCS->TES buffer. */
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef buffer, base, addr;

	/* driver_location counts dwords; convert to a vec4 slot index. */
	driver_location = driver_location / 4;

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->input_semantic_name,
							       info->input_semantic_index,
							       is_patch);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load(), but for now this maximises code sharing
	 * between the NIR and TGSI backends.
	 */
	LLVMValueRef value[4];
	/* Channels below "component" are never read by the gather below. */
	for (unsigned i = component; i < num_components + component; i++) {
		value[i] = buffer_load(&ctx->bld_base, ctx->i32, i, buffer, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
1350
/* TGSI store hook for TCS outputs.
 *
 * Outputs go to two places: LDS (when something later reads them there)
 * and the off-chip TCS->TES buffer.  Partial writemasks are stored
 * dword by dword; a full xyzw write is combined into one vec4 store.
 * Tess factors are never written to the off-chip buffer here -- the TCS
 * epilog handles them, reading either LDS or the invoc0 VGPRs filled
 * below.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     unsigned index,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[index];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, index, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output. */
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !sh_info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);

	uint32_t writemask = reg->Register.WriteMask;
	while (writemask) {
		chan_index = u_bit_scan(&writemask);
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(ctx, chan_index, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan_index] = value;

		/* Partial writemask: store each written dword individually. */
		if (reg->Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan_index]);
			} else if (chan_index < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan_index]);
			}
		}
	}

	/* Full writemask: store all four dwords as a single vec4. */
	if (reg->Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1447
/* NIR store hook for TCS outputs (mirrors store_output_tcs for TGSI):
 * stores go to LDS (if later read there) and to the off-chip TCS->TES
 * buffer, with tess factors handled by the TCS epilog instead of the
 * buffer stores.
 */
static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    unsigned location,
				    unsigned driver_location,
				    LLVMValueRef src,
				    unsigned component,
				    bool is_patch,
				    bool is_compact,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* driver_location counts dwords; convert to a vec4 slot index. */
	driver_location = driver_location / 4;

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		/* param_index stays NULL for a direct access with
		 * const_index == 0; the helpers below handle that case.
		 */
		if (const_index != 0)
			param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	if (!is_patch) {
		/* Per-vertex output. */
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		/* Per-patch output. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      driver_location,
							      info->output_semantic_name,
							      info->output_semantic_index,
							      is_patch);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (!param_index) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->output_semantic_name,
							       info->output_semantic_index,
							       is_patch);

	for (unsigned chan = 0; chan < 4; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			ac_lds_store(&ctx->ac, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		/* Partial writemask: store each written dword individually. */
		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * chan, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	/* Full writemask: store all four dwords as a single vec4. */
	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, 1, 0, true, false);
	}
}
1564
/* Load a GS input that the previous (ES) stage wrote to the ESGS ring.
 *
 * On GFX9 the ESGS ring lives in LDS and the per-vertex dword offsets
 * arrive packed two per parameter; on older chips it is a memory ring
 * read with a 256-byte stride per attribute dword.
 */
LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
				   unsigned input_index,
				   unsigned vtx_offset_param,
				   LLVMTypeRef type,
				   unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		/* Vertex offsets 0..5 are packed in pairs of 16 bits. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Whole vec4 requested: load each channel separately. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return lp_build_gather_values(&ctx->gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ctx->gs_vtx_offset[vtx_offset_param];

	vtx_offset = lp_build_mul_imm(uint, gs_vtx_offset, 4);

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		/* 64-bit input: fetch the second dword and combine. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1643
1644 static LLVMValueRef fetch_input_gs(
1645 struct lp_build_tgsi_context *bld_base,
1646 const struct tgsi_full_src_register *reg,
1647 enum tgsi_opcode_type type,
1648 unsigned swizzle)
1649 {
1650 struct si_shader_context *ctx = si_shader_context(bld_base);
1651 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1652
1653 unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
1654 if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
1655 return get_primitive_id(ctx, swizzle);
1656
1657 if (!reg->Register.Dimension)
1658 return NULL;
1659
1660 return si_llvm_load_input_gs(&ctx->abi, reg->Register.Index,
1661 reg->Dimension.Index,
1662 tgsi2llvmtype(bld_base, type),
1663 swizzle);
1664 }
1665
1666 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1667 {
1668 switch (interpolate) {
1669 case TGSI_INTERPOLATE_CONSTANT:
1670 return 0;
1671
1672 case TGSI_INTERPOLATE_LINEAR:
1673 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1674 return SI_PARAM_LINEAR_SAMPLE;
1675 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1676 return SI_PARAM_LINEAR_CENTROID;
1677 else
1678 return SI_PARAM_LINEAR_CENTER;
1679 break;
1680 case TGSI_INTERPOLATE_COLOR:
1681 case TGSI_INTERPOLATE_PERSPECTIVE:
1682 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1683 return SI_PARAM_PERSP_SAMPLE;
1684 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1685 return SI_PARAM_PERSP_CENTROID;
1686 else
1687 return SI_PARAM_PERSP_CENTER;
1688 break;
1689 default:
1690 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1691 return -1;
1692 }
1693 }
1694
1695 static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx,
1696 unsigned attr_index, unsigned chan,
1697 LLVMValueRef prim_mask,
1698 LLVMValueRef i, LLVMValueRef j)
1699 {
1700 if (i || j) {
1701 return ac_build_fs_interp(&ctx->ac,
1702 LLVMConstInt(ctx->i32, chan, 0),
1703 LLVMConstInt(ctx->i32, attr_index, 0),
1704 prim_mask, i, j);
1705 }
1706 return ac_build_fs_interp_mov(&ctx->ac,
1707 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1708 LLVMConstInt(ctx->i32, chan, 0),
1709 LLVMConstInt(ctx->i32, attr_index, 0),
1710 prim_mask);
1711 }
1712
1713 /**
1714 * Interpolate a fragment shader input.
1715 *
1716 * @param ctx context
1717 * @param input_index index of the input in hardware
1718 * @param semantic_name TGSI_SEMANTIC_*
1719 * @param semantic_index semantic index
1720 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1721 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1722 * @param interp_param interpolation weights (i,j)
1723 * @param prim_mask SI_PARAM_PRIM_MASK
1724 * @param face SI_PARAM_FRONT_FACE
1725 * @param result the return value (4 components)
1726 */
1727 static void interp_fs_input(struct si_shader_context *ctx,
1728 unsigned input_index,
1729 unsigned semantic_name,
1730 unsigned semantic_index,
1731 unsigned num_interp_inputs,
1732 unsigned colors_read_mask,
1733 LLVMValueRef interp_param,
1734 LLVMValueRef prim_mask,
1735 LLVMValueRef face,
1736 LLVMValueRef result[4])
1737 {
1738 LLVMValueRef i = NULL, j = NULL;
1739 unsigned chan;
1740
1741 /* fs.constant returns the param from the middle vertex, so it's not
1742 * really useful for flat shading. It's meant to be used for custom
1743 * interpolation (but the intrinsic can't fetch from the other two
1744 * vertices).
1745 *
1746 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1747 * to do the right thing. The only reason we use fs.constant is that
1748 * fs.interp cannot be used on integers, because they can be equal
1749 * to NaN.
1750 *
1751 * When interp is false we will use fs.constant or for newer llvm,
1752 * amdgcn.interp.mov.
1753 */
1754 bool interp = interp_param != NULL;
1755
1756 if (interp) {
1757 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
1758 LLVMVectorType(ctx->f32, 2), "");
1759
1760 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1761 ctx->i32_0, "");
1762 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1763 ctx->i32_1, "");
1764 }
1765
1766 if (semantic_name == TGSI_SEMANTIC_COLOR &&
1767 ctx->shader->key.part.ps.prolog.color_two_side) {
1768 LLVMValueRef is_face_positive;
1769
1770 /* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
1771 * otherwise it's at offset "num_inputs".
1772 */
1773 unsigned back_attr_offset = num_interp_inputs;
1774 if (semantic_index == 1 && colors_read_mask & 0xf)
1775 back_attr_offset += 1;
1776
1777 is_face_positive = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
1778 face, ctx->i32_0, "");
1779
1780 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1781 LLVMValueRef front, back;
1782
1783 front = si_build_fs_interp(ctx,
1784 input_index, chan,
1785 prim_mask, i, j);
1786 back = si_build_fs_interp(ctx,
1787 back_attr_offset, chan,
1788 prim_mask, i, j);
1789
1790 result[chan] = LLVMBuildSelect(ctx->ac.builder,
1791 is_face_positive,
1792 front,
1793 back,
1794 "");
1795 }
1796 } else if (semantic_name == TGSI_SEMANTIC_FOG) {
1797 result[0] = si_build_fs_interp(ctx, input_index,
1798 0, prim_mask, i, j);
1799 result[1] =
1800 result[2] = LLVMConstReal(ctx->f32, 0.0f);
1801 result[3] = LLVMConstReal(ctx->f32, 1.0f);
1802 } else {
1803 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
1804 result[chan] = si_build_fs_interp(ctx,
1805 input_index, chan,
1806 prim_mask, i, j);
1807 }
1808 }
1809 }
1810
/* Load (interpolate) a fragment shader input into out[0..3].
 *
 * COLOR inputs may arrive pre-interpolated in VGPRs set up by the PS
 * prolog; everything else goes through interp_fs_input.
 */
void si_llvm_load_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &ctx->bld_base.base;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;
	enum tgsi_semantic semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	enum tgsi_interpolate_mode interp_mode = info->input_interpolate[input_index];
	enum tgsi_interpolate_loc interp_loc = info->input_interpolate_loc[input_index];

	/* Get colors from input VGPRs (set by the prolog). */
	if (semantic_name == TGSI_SEMANTIC_COLOR) {
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (semantic_index * 4);
		/* The prolog packs only the components actually read, so the
		 * VGPR offset depends on how many COLOR0 components exist.
		 */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (semantic_index ? util_bitcount(colors_read & 0xf) : 0);

		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	/* interp_param_idx == 0 means flat shading (no weights). */
	interp_param_idx = lookup_interp_param_index(interp_mode, interp_loc);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, semantic_name,
			semantic_index, 0, /* this param is unused */
			shader->selector->info.colors_read, interp_param,
			ctx->abi.prim_mask,
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1855
/* TGSI input-declaration hook for fragment shaders; the declaration
 * itself (decl) is unused -- all the work happens in the shared loader.
 */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_fs(ctx, input_index, out);
}
1864
/* Sample ID: bits 8..11 of the SI_PARAM_ANCILLARY input. */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1869
1870
1871 /**
1872 * Load a dword from a constant buffer.
1873 */
1874 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1875 LLVMValueRef resource,
1876 LLVMValueRef offset)
1877 {
1878 return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
1879 0, 0, 0, true, true);
1880 }
1881
1882 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
1883 {
1884 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1885 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1886 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1887 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1888 LLVMValueRef resource = ac_build_load_to_sgpr(&ctx->ac, desc, buf_index);
1889
1890 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1891 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1892 LLVMValueRef offset1 = LLVMBuildAdd(ctx->ac.builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1893
1894 LLVMValueRef pos[4] = {
1895 buffer_load_const(ctx, resource, offset0),
1896 buffer_load_const(ctx, resource, offset1),
1897 LLVMConstReal(ctx->f32, 0),
1898 LLVMConstReal(ctx->f32, 0)
1899 };
1900
1901 return lp_build_gather_values(&ctx->gallivm, pos, 4);
1902 }
1903
/* Load the tessellation coordinate in TES.  u and v arrive as shader
 * parameters; for triangle domains the third component is 1 - u - v.
 */
static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi,
				       LLVMTypeRef type,
				       unsigned num_components)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_context *bld = &ctx->bld_base.base;

	LLVMValueRef coord[4] = {
		LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
		LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
		ctx->ac.f32_0,
		ctx->ac.f32_0
	};

	/* For triangles, the vector should be (u, v, 1-u-v). */
	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
	    PIPE_PRIM_TRIANGLES)
		coord[2] = lp_build_sub(bld, ctx->ac.f32_1,
					lp_build_add(bld, coord[0], coord[1]));

	return lp_build_gather_values(&ctx->gallivm, coord, 4);
}
1926
1927 static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
1928 unsigned semantic_name)
1929 {
1930 LLVMValueRef buffer, base, addr;
1931
1932 int param = si_shader_io_get_unique_index_patch(semantic_name, 0);
1933
1934 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
1935
1936 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1937 addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
1938 LLVMConstInt(ctx->i32, param, 0));
1939
1940 return buffer_load(&ctx->bld_base, ctx->f32,
1941 ~0, buffer, base, addr, true);
1942
1943 }
1944
1945 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
1946 unsigned varying_id)
1947 {
1948 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1949 unsigned semantic_name;
1950
1951 switch (varying_id) {
1952 case VARYING_SLOT_TESS_LEVEL_INNER:
1953 semantic_name = TGSI_SEMANTIC_TESSINNER;
1954 break;
1955 case VARYING_SLOT_TESS_LEVEL_OUTER:
1956 semantic_name = TGSI_SEMANTIC_TESSOUTER;
1957 break;
1958 default:
1959 unreachable("unknown tess level");
1960 }
1961
1962 return load_tess_level(ctx, semantic_name);
1963
1964 }
1965
1966 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
1967 {
1968 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1969 if (ctx->type == PIPE_SHADER_TESS_CTRL)
1970 return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
1971 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1972 return get_num_tcs_out_vertices(ctx);
1973 else
1974 unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
1975 }
1976
/**
 * Load one TGSI system value and store it in ctx->system_values[index].
 *
 * \param index  declaration index, used as the slot in ctx->system_values
 * \param decl   the TGSI declaration naming the system value
 */
void si_load_system_value(struct si_shader_context *ctx,
			  unsigned index,
			  const struct tgsi_full_declaration *decl)
{
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = ctx->abi.instance_id;
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VertexID = relative vertex id + base vertex. */
		value = LLVMBuildAdd(ctx->ac.builder,
				     ctx->abi.vertex_id,
				     ctx->abi.base_vertex, "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of the VS state bits selects indexed vs. non-indexed. */
		indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(ctx->ac.builder, indexed,
					ctx->abi.base_vertex, ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = ctx->abi.start_instance;
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = ctx->abi.draw_id;
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			/* TCS: invocation id lives in bits [12:8] of tcs_rel_ids. */
			value = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = ctx->abi.gs_invocation_id;
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			/* Apply RCP to the W position input. */
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = ctx->abi.front_face;
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Fractional part of the pixel position = the sample position
		 * within the pixel; z and w are 0. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
		value = si_load_tess_coord(&ctx->abi, NULL, 4);
		break;

	case TGSI_SEMANTIC_VERTICESIN:
		value = si_load_patch_vertices_in(&ctx->abi);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
		value = load_tess_level(ctx, decl->Semantic.Name);
		break;

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		/* Default tess levels come from a driver constant buffer:
		 * outer levels at dwords 0..3, inner levels at dwords 4..7. */
		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(&ctx->gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(ctx, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* A fixed block size is baked in as constants; otherwise the
		 * size arrives in a user SGPR. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(&ctx->gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		LLVMValueRef values[3];

		/* Dimensions without a block-id input default to 0. */
		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(&ctx->gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* helper = !llvm.amdgcn.ps.live, sign-extended from i1 to i32. */
		value = lp_build_intrinsic(ctx->ac.builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildSExt(ctx->ac.builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* mask = 1ull << thread_id, returned as v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* Build GE/GT masks by shifting, LE/LT as their complements. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
2217
2218 void si_declare_compute_memory(struct si_shader_context *ctx,
2219 const struct tgsi_full_declaration *decl)
2220 {
2221 struct si_shader_selector *sel = ctx->shader->selector;
2222
2223 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_LOCAL_ADDR_SPACE);
2224 LLVMValueRef var;
2225
2226 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
2227 assert(decl->Range.First == decl->Range.Last);
2228 assert(!ctx->ac.lds);
2229
2230 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2231 LLVMArrayType(ctx->i8, sel->local_size),
2232 "compute_lds",
2233 AC_LOCAL_ADDR_SPACE);
2234 LLVMSetAlignment(var, 4);
2235
2236 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
2237 }
2238
2239 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
2240 {
2241 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
2242 ctx->param_const_and_shader_buffers);
2243
2244 return ac_build_load_to_sgpr(&ctx->ac, list_ptr,
2245 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
2246 }
2247
2248 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
2249 {
2250 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2251 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
2252
2253 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
2254 index = LLVMBuildAdd(ctx->ac.builder, index,
2255 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
2256
2257 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
2258 }
2259
2260 static LLVMValueRef
2261 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
2262 {
2263 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2264 LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
2265 ctx->param_const_and_shader_buffers);
2266
2267 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
2268 index = LLVMBuildSub(ctx->ac.builder,
2269 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
2270 index, "");
2271
2272 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
2273 }
2274
/**
 * Fetch one channel (or all 4 via LP_CHAN_ALL) of a TGSI constant-buffer
 * source register.
 *
 * Handles: gathering all channels, splitting 64-bit types into two 32-bit
 * loads, a fast path when user-data SGPRs point directly at constant
 * buffer 0, and indirect register/buffer indexing.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;

	/* Gather all 4 channels by recursing per channel. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	/* Split 64-bit loads. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef lo, hi;

		lo = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle);
		hi = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle + 1);
		return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
						lo, hi);
	}

	/* Byte address of the dword: (index * 4 + swizzle) dwords. */
	idx = reg->Register.Index * 4 + swizzle;
	if (reg->Register.Indirect) {
		addr = si_get_indirect_index(ctx, ireg, 16, idx * 4);
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	/* Fast path when user data SGPRs point to constant buffer 0 directly. */
	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		LLVMValueRef ptr =
			LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);

		/* This enables use of s_load_dword and flat_load_dword for const buffer 0
		 * loads, and up to x4 load opcode merging. However, it leads to horrible
		 * code reducing SIMD wave occupancy from 8 to 2 in many cases.
		 *
		 * Using s_buffer_load_dword (x1) seems to be the best option right now.
		 *
		 * LLVM 5.0 on SI doesn't insert a required s_nop between SALU setting
		 * a descriptor and s_buffer_load_dword using it, so we can't expand
		 * the pointer into a full descriptor like below. We have to use
		 * s_load_dword instead. The only case when LLVM 5.0 would select
		 * s_buffer_load_dword (that we have to prevent) is when we use
		 * a literal offset where we don't need bounds checking.
		 */
		if (ctx->screen->info.chip_class == SI &&
		    HAVE_LLVM < 0x0600 &&
		    !reg->Register.Indirect) {
			/* Convert the byte address to a dword address. */
			addr = LLVMBuildLShr(ctx->ac.builder, addr, LLVMConstInt(ctx->i32, 2, 0), "");
			LLVMValueRef result = ac_build_load_invariant(&ctx->ac, ptr, addr);
			return bitcast(bld_base, type, result);
		}

		/* Do the bounds checking with a descriptor, because
		 * doing computation and manual bounds checking of 64-bit
		 * addresses generates horrible VALU code with very high
		 * VGPR usage and very low SIMD occupancy.
		 */
		ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->i64, "");
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->v2i32, "");

		/* Build a buffer descriptor: 64-bit base address, size of
		 * the declared constant file, 32-bit float data format. */
		LLVMValueRef desc_elems[] = {
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_0, ""),
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_1, ""),
			LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
			LLVMConstInt(ctx->i32,
				S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0)
		};
		LLVMValueRef desc = ac_build_gather_values(&ctx->ac, desc_elems, 4);
		LLVMValueRef result = buffer_load_const(ctx, desc, addr);
		return bitcast(bld_base, type, result);
	}

	assert(reg->Register.Dimension);
	buf = reg->Dimension.Index;

	if (reg->Dimension.Indirect) {
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		/* Constant buffers follow shader buffers in the list. */
		index = LLVMBuildAdd(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_load_to_sgpr(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	return bitcast(bld_base, type, buffer_load_const(ctx, bufp, addr));
}
2383
2384 /* Upper 16 bits must be zero. */
2385 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
2386 LLVMValueRef val[2])
2387 {
2388 return LLVMBuildOr(ctx->ac.builder, val[0],
2389 LLVMBuildShl(ctx->ac.builder, val[1],
2390 LLVMConstInt(ctx->i32, 16, 0),
2391 ""), "");
2392 }
2393
2394 /* Upper 16 bits are ignored and will be dropped. */
2395 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
2396 LLVMValueRef val[2])
2397 {
2398 LLVMValueRef v[2] = {
2399 LLVMBuildAnd(ctx->ac.builder, val[0],
2400 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
2401 val[1],
2402 };
2403 return si_llvm_pack_two_int16(ctx, v);
2404 }
2405
/**
 * Initialize arguments for the shader export intrinsic.
 *
 * For fragment shaders, \p values are converted according to the color
 * buffer's SPI_SHADER_COL_FORMAT from the shader key; all other stages
 * export 4 x 32-bit channels unmodified.
 *
 * \param values  the 4 output channels
 * \param target  V_008DFC_SQ_EXP_* export target
 * \param args    the resulting export arguments (written)
 */
static void si_llvm_init_export_args(struct si_shader_context *ctx,
				     LLVMValueRef *values,
				     unsigned target,
				     struct ac_export_args *args)
{
	LLVMValueRef f32undef = LLVMGetUndef(ctx->ac.f32);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef val[4];
	unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
	unsigned chan;
	bool is_int8, is_int10;

	/* Default is 0xf. Adjusted below depending on the format. */
	args->enabled_channels = 0xf; /* writemask */

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	if (ctx->type == PIPE_SHADER_FRAGMENT) {
		const struct si_shader_key *key = &ctx->shader->key;
		unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		assert(cbuf >= 0 && cbuf < 8);
		/* 4 format bits per color buffer in the key. */
		spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
		is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
		is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
	}

	args->compr = false;
	args->out[0] = f32undef;
	args->out[1] = f32undef;
	args->out[2] = f32undef;
	args->out[3] = f32undef;

	switch (spi_shader_col_format) {
	case V_028714_SPI_SHADER_ZERO:
		/* Nothing is written; redirect the export to the NULL target. */
		args->enabled_channels = 0; /* writemask */
		args->target = V_008DFC_SQ_EXP_NULL;
		break;

	case V_028714_SPI_SHADER_32_R:
		args->enabled_channels = 1; /* writemask */
		args->out[0] = values[0];
		break;

	case V_028714_SPI_SHADER_32_GR:
		args->enabled_channels = 0x3; /* writemask */
		args->out[0] = values[0];
		args->out[1] = values[1];
		break;

	case V_028714_SPI_SHADER_32_AR:
		args->enabled_channels = 0x9; /* writemask */
		args->out[0] = values[0];
		args->out[3] = values[3];
		break;

	case V_028714_SPI_SHADER_FP16_ABGR:
		args->compr = 1; /* COMPR flag */

		/* Pack pairs of channels with cvt_pkrtz. */
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				values[2 * chan],
				values[2 * chan + 1]
			};
			LLVMValueRef packed;

			packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		break;

	case V_028714_SPI_SHADER_UNORM16_ABGR:
		/* Clamp to [0, 1], scale to [0, 65535], round to nearest. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  LLVMConstReal(ctx->f32, 65535), "");
			val[chan] = LLVMBuildFAdd(builder, val[chan],
						  LLVMConstReal(ctx->f32, 0.5), "");
			val[chan] = LLVMBuildFPToUI(builder, val[chan],
						    ctx->i32, "");
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val+2));
		break;

	case V_028714_SPI_SHADER_SNORM16_ABGR:
		for (chan = 0; chan < 4; chan++) {
			/* Clamp between [-1, 1]. */
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_MIN,
							      values[chan],
							      LLVMConstReal(ctx->f32, 1));
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_MAX,
							      val[chan],
							      LLVMConstReal(ctx->f32, -1));
			/* Convert to a signed integer in [-32767, 32767]. */
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  LLVMConstReal(ctx->f32, 32767), "");
			/* If positive, add 0.5, else add -0.5. */
			val[chan] = LLVMBuildFAdd(builder, val[chan],
					LLVMBuildSelect(builder,
						LLVMBuildFCmp(builder, LLVMRealOGE,
							      val[chan], ctx->ac.f32_0, ""),
						LLVMConstReal(ctx->f32, 0.5),
						LLVMConstReal(ctx->f32, -0.5), ""), "");
			val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val+2));
		break;

	case V_028714_SPI_SHADER_UINT16_ABGR: {
		/* Clamp limits depend on the actual color buffer format
		 * (8-bit, 10-bit-with-2-bit-alpha, or 16-bit channels). */
		LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
		LLVMValueRef max_alpha =
			!is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);

		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_to_integer(&ctx->ac, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_UMIN,
					val[chan],
					chan == 3 ? max_alpha : max_rgb);
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val+2));
		break;
	}

	case V_028714_SPI_SHADER_SINT16_ABGR: {
		/* Signed variants of the limits above. */
		LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
		LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
		LLVMValueRef max_alpha =
			!is_int10 ? max_rgb : ctx->i32_1;
		LLVMValueRef min_alpha =
			!is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);

		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_to_integer(&ctx->ac, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base,
					TGSI_OPCODE_IMIN,
					val[chan], chan == 3 ? max_alpha : max_rgb);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base,
					TGSI_OPCODE_IMAX,
					val[chan], chan == 3 ? min_alpha : min_rgb);
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val+2));
		break;
	}

	case V_028714_SPI_SHADER_32_ABGR:
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
		break;
	}
}
2581
2582 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2583 LLVMValueRef alpha)
2584 {
2585 struct si_shader_context *ctx = si_shader_context(bld_base);
2586
2587 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2588 static LLVMRealPredicate cond_map[PIPE_FUNC_ALWAYS + 1] = {
2589 [PIPE_FUNC_LESS] = LLVMRealOLT,
2590 [PIPE_FUNC_EQUAL] = LLVMRealOEQ,
2591 [PIPE_FUNC_LEQUAL] = LLVMRealOLE,
2592 [PIPE_FUNC_GREATER] = LLVMRealOGT,
2593 [PIPE_FUNC_NOTEQUAL] = LLVMRealONE,
2594 [PIPE_FUNC_GEQUAL] = LLVMRealOGE,
2595 };
2596 LLVMRealPredicate cond = cond_map[ctx->shader->key.part.ps.epilog.alpha_func];
2597 assert(cond);
2598
2599 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2600 SI_PARAM_ALPHA_REF);
2601 LLVMValueRef alpha_pass =
2602 LLVMBuildFCmp(ctx->ac.builder, cond, alpha, alpha_ref, "");
2603 ac_build_kill_if_false(&ctx->ac, alpha_pass);
2604 } else {
2605 ac_build_kill_if_false(&ctx->ac, LLVMConstInt(ctx->i1, 0, 0));
2606 }
2607 }
2608
2609 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2610 LLVMValueRef alpha,
2611 unsigned samplemask_param)
2612 {
2613 struct si_shader_context *ctx = si_shader_context(bld_base);
2614 LLVMValueRef coverage;
2615
2616 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2617 coverage = LLVMGetParam(ctx->main_fn,
2618 samplemask_param);
2619 coverage = ac_to_integer(&ctx->ac, coverage);
2620
2621 coverage = lp_build_intrinsic(ctx->ac.builder, "llvm.ctpop.i32",
2622 ctx->i32,
2623 &coverage, 1, LP_FUNC_ATTR_READNONE);
2624
2625 coverage = LLVMBuildUIToFP(ctx->ac.builder, coverage,
2626 ctx->f32, "");
2627
2628 coverage = LLVMBuildFMul(ctx->ac.builder, coverage,
2629 LLVMConstReal(ctx->f32,
2630 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2631
2632 return LLVMBuildFMul(ctx->ac.builder, alpha, coverage, "");
2633 }
2634
/**
 * Compute user clip distances from the clip vertex and fill in the
 * position export arguments pos[2] and pos[3] (4 distances each).
 *
 * \param pos       array of position exports; indices 2 and 3 are written
 * \param out_elts  the 4 components of the clip vertex position
 */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	/* Two exports of 4 distances each cover up to 8 clip planes. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Address of component const_chan of plane
				 * (reg_index * 4 + chan) in the clip-planes
				 * constant buffer. */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(&ctx->bld_base.base, args->out[chan],
						     lp_build_mul(&ctx->bld_base.base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2677
2678 static void si_dump_streamout(struct pipe_stream_output_info *so)
2679 {
2680 unsigned i;
2681
2682 if (so->num_outputs)
2683 fprintf(stderr, "STREAMOUT\n");
2684
2685 for (i = 0; i < so->num_outputs; i++) {
2686 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2687 so->output[i].start_component;
2688 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2689 i, so->output[i].output_buffer,
2690 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2691 so->output[i].register_index,
2692 mask & 1 ? "x" : "",
2693 mask & 2 ? "y" : "",
2694 mask & 4 ? "z" : "",
2695 mask & 8 ? "w" : "");
2696 }
2697 }
2698
2699 static void emit_streamout_output(struct si_shader_context *ctx,
2700 LLVMValueRef const *so_buffers,
2701 LLVMValueRef const *so_write_offsets,
2702 struct pipe_stream_output *stream_out,
2703 struct si_shader_output_values *shader_out)
2704 {
2705 unsigned buf_idx = stream_out->output_buffer;
2706 unsigned start = stream_out->start_component;
2707 unsigned num_comps = stream_out->num_components;
2708 LLVMValueRef out[4];
2709
2710 assert(num_comps && num_comps <= 4);
2711 if (!num_comps || num_comps > 4)
2712 return;
2713
2714 /* Load the output as int. */
2715 for (int j = 0; j < num_comps; j++) {
2716 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
2717
2718 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
2719 }
2720
2721 /* Pack the output. */
2722 LLVMValueRef vdata = NULL;
2723
2724 switch (num_comps) {
2725 case 1: /* as i32 */
2726 vdata = out[0];
2727 break;
2728 case 2: /* as v2i32 */
2729 case 3: /* as v4i32 (aligned to 4) */
2730 case 4: /* as v4i32 */
2731 vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
2732 for (int j = 0; j < num_comps; j++) {
2733 vdata = LLVMBuildInsertElement(ctx->ac.builder, vdata, out[j],
2734 LLVMConstInt(ctx->i32, j, 0), "");
2735 }
2736 break;
2737 }
2738
2739 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
2740 vdata, num_comps,
2741 so_write_offsets[buf_idx],
2742 ctx->i32_0,
2743 stream_out->dst_offset * 4, 1, 1, true, false);
2744 }
2745
2746 /**
2747 * Write streamout data to buffers for vertex stream @p stream (different
2748 * vertex streams can occur for GS copy shaders).
2749 */
2750 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
2751 struct si_shader_output_values *outputs,
2752 unsigned noutput, unsigned stream)
2753 {
2754 struct si_shader_selector *sel = ctx->shader->selector;
2755 struct pipe_stream_output_info *so = &sel->so;
2756 LLVMBuilderRef builder = ctx->ac.builder;
2757 int i;
2758 struct lp_build_if_state if_ctx;
2759
2760 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
2761 LLVMValueRef so_vtx_count =
2762 unpack_param(ctx, ctx->param_streamout_config, 16, 7);
2763
2764 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
2765
2766 /* can_emit = tid < so_vtx_count; */
2767 LLVMValueRef can_emit =
2768 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
2769
2770 /* Emit the streamout code conditionally. This actually avoids
2771 * out-of-bounds buffer access. The hw tells us via the SGPR
2772 * (so_vtx_count) which threads are allowed to emit streamout data. */
2773 lp_build_if(&if_ctx, &ctx->gallivm, can_emit);
2774 {
2775 /* The buffer offset is computed as follows:
2776 * ByteOffset = streamout_offset[buffer_id]*4 +
2777 * (streamout_write_index + thread_id)*stride[buffer_id] +
2778 * attrib_offset
2779 */
2780
2781 LLVMValueRef so_write_index =
2782 LLVMGetParam(ctx->main_fn,
2783 ctx->param_streamout_write_index);
2784
2785 /* Compute (streamout_write_index + thread_id). */
2786 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
2787
2788 /* Load the descriptor and compute the write offset for each
2789 * enabled buffer. */
2790 LLVMValueRef so_write_offset[4] = {};
2791 LLVMValueRef so_buffers[4];
2792 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
2793 ctx->param_rw_buffers);
2794
2795 for (i = 0; i < 4; i++) {
2796 if (!so->stride[i])
2797 continue;
2798
2799 LLVMValueRef offset = LLVMConstInt(ctx->i32,
2800 SI_VS_STREAMOUT_BUF0 + i, 0);
2801
2802 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
2803
2804 LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
2805 ctx->param_streamout_offset[i]);
2806 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
2807
2808 so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
2809 LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
2810 so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
2811 }
2812
2813 /* Write streamout data. */
2814 for (i = 0; i < so->num_outputs; i++) {
2815 unsigned reg = so->output[i].register_index;
2816
2817 if (reg >= noutput)
2818 continue;
2819
2820 if (stream != so->output[i].stream)
2821 continue;
2822
2823 emit_streamout_output(ctx, so_buffers, so_write_offset,
2824 &so->output[i], &outputs[reg]);
2825 }
2826 }
2827 lp_build_endif(&if_ctx);
2828 }
2829
2830 static void si_export_param(struct si_shader_context *ctx, unsigned index,
2831 LLVMValueRef *values)
2832 {
2833 struct ac_export_args args;
2834
2835 si_llvm_init_export_args(ctx, values,
2836 V_008DFC_SQ_EXP_PARAM + index, &args);
2837 ac_build_export(&ctx->ac, &args);
2838 }
2839
/**
 * Export all PARAM attributes consumed by the next stage, assigning
 * consecutive export slots and recording them in
 * shader->info.vs_output_param_offset / nr_param_exports.
 */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Skip outputs whose components all belong to non-zero
		 * vertex streams. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics are exported as PARAMs. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs that the key marks as never read by the
		 * next shader (kill_outputs bitmask). */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
2886
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits the position exports (POS0 = position, POS1 = misc vector with
 * point size / edge flag / layer / viewport index, POS2..3 = clip
 * distances), counts them into shader->info.nr_pos_exports, sets the DONE
 * bit on the last position export, and finally emits the PARAM exports
 * via si_build_param_exports().
 */
static void si_llvm_export_vs(struct si_shader_context *ctx,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Build position exports.  Scalar semantics (psize, layer, etc.) are
	 * only remembered here; they are packed into the misc vector below. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_export_args(ctx, outputs[i].values,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (!shader->key.opt.clip_disable) {
				/* Clip distances occupy POS2 and POS3. */
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_export_args(ctx, outputs[i].values,
							 V_008DFC_SQ_EXP_POS + index,
							 &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		/* Channel mask: X = psize, Y = edgeflag, Z = layer. */
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			/* Pre-GFX9: viewport index goes in W. */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* Emit the position exports with consecutive POS targets. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
3033
/**
 * Forward all outputs from the vertex shader to the TES. This is only used
 * for the fixed function TCS.
 *
 * Each input selected by key.mono.u.ff_tcs_inputs_to_copy is read from LDS
 * (where the LS wrote it) and stored as 4 dwords into the offchip buffer
 * that the TES reads from.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* Vertex index within the patch: tcs_rel_ids bits [12:8]. */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS dword address of this vertex's inputs in the current patch. */
	lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
	lds_vertex_offset = LLVMBuildMul(ctx->ac.builder, invocation_id,
					 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(ctx->ac.builder, lds_base, lds_vertex_offset, "");

	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
				       LLVMConstInt(ctx->i32, 4 * i, 0),
				       "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					      get_rel_patch_id(ctx),
					      invocation_id,
					      LLVMConstInt(ctx->i32, i, 0));

		/* ~0 writemask: load the full vec4 from LDS. */
		LLVMValueRef value = lds_load(bld_base, ctx->ac.i32, ~0,
					      lds_ptr);

		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
3075
/* Write the tessellation factors of the current patch to the tess factor
 * ring buffer, and optionally also to the offchip buffer if the TES reads
 * them as inputs.
 *
 * Only invocation 0 performs the stores (the factors are per-patch).  The
 * factors come either directly from VGPRs
 * (key.part.tcs.epilog.invoc0_tess_factors_are_def) or from LDS, which
 * requires a barrier first because any invocation may have written them.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset,
				  LLVMValueRef invoc0_tf_outer[4],
				  LLVMValueRef invoc0_tf_inner[2])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Add a barrier before loading tess factors from LDS. */
	if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
		si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Components beyond outer_comps/inner_comps stay undef. */
	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
		/* Tess factors are in VGPRs. */
		for (i = 0; i < outer_comps; i++)
			outer[i] = out[i] = invoc0_tf_outer[i];
		for (i = 0; i < inner_comps; i++)
			inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
	} else {
		/* Load tess_inner and tess_outer from LDS.
		 * Any invocation can write them, so we can't get them from a temporary.
		 */
		tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
		tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

		lds_base = tcs_out_current_patch_data_offset;
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_inner_index * 4, 0), "");
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_outer_index * 4, 0), "");

		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_inner);
		}
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		LLVMValueRef tmp = out[0];
		out[0] = out[1];
		out[1] = tmp;
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(&ctx->gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(&ctx->gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the thread handling patch 0 writes the control word. */
	lp_build_if(&inner_if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->info.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		outer_vec = lp_build_gather_values(&ctx->gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
						LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(&ctx->gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
3250
3251 static LLVMValueRef
3252 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
3253 unsigned param, unsigned return_index)
3254 {
3255 return LLVMBuildInsertValue(ctx->ac.builder, ret,
3256 LLVMGetParam(ctx->main_fn, param),
3257 return_index, "");
3258 }
3259
3260 static LLVMValueRef
3261 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
3262 unsigned param, unsigned return_index)
3263 {
3264 LLVMBuilderRef builder = ctx->ac.builder;
3265 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
3266
3267 return LLVMBuildInsertValue(builder, ret,
3268 ac_to_float(&ctx->ac, p),
3269 return_index, "");
3270 }
3271
3272 static LLVMValueRef
3273 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
3274 unsigned param, unsigned return_index)
3275 {
3276 LLVMBuilderRef builder = ctx->ac.builder;
3277 LLVMValueRef ptr, lo, hi;
3278
3279 ptr = LLVMGetParam(ctx->main_fn, param);
3280 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
3281 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
3282 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
3283 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
3284 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
3285 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
3286 }
3287
/* This only writes the tessellation factor levels.
 *
 * The actual tess factor stores happen in the epilog part; this function
 * copies the fixed-function TCS inputs, computes the epilog's VGPR
 * arguments (rel_patch_id, invocation_id, tf LDS offset), and packs the
 * SGPR/VGPR values the epilog expects into ctx->return_value.
 */
static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (ctx->screen->info.chip_class >= GFX9) {
		/* Close the wrapper "if" of the merged shader and insert
		 * phis so the values are defined on both incoming paths. */
		LLVMBasicBlockRef blocks[2] = {
			LLVMGetInsertBlock(builder),
			ctx->merged_wrap_if_state.entry_block
		};
		LLVMValueRef values[2];

		lp_build_endif(&ctx->merged_wrap_if_state);

		values[0] = rel_patch_id;
		values[1] = LLVMGetUndef(ctx->i32);
		rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = tf_lds_offset;
		values[1] = LLVMGetUndef(ctx->i32);
		tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = invocation_id;
		values[1] = ctx->i32_1; /* cause the epilog to skip threads */
		invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
	}

	/* Return epilog parameters from this function. */
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->info.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
	invocation_id = ac_to_float(&ctx->ac, invocation_id);
	tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);

	/* Leave a hole corresponding to the two input VGPRs. This ensures that
	 * the invocation_id output does not alias the tcs_rel_ids input,
	 * which saves a V_MOV on gfx9.
	 */
	vgpr += 2;

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");

	if (ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
		vgpr++; /* skip the tess factor LDS offset */
		for (unsigned i = 0; i < 6; i++) {
			LLVMValueRef value =
				LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
			value = ac_to_float(&ctx->ac, value);
			ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
		}
	} else {
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	}
	ctx->return_value = ret;
}
3383
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * On GFX9 the LS and HS stages run as one merged shader, so the LS part
 * forwards its SGPR and VGPR inputs to the TCS part via the function
 * return value: SGPRs first (offsets relative to the 8 system SGPRs),
 * then the two input VGPRs (patch id, rel ids) as floats.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
					   ctx->param_bindless_samplers_and_images,
					   8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_patch_id),
				   vgpr++, "");
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_rel_ids),
				   vgpr++, "");
	ctx->return_value = ret;
}
3428
/* Pass GS inputs from ES to GS on GFX9.
 *
 * Like si_set_ls_return_value_for_tcs, but for the merged ES+GS shader:
 * SGPR inputs first, then the 5 GS vertex-offset/ID VGPRs as floats.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
					   ctx->param_bindless_samplers_and_images,
					   8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
3457
/* LS (VS before TCS) epilogue: store all vertex outputs to LDS so the TCS
 * can read them as inputs.  On GFX9 also forward the LS inputs to the
 * merged TCS part via return values. */
static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output occupies 4 dwords at its unique I/O slot. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			/* Only store components the shader actually writes. */
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			lds_store(ctx, chan, dw_addr,
				  LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
3513
/* ES (VS/TES before GS) epilogue: store all outputs to the ESGS ring —
 * a memory buffer on SI-VI, LDS on GFX9.  On GFX9 also forward the ES
 * inputs to the merged GS part via return values. */
static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
		/* Compute the LDS base for this vertex:
		 * (wave_idx * 64 + thread_id) * esgs_itemsize_in_dwords. */
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		int param;

		/* Ignored by GS; see the spec quote in si_llvm_emit_ls_epilogue. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->info.chip_class >= GFX9) {
				lds_store(ctx, param * 4 + chan, lds_base, out_val);
				continue;
			}

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
3569
3570 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
3571 {
3572 if (ctx->screen->info.chip_class >= GFX9)
3573 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
3574 else
3575 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
3576 }
3577
3578 static void emit_gs_epilogue(struct si_shader_context *ctx)
3579 {
3580 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
3581 si_get_gs_wave_id(ctx));
3582
3583 if (ctx->screen->info.chip_class >= GFX9)
3584 lp_build_endif(&ctx->merged_wrap_if_state);
3585 }
3586
3587 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
3588 unsigned max_outputs,
3589 LLVMValueRef *addrs)
3590 {
3591 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3592 struct tgsi_shader_info UNUSED *info = &ctx->shader->selector->info;
3593
3594 assert(info->num_outputs <= max_outputs);
3595
3596 emit_gs_epilogue(ctx);
3597 }
3598
/* TGSI entry point for the GS epilogue. */
static void si_tgsi_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	emit_gs_epilogue(si_shader_context(bld_base));
}
3604
/* Epilogue for a VS/TES running as the last vertex stage before the PS:
 * optionally clamp vertex colors, gather the outputs, emit streamout
 * stores, optionally append PrimitiveID, and emit all exports. */
static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	/* +1 slot for the optional PrimitiveID output appended below.
	 * NOTE(review): the MALLOC result is not NULL-checked before use —
	 * confirm whether OOM is considered fatal here. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(ctx->ac.builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, &ctx->gallivm, cond);
			}

			/* Clamp all 4 components to [0, 1] in place. */
			for (j = 0; j < 4; j++) {
				addr = addrs[4 * i + j];
				val = LLVMBuildLoad(ctx->ac.builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(ctx->ac.builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Gather all outputs and their per-component stream assignments. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(ctx->ac.builder,
					      addrs[4 * i + j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = ac_to_float(&ctx->ac, get_primitive_id(ctx, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(ctx, outputs, i);
	FREE(outputs);
}
3690
3691 static void si_tgsi_emit_epilogue(struct lp_build_tgsi_context *bld_base)
3692 {
3693 struct si_shader_context *ctx = si_shader_context(bld_base);
3694
3695 ctx->abi.emit_outputs(&ctx->abi, RADEON_LLVM_MAX_OUTPUTS,
3696 &ctx->outputs[0][0]);
3697 }
3698
/* Pixel shader exports queued up during output processing and emitted in
 * one go by si_emit_ps_exports(). */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args */
	struct ac_export_args args[10];	/* queued depth/stencil + color exports */
};
3703
3704 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
3705 LLVMValueRef depth, LLVMValueRef stencil,
3706 LLVMValueRef samplemask, struct si_ps_exports *exp)
3707 {
3708 struct si_shader_context *ctx = si_shader_context(bld_base);
3709 struct ac_export_args args;
3710
3711 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
3712
3713 memcpy(&exp->args[exp->num++], &args, sizeof(args));
3714 }
3715
/* Process one PS color output and queue its MRT export(s).
 *
 * Applies the key-controlled epilog transforms (color clamp, alpha-to-one,
 * alpha test, line/polygon smoothing), then queues one export — or, when
 * FS_COLOR0_WRITES_ALL_CBUFS is active (last_cbuf > 0), one export per
 * color buffer.  The final export of the shader gets the DONE bit.
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = ctx->ac.f32_1;

	/* Alpha test */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(ctx, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(ctx, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3781
3782 static void si_emit_ps_exports(struct si_shader_context *ctx,
3783 struct si_ps_exports *exp)
3784 {
3785 for (unsigned i = 0; i < exp->num; i++)
3786 ac_build_export(&ctx->ac, &exp->args[i]);
3787 }
3788
/* Emit a null export (no channels enabled, DONE set) for a pixel shader
 * that produces no other export. */
static void si_export_null(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct ac_export_args args;

	args.enabled_channels = 0x0; /* enabled channels */
	args.valid_mask = 1; /* whether the EXEC mask is valid */
	args.done = 1; /* DONE bit */
	args.target = V_008DFC_SQ_EXP_NULL;
	args.compr = 0; /* COMPR flag (0 = 32-bit export) */
	args.out[0] = base->undef; /* R */
	args.out[1] = base->undef; /* G */
	args.out[2] = base->undef; /* B */
	args.out[3] = base->undef; /* A */

	ac_build_export(&ctx->ac, &args);
}
3807
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Apply any discard that was deferred until the end of the shader. */
	if (ctx->postponed_kill)
		ac_build_kill_if_false(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = addrs[4 * i + j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z channel of position is meaningful here. */
			depth = LLVMBuildLoad(builder,
					      addrs[4 * i + 2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						addrs[4 * i + 1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   addrs[4 * i + 0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   ac_to_integer(&ctx->ac,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: only colors that were actually written get slots. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3905
/* TGSI MEMBAR: translate barrier flags into a hardware waitcnt.
 * NOTE(review): the *_CNT macros appear to be masks with the respective
 * counter field cleared, so ANDing them into waitcnt requests a wait for
 * that counter to drain — confirm against sid.h. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
	unsigned flags = LLVMConstIntGetZExtValue(src0);
	unsigned waitcnt = NOOP_WAITCNT;

	if (flags & TGSI_MEMBAR_THREAD_GROUP)
		waitcnt &= VM_CNT & LGKM_CNT;

	if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
		     TGSI_MEMBAR_SHADER_BUFFER |
		     TGSI_MEMBAR_SHADER_IMAGE))
		waitcnt &= VM_CNT;

	if (flags & TGSI_MEMBAR_SHARED)
		waitcnt &= LGKM_CNT;

	/* Only emit an instruction when at least one counter is waited on. */
	if (waitcnt != NOOP_WAITCNT)
		ac_build_waitcnt(&ctx->ac, waitcnt);
}
3930
3931 static void clock_emit(
3932 const struct lp_build_tgsi_action *action,
3933 struct lp_build_tgsi_context *bld_base,
3934 struct lp_build_emit_data *emit_data)
3935 {
3936 struct si_shader_context *ctx = si_shader_context(bld_base);
3937 LLVMValueRef tmp;
3938
3939 tmp = lp_build_intrinsic(ctx->ac.builder, "llvm.readcyclecounter",
3940 ctx->i64, NULL, 0, 0);
3941 tmp = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->v2i32, "");
3942
3943 emit_data->output[0] =
3944 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_0, "");
3945 emit_data->output[1] =
3946 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_1, "");
3947 }
3948
/* TGSI DDX/DDY (coarse and fine): compute a screen-space derivative by
 * differencing the value across neighboring pixels in the quad. */
static void si_llvm_emit_ddxy(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	unsigned opcode = emit_data->info->opcode;
	LLVMValueRef val;
	int idx;
	unsigned mask;

	/* Fine derivatives use per-row/per-column lanes; coarse uses the
	 * top-left pixel of the quad. */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = AC_TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = AC_TID_MASK_TOP;
	else
		mask = AC_TID_MASK_TOP_LEFT;

	/* For DDX we want the next X pixel, for DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;

	val = ac_to_integer(&ctx->ac, emit_data->args[0]);
	val = ac_build_ddxy(&ctx->ac, mask, idx, val);
	emit_data->output[emit_data->chan] = val;
}
3974
/*
 * this takes an I,J coordinate pair,
 * and works out the X and Y derivatives.
 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
 */
static LLVMValueRef si_llvm_emit_ddxy_interp(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef interp_ij)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef result[4], a;
	unsigned i;

	/* result layout: [DDX(I), DDX(J), DDY(I), DDY(J)] */
	for (i = 0; i < 2; i++) {
		a = LLVMBuildExtractElement(ctx->ac.builder, interp_ij,
					    LLVMConstInt(ctx->i32, i, 0), "");
		result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
		result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
	}

	return lp_build_gather_values(&ctx->gallivm, result, 4);
}
3997
/* Fetch the extra operands of INTERP_OFFSET / INTERP_SAMPLE:
 * args[0..1] receive the (x, y) interpolation offset relative to the
 * pixel center.  INTERP_CENTROID needs no extra arguments. */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = ac_to_integer(&ctx->ac, sample_id);

		/* Section 8.13.2 (Interpolation Functions) of the OpenGL Shading
		 * Language 4.50 spec says about interpolateAtSample:
		 *
		 *    "Returns the value of the input interpolant variable at
		 *     the location of sample number sample. If multisample
		 *     buffers are not available, the input variable will be
		 *     evaluated at the center of the pixel. If sample sample
		 *     does not exist, the position used to interpolate the
		 *     input variable is undefined."
		 *
		 * This means that sample_id values outside of the valid range
		 * are in fact valid input, and the usual mechanism for loading
		 * the sample position doesn't work.
		 */
		if (ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center) {
			LLVMValueRef center[4] = {
				LLVMConstReal(ctx->f32, 0.5),
				LLVMConstReal(ctx->f32, 0.5),
				ctx->ac.f32_0,
				ctx->ac.f32_0,
			};

			sample_position = lp_build_gather_values(&ctx->gallivm, center, 4);
		} else {
			sample_position = load_sample_position(&ctx->abi, sample_id);
		}

		/* Convert the sample position to an offset from the pixel
		 * center by subtracting 0.5 from x and y. */
		emit_data->args[0] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_0, "");

		emit_data->args[0] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
4065
/* Emit INTERP_CENTROID / INTERP_OFFSET / INTERP_SAMPLE: interpolate a PS
 * input at an alternative location.  Handles indirectly-indexed inputs by
 * interpolating the whole array and selecting the element afterwards. */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	const struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *input = &inst->Src[0];
	int input_base, input_array_size;
	int chan;
	int i;
	LLVMValueRef prim_mask = ctx->abi.prim_mask;
	LLVMValueRef array_idx;
	int interp_param_idx;
	unsigned interp;
	unsigned location;

	assert(input->Register.File == TGSI_FILE_INPUT);

	/* Determine the input range and the dynamic index into it. */
	if (input->Register.Indirect) {
		unsigned array_id = input->Indirect.ArrayID;

		if (array_id) {
			input_base = info->input_array_first[array_id];
			input_array_size = info->input_array_last[array_id] - input_base + 1;
		} else {
			input_base = inst->Src[0].Register.Index;
			input_array_size = info->num_inputs - input_base;
		}

		array_idx = si_get_indirect_index(ctx, &input->Indirect,
						  1, input->Register.Index - input_base);
	} else {
		input_base = inst->Src[0].Register.Index;
		input_array_size = 1;
		array_idx = ctx->i32_0;
	}

	interp = shader->selector->info.input_interpolate[input_base];

	/* OFFSET and SAMPLE start from the center IJ and adjust it below. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL; /* flat interpolation: no IJ needed */

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = ac_to_float(&ctx->ac, interp_el);

			temp1 = LLVMBuildFMul(ctx->ac.builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(ctx->ac.builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(ctx->ac.builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(ctx->ac.builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(&ctx->gallivm, ij_out, 2);
	}

	if (interp_param)
		interp_param = ac_to_float(&ctx->ac, interp_param);

	/* Interpolate each channel for every element of the input array,
	 * then pick the dynamically-indexed element. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef gather = LLVMGetUndef(LLVMVectorType(ctx->f32, input_array_size));
		unsigned schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);

		for (unsigned idx = 0; idx < input_array_size; ++idx) {
			LLVMValueRef v, i = NULL, j = NULL;

			if (interp_param) {
				i = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_0, "");
				j = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_1, "");
			}
			v = si_build_fs_interp(ctx, input_base + idx, schan,
					       prim_mask, i, j);

			gather = LLVMBuildInsertElement(ctx->ac.builder,
				gather, v, LLVMConstInt(ctx->i32, idx, false), "");
		}

		emit_data->output[chan] = LLVMBuildExtractElement(
			ctx->ac.builder, gather, array_idx, "");
	}
}
4186
4187 static void vote_all_emit(
4188 const struct lp_build_tgsi_action *action,
4189 struct lp_build_tgsi_context *bld_base,
4190 struct lp_build_emit_data *emit_data)
4191 {
4192 struct si_shader_context *ctx = si_shader_context(bld_base);
4193
4194 LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, emit_data->args[0]);
4195 emit_data->output[emit_data->chan] =
4196 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4197 }
4198
4199 static void vote_any_emit(
4200 const struct lp_build_tgsi_action *action,
4201 struct lp_build_tgsi_context *bld_base,
4202 struct lp_build_emit_data *emit_data)
4203 {
4204 struct si_shader_context *ctx = si_shader_context(bld_base);
4205
4206 LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, emit_data->args[0]);
4207 emit_data->output[emit_data->chan] =
4208 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4209 }
4210
4211 static void vote_eq_emit(
4212 const struct lp_build_tgsi_action *action,
4213 struct lp_build_tgsi_context *bld_base,
4214 struct lp_build_emit_data *emit_data)
4215 {
4216 struct si_shader_context *ctx = si_shader_context(bld_base);
4217
4218 LLVMValueRef tmp = ac_build_vote_eq(&ctx->ac, emit_data->args[0]);
4219 emit_data->output[emit_data->chan] =
4220 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4221 }
4222
4223 static void ballot_emit(
4224 const struct lp_build_tgsi_action *action,
4225 struct lp_build_tgsi_context *bld_base,
4226 struct lp_build_emit_data *emit_data)
4227 {
4228 struct si_shader_context *ctx = si_shader_context(bld_base);
4229 LLVMBuilderRef builder = ctx->ac.builder;
4230 LLVMValueRef tmp;
4231
4232 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4233 tmp = ac_build_ballot(&ctx->ac, tmp);
4234 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
4235
4236 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
4237 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
4238 }
4239
4240 static void read_invoc_fetch_args(
4241 struct lp_build_tgsi_context *bld_base,
4242 struct lp_build_emit_data *emit_data)
4243 {
4244 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
4245 0, emit_data->src_chan);
4246
4247 /* Always read the source invocation (= lane) from the X channel. */
4248 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
4249 1, TGSI_CHAN_X);
4250 emit_data->arg_count = 2;
4251 }
4252
/* Emit READ_FIRST_INVOC / READ_INVOC via the readlane/readfirstlane
 * intrinsic named in the action table. */
static void read_lane_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	ac_build_optimization_barrier(&ctx->ac, &emit_data->args[0]);

	/* The intrinsic operates on i32 operands. */
	for (unsigned i = 0; i < emit_data->arg_count; ++i)
		emit_data->args[i] = ac_to_integer(&ctx->ac, emit_data->args[i]);

	emit_data->output[emit_data->chan] =
		ac_build_intrinsic(&ctx->ac, action->intr_name,
				   ctx->i32, emit_data->args, emit_data->arg_count,
				   AC_FUNC_ATTR_READNONE |
				   AC_FUNC_ATTR_CONVERGENT);
}
4274
/* Decode the GS stream index (0-3) from the instruction's first source,
 * which must be an immediate. */
static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
	LLVMValueRef imm;
	unsigned stream;

	assert(src0.File == TGSI_FILE_IMMEDIATE);

	imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
	stream = LLVMConstIntGetZExtValue(imm) & 0x3;
	return stream;
}
4289
/* Emit one vertex from the geometry shader: write its attributes to the
 * GSVS ring buffer and signal the emission with a sendmsg. */
static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
				unsigned stream,
				LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct si_shader *shader = ctx->shader;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned chan, offset;
	int i;

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		ac_build_kill_if_false(&ctx->ac, can_emit);
	} else {
		lp_build_if(&if_state, &ctx->gallivm, can_emit);
	}

	/* Store each enabled output channel of this stream to the ring;
	 * components are laid out at a stride of gs_max_out_vertices. */
	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		for (chan = 0; chan < 4; chan++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = ac_to_integer(&ctx->ac, out_val);

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	/* Advance the per-stream vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
4368
4369 /* Emit one vertex from the geometry shader */
4370 static void si_tgsi_emit_vertex(
4371 const struct lp_build_tgsi_action *action,
4372 struct lp_build_tgsi_context *bld_base,
4373 struct lp_build_emit_data *emit_data)
4374 {
4375 struct si_shader_context *ctx = si_shader_context(bld_base);
4376 unsigned stream = si_llvm_get_stream(bld_base, emit_data);
4377
4378 si_llvm_emit_vertex(&ctx->abi, stream, ctx->outputs[0]);
4379 }
4380
/* Cut one primitive from the geometry shader: signal the cut to the GS
 * hardware via sendmsg for the given stream. */
static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
				   unsigned stream)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	/* Signal primitive cut */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
}
4391
4392 /* Cut one primitive from the geometry shader */
4393 static void si_tgsi_emit_primitive(
4394 const struct lp_build_tgsi_action *action,
4395 struct lp_build_tgsi_context *bld_base,
4396 struct lp_build_emit_data *emit_data)
4397 {
4398 struct si_shader_context *ctx = si_shader_context(bld_base);
4399
4400 si_llvm_emit_primitive(&ctx->abi, si_llvm_get_stream(bld_base, emit_data));
4401 }
4402
/* TGSI BARRIER: emit a workgroup execution barrier (plus the SI TCS
 * workaround, which only needs a memory wait). */
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	/* SI only (thanks to a hw bug workaround):
	 * The real barrier instruction isn't needed, because an entire patch
	 * always fits into a single wave.
	 */
	if (ctx->screen->info.chip_class == SI &&
	    ctx->type == PIPE_SHADER_TESS_CTRL) {
		ac_build_waitcnt(&ctx->ac, LGKM_CNT & VM_CNT);
		return;
	}

	lp_build_intrinsic(ctx->ac.builder,
			   "llvm.amdgcn.s.barrier",
			   ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
}
4423
/* Shared action table entry for the INTERP_* opcodes. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
4428
/**
 * Create the LLVM function for the shader and annotate its parameters.
 *
 * \param name               function name
 * \param returns            return member types (for merged/part shaders)
 * \param num_returns        number of return members
 * \param fninfo             parameter list built by the caller
 * \param max_workgroup_size if non-zero, sets the amdgpu-max-work-group-size
 *                           attribute (needed so LLVM keeps s_barrier)
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       struct si_function_info *fninfo,
			       unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    fninfo->types, fninfo->num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i < fninfo->num_sgpr_params; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - noalias
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	/* Bind the declared parameters to the context fields requested via
	 * add_arg_assign(). */
	for (i = 0; i < fninfo->num_params; ++i) {
		if (fninfo->assign[i])
			*fninfo->assign[i] = LLVMGetParam(ctx->main_fn, i);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->debug_flags & DBG(UNSAFE_MATH)) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
4488
/* Declare the streamout (transform feedback) SGPR parameters for a shader
 * that writes streamout buffers. */
static void declare_streamout_params(struct si_shader_context *ctx,
				     struct pipe_stream_output_info *so,
				     struct si_function_info *fninfo)
{
	int i;

	/* Streamout SGPRs. */
	if (so->num_outputs) {
		if (ctx->type != PIPE_SHADER_TESS_EVAL)
			ctx->param_streamout_config = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
		else
			/* TES reuses the last already-declared SGPR. */
			ctx->param_streamout_config = fninfo->num_params - 1;

		ctx->param_streamout_write_index = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
	}
	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!so->stride[i])
			continue;

		ctx->param_streamout_offset[i] = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
	}
}
4512
/* Return the maximum workgroup size (in threads) to report to LLVM for
 * this shader, or 0 when the attribute should not be set. */
static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_TESS_CTRL:
		/* Return this so that LLVM doesn't remove s_barrier
		 * instructions on chips where we use s_barrier. */
		return shader->selector->screen->info.chip_class >= CIK ? 128 : 64;

	case PIPE_SHADER_GEOMETRY:
		/* GFX9 merges ES+GS into one 128-thread group. */
		return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 64;

	case PIPE_SHADER_COMPUTE:
		break; /* see below */

	default:
		return 0;
	}

	/* Compute: the fixed block size from the shader properties. */
	const unsigned *properties = shader->selector->info.properties;
	unsigned max_work_group_size =
	               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
	               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
	               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];

	if (!max_work_group_size) {
		/* This is a variable group size compute shader,
		 * compile it for the maximum possible group size.
		 */
		max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
	}
	return max_work_group_size;
}
4545
/* Declare the per-stage descriptor-pointer SGPRs (const/shader buffers and
 * samplers/images).  When \p assign_params is false the arguments are still
 * declared (to keep the merged-shader layout) but not bound to ctx. */
static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
					    struct si_function_info *fninfo,
					    bool assign_params)
{
	LLVMTypeRef const_shader_buf_type;

	/* With only one const buffer and no shader buffers, the descriptor
	 * array can be typed as f32 data instead of v4i32 resources. */
	if (ctx->shader->selector->info.const_buffers_declared == 1 &&
	    ctx->shader->selector->info.shader_buffers_declared == 0)
		const_shader_buf_type = ctx->f32;
	else
		const_shader_buf_type = ctx->v4i32;

	unsigned const_and_shader_buffers =
		add_arg(fninfo, ARG_SGPR,
			ac_array_in_const_addr_space(const_shader_buf_type));

	unsigned samplers_and_images =
		add_arg(fninfo, ARG_SGPR,
			ac_array_in_const_addr_space(ctx->v8i32));

	if (assign_params) {
		ctx->param_const_and_shader_buffers = const_and_shader_buffers;
		ctx->param_samplers_and_images = samplers_and_images;
	}
}
4571
/* Declare the descriptor-pointer SGPRs shared by all stages: internal RW
 * buffers and bindless samplers/images. */
static void declare_global_desc_pointers(struct si_shader_context *ctx,
					 struct si_function_info *fninfo)
{
	ctx->param_rw_buffers = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const_addr_space(ctx->v4i32));
	ctx->param_bindless_samplers_and_images = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const_addr_space(ctx->v8i32));
}
4580
/* Declare the VS-specific input SGPRs: vertex buffer descriptors, draw
 * parameters (base vertex, start instance, draw id) and state bits. */
static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
					    struct si_function_info *fninfo)
{
	ctx->param_vertex_buffers = add_arg(fninfo, ARG_SGPR,
		ac_array_in_const_addr_space(ctx->v4i32));
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.base_vertex);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.start_instance);
	add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.draw_id);
	ctx->param_vs_state_bits = add_arg(fninfo, ARG_SGPR, ctx->i32);
}
4591
/* Declare the VS input VGPRs.  The VGPR order depends on whether the VS
 * runs as LS (vertex before tess).  Vertex-load index VGPRs are counted
 * into \p num_prolog_vgprs because the prolog computes them. */
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
				   struct si_function_info *fninfo,
				   unsigned *num_prolog_vgprs)
{
	struct si_shader *shader = ctx->shader;

	add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.vertex_id);
	if (shader->key.as_ls) {
		ctx->param_rel_auto_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
		add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
	} else {
		add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
		ctx->param_vs_prim_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
	}
	add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */

	if (!shader->is_gs_copy_shader) {
		/* Vertex load indices. */
		ctx->param_vertex_index0 = fninfo->num_params;
		for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
			add_arg(fninfo, ARG_VGPR, ctx->i32);
		*num_prolog_vgprs += shader->selector->info.num_inputs;
	}
}
4616
/* Declare the TES input VGPRs: the (u, v) tess coordinates, the relative
 * patch id, and the patch id. */
static void declare_tes_input_vgprs(struct si_shader_context *ctx,
				    struct si_function_info *fninfo)
{
	ctx->param_tes_u = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_v = add_arg(fninfo, ARG_VGPR, ctx->f32);
	ctx->param_tes_rel_patch_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
	add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tes_patch_id);
}
4625
enum {
	/* Convenient merged shader definitions. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES, /* GFX9 LS+HS */
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,         /* GFX9 ES+GS */
};
4631
/* Build the LLVM function signature ("main") for the current shader stage:
 * declare all SGPR and VGPR inputs in hardware order, and — for stages whose
 * output feeds another shader part (merged LS/HS, ES/GS, TCS epilog,
 * PS epilog) — the list of return values passed to that next part.
 *
 * NOTE(review): the order of add_arg*/declare_* calls defines the input
 * register layout expected by the SPI and by the prolog/epilog parts —
 * do not reorder.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct si_shader *shader = ctx->shader;
	struct si_function_info fninfo;
	LLVMTypeRef returns[16+32*4];
	unsigned i, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;
	unsigned vs_blit_property =
		shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

	si_init_function_info(&fninfo);

	/* Set MERGED shaders. */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_global_desc_pointers(ctx, &fninfo);

		if (vs_blit_property) {
			ctx->param_vs_blit_inputs = fninfo.num_params;
			add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x1, y1 */
			add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x2, y2 */
			add_arg(&fninfo, ARG_SGPR, ctx->f32); /* depth */

			if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color0 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color1 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color2 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color3 */
			} else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x1 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y1 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x2 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y2 */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.z */
				add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.w */
			}

			/* VGPRs */
			declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
			/* Blit shaders skip the regular VS inputs below;
			 * this break leaves the switch, not the if. */
			break;
		}

		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		declare_vs_specific_input_sgprs(ctx, &fninfo);

		if (shader->key.as_es) {
			ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			if (shader->is_gs_copy_shader) {
				fninfo.num_params = ctx->param_rw_buffers + 1;
				fninfo.num_sgpr_params = fninfo.num_params;
			}

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 &fninfo);
		}

		/* VGPRs */
		declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
		break;

	case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);

		/* VGPRs */
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 11; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_HI_HS */
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */

		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx, &fninfo);

		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */

		declare_per_stage_desc_pointers(ctx, &fninfo,
						ctx->type == PIPE_SHADER_TESS_CTRL);

		/* VGPRs (first TCS, then VS) */
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, &fninfo,
					       &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 11; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_LO_GS) */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_HI_GS) */
		ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo,
						(ctx->type == PIPE_SHADER_VERTEX ||
						 ctx->type == PIPE_SHADER_TESS_EVAL));
		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_specific_input_sgprs(ctx, &fninfo);
		} else {
			/* TESS_EVAL (and also GEOMETRY):
			 * Declare as many input SGPRs as the VS has. */
			ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
			ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
			add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
			add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
			add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
			ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
		}

		declare_per_stage_desc_pointers(ctx, &fninfo,
						ctx->type == PIPE_SHADER_GEOMETRY);

		/* VGPRs (first GS, then VS/TES) */
		ctx->param_gs_vtx01_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
		ctx->param_gs_vtx23_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
		ctx->param_gs_vtx45_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, &fninfo,
					       &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx, &fninfo);
		}

		if (ctx->type == PIPE_SHADER_VERTEX ||
		    ctx->type == PIPE_SHADER_TESS_EVAL) {
			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);

		if (shader->key.as_es) {
			ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
			add_arg(&fninfo, ARG_SGPR, ctx->i32);
			ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		} else {
			add_arg(&fninfo, ARG_SGPR, ctx->i32);
			declare_streamout_params(ctx, &shader->selector->so,
						 &fninfo);
			ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		}

		/* VGPRs */
		declare_tes_input_vgprs(ctx, &fninfo);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_gs_wave_id = add_arg(&fninfo, ARG_SGPR, ctx->i32);

		/* VGPRs */
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[0]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[1]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[2]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[3]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[4]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[5]);
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
		break;

	case PIPE_SHADER_FRAGMENT:
		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);
		add_arg_assign_checked(&fninfo, ARG_SGPR, ctx->i32,
				       &ctx->abi.prim_mask, SI_PARAM_PRIM_MASK);

		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_SAMPLE);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTER);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTROID);
		add_arg_checked(&fninfo, ARG_VGPR, v3i32, SI_PARAM_PERSP_PULL_MODEL);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_SAMPLE);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTER);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTROID);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->f32, SI_PARAM_LINE_STIPPLE_TEX);
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
				       &ctx->abi.frag_pos[0], SI_PARAM_POS_X_FLOAT);
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
				       &ctx->abi.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
				       &ctx->abi.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
				       &ctx->abi.frag_pos[3], SI_PARAM_POS_W_FLOAT);
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
				       &ctx->abi.front_face, SI_PARAM_FRONT_FACE);
		/* NOTE(review): these indices record the VGPR positions of the
		 * FRONT_FACE and ANCILLARY inputs declared just above; they
		 * must be kept in sync with the declaration order. */
		shader->info.face_vgpr_index = 20;
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
				       &ctx->abi.ancillary, SI_PARAM_ANCILLARY);
		shader->info.ancillary_vgpr_index = 21;
		add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
				       &ctx->abi.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
		add_arg_checked(&fninfo, ARG_VGPR, ctx->i32, SI_PARAM_POS_FIXED_PT);

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			assert(fninfo.num_params + num_color_elements <= ARRAY_SIZE(fninfo.types));
			for (i = 0; i < num_color_elements; i++)
				add_arg(&fninfo, ARG_VGPR, ctx->f32);

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_global_desc_pointers(ctx, &fninfo);
		declare_per_stage_desc_pointers(ctx, &fninfo, true);
		if (shader->selector->info.uses_grid_size)
			ctx->param_grid_size = add_arg(&fninfo, ARG_SGPR, v3i32);
		if (shader->selector->info.uses_block_size)
			ctx->param_block_size = add_arg(&fninfo, ARG_SGPR, v3i32);

		for (i = 0; i < 3; i++) {
			ctx->param_block_id[i] = -1;
			if (shader->selector->info.uses_block_id[i])
				ctx->param_block_id[i] = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		}

		ctx->param_thread_id = add_arg(&fninfo, ARG_VGPR, v3i32);
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	si_create_function(ctx, "main", returns, num_returns, &fninfo,
			   si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    ctx->separate_prolog) {
		si_llvm_add_attribute(ctx->main_fn,
				      "InitialPSInputAddr",
				      S_0286D0_PERSP_SAMPLE_ENA(1) |
				      S_0286D0_PERSP_CENTER_ENA(1) |
				      S_0286D0_PERSP_CENTROID_ENA(1) |
				      S_0286D0_LINEAR_SAMPLE_ENA(1) |
				      S_0286D0_LINEAR_CENTER_ENA(1) |
				      S_0286D0_LINEAR_CENTROID_ENA(1) |
				      S_0286D0_FRONT_FACE_ENA(1) |
				      S_0286D0_ANCILLARY_ENA(1) |
				      S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers: SGPRs first, then VGPRs; each parameter
	 * occupies size/4 registers. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i < fninfo.num_sgpr_params; ++i)
		shader->info.num_input_sgprs += ac_get_type_size(fninfo.types[i]) / 4;

	for (; i < fninfo.num_params; ++i)
		shader->info.num_input_vgprs += ac_get_type_size(fninfo.types[i]) / 4;

	/* VGPRs loaded by the prolog are not inputs of the main part. */
	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	if (shader->key.as_ls ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    type == SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY)
		ac_declare_lds_as_pointer(&ctx->ac);
}
5001
5002 /**
5003 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
5004 * for later use.
5005 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	/* Base pointer to the RW-buffer descriptor table. */
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	/* ESGS ring: only a memory ring on <= VI; on GFX9 it lives in LDS
	 * (see create_function), so no descriptor is loaded there. */
	if (ctx->screen->info.chip_class <= VI &&
	    (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							  : SI_ES_RING_ESGS;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);

		ctx->esgs_ring =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	}

	if (ctx->shader->is_gs_copy_shader) {
		/* The GS copy shader only reads stream 0. */
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);

		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
	} else if (ctx->type == PIPE_SHADER_GEOMETRY) {
		const struct si_shader_selector *sel = ctx->shader->selector;
		LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
		LLVMValueRef base_ring;

		base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
		uint64_t stream_offset = 0;

		/* Build one patched descriptor per vertex stream that
		 * actually has outputs. */
		for (unsigned stream = 0; stream < 4; ++stream) {
			unsigned num_components;
			unsigned stride;
			unsigned num_records;
			LLVMValueRef ring, tmp;

			num_components = sel->info.num_stream_output_components[stream];
			if (!num_components)
				continue;

			stride = 4 * num_components * sel->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			num_records = 64;

			/* Add the per-stream byte offset to the 48-bit base
			 * address in dwords 0-1 of the descriptor. */
			ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
			tmp = LLVMBuildAdd(builder, tmp,
					   LLVMConstInt(ctx->i64,
							stream_offset, 0), "");
			stream_offset += stride * 64;

			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
			ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
			/* Patch stride and enable swizzling in dword 1. */
			tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
			tmp = LLVMBuildOr(builder, tmp,
					  LLVMConstInt(ctx->i32,
						       S_008F04_STRIDE(stride) |
						       S_008F04_SWIZZLE_ENABLE(1), 0), "");
			ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
			/* Dword 2: num_records. */
			ring = LLVMBuildInsertElement(builder, ring,
						      LLVMConstInt(ctx->i32, num_records, 0),
						      LLVMConstInt(ctx->i32, 2, 0), "");
			/* Dword 3: format/swizzle-mode fields. */
			ring = LLVMBuildInsertElement(builder, ring,
				LLVMConstInt(ctx->i32,
					     S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
					     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
					     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
					     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
					     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
					     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
					     S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
					     S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
					     S_008F0C_ADD_TID_ENABLE(1),
					     0),
				LLVMConstInt(ctx->i32, 3, 0), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}
}
5100
5101 static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
5102 LLVMValueRef param_rw_buffers,
5103 unsigned param_pos_fixed_pt)
5104 {
5105 LLVMBuilderRef builder = ctx->ac.builder;
5106 LLVMValueRef slot, desc, offset, row, bit, address[2];
5107
5108 /* Use the fixed-point gl_FragCoord input.
5109 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
5110 * per coordinate to get the repeating effect.
5111 */
5112 address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
5113 address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);
5114
5115 /* Load the buffer descriptor. */
5116 slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
5117 desc = ac_build_load_to_sgpr(&ctx->ac, param_rw_buffers, slot);
5118
5119 /* The stipple pattern is 32x32, each row has 32 bits. */
5120 offset = LLVMBuildMul(builder, address[1],
5121 LLVMConstInt(ctx->i32, 4, 0), "");
5122 row = buffer_load_const(ctx, desc, offset);
5123 row = ac_to_integer(&ctx->ac, row);
5124 bit = LLVMBuildLShr(builder, row, address[0], "");
5125 bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
5126 ac_build_kill_if_false(&ctx->ac, bit);
5127 }
5128
/* Parse the (register, value) pairs that LLVM emits in the ELF config
 * section and fill in the si_shader_config fields (register counts, LDS
 * size, scratch usage, SPI PS input masks, spill statistics).
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* Each config entry is an 8-byte little-endian (register, value)
	 * pair. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The RSRC1 fields encode register usage in units of
			 * 8 SGPRs / 4 VGPRs. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode =  G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
			{
				/* Warn only once per process about registers
				 * this parser does not know. */
				static bool printed;

				if (!printed) {
					fprintf(stderr, "Warning: LLVM emitted unknown "
						"config register: 0x%x\n", reg);
					printed = true;
				}
			}
			break;
		}
	}

	/* Fall back to INPUT_ENA if LLVM did not emit INPUT_ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5212
5213 void si_shader_apply_scratch_relocs(struct si_shader *shader,
5214 uint64_t scratch_va)
5215 {
5216 unsigned i;
5217 uint32_t scratch_rsrc_dword0 = scratch_va;
5218 uint32_t scratch_rsrc_dword1 =
5219 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5220
5221 /* Enable scratch coalescing. */
5222 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5223
5224 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5225 const struct ac_shader_reloc *reloc =
5226 &shader->binary.relocs[i];
5227 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5228 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5229 &scratch_rsrc_dword0, 4);
5230 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5231 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5232 &scratch_rsrc_dword1, 4);
5233 }
5234 }
5235 }
5236
5237 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
5238 {
5239 unsigned size = shader->binary.code_size;
5240
5241 if (shader->prolog)
5242 size += shader->prolog->binary.code_size;
5243 if (shader->previous_stage)
5244 size += shader->previous_stage->binary.code_size;
5245 if (shader->prolog2)
5246 size += shader->prolog2->binary.code_size;
5247 if (shader->epilog)
5248 size += shader->epilog->binary.code_size;
5249 return size;
5250 }
5251
5252 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5253 {
5254 const struct ac_shader_binary *prolog =
5255 shader->prolog ? &shader->prolog->binary : NULL;
5256 const struct ac_shader_binary *previous_stage =
5257 shader->previous_stage ? &shader->previous_stage->binary : NULL;
5258 const struct ac_shader_binary *prolog2 =
5259 shader->prolog2 ? &shader->prolog2->binary : NULL;
5260 const struct ac_shader_binary *epilog =
5261 shader->epilog ? &shader->epilog->binary : NULL;
5262 const struct ac_shader_binary *mainb = &shader->binary;
5263 unsigned bo_size = si_get_shader_binary_size(shader) +
5264 (!epilog ? mainb->rodata_size : 0);
5265 unsigned char *ptr;
5266
5267 assert(!prolog || !prolog->rodata_size);
5268 assert(!previous_stage || !previous_stage->rodata_size);
5269 assert(!prolog2 || !prolog2->rodata_size);
5270 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
5271 !mainb->rodata_size);
5272 assert(!epilog || !epilog->rodata_size);
5273
5274 r600_resource_reference(&shader->bo, NULL);
5275 shader->bo = (struct r600_resource*)
5276 si_aligned_buffer_create(&sscreen->b,
5277 sscreen->cpdma_prefetch_writes_memory ?
5278 0 : R600_RESOURCE_FLAG_READ_ONLY,
5279 PIPE_USAGE_IMMUTABLE,
5280 align(bo_size, SI_CPDMA_ALIGNMENT),
5281 256);
5282 if (!shader->bo)
5283 return -ENOMEM;
5284
5285 /* Upload. */
5286 ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
5287 PIPE_TRANSFER_READ_WRITE |
5288 PIPE_TRANSFER_UNSYNCHRONIZED);
5289
5290 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
5291 * endian-independent. */
5292 if (prolog) {
5293 memcpy(ptr, prolog->code, prolog->code_size);
5294 ptr += prolog->code_size;
5295 }
5296 if (previous_stage) {
5297 memcpy(ptr, previous_stage->code, previous_stage->code_size);
5298 ptr += previous_stage->code_size;
5299 }
5300 if (prolog2) {
5301 memcpy(ptr, prolog2->code, prolog2->code_size);
5302 ptr += prolog2->code_size;
5303 }
5304
5305 memcpy(ptr, mainb->code, mainb->code_size);
5306 ptr += mainb->code_size;
5307
5308 if (epilog)
5309 memcpy(ptr, epilog->code, epilog->code_size);
5310 else if (mainb->rodata_size > 0)
5311 memcpy(ptr, mainb->rodata, mainb->rodata_size);
5312
5313 sscreen->ws->buffer_unmap(shader->bo->buf);
5314 return 0;
5315 }
5316
5317 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
5318 struct pipe_debug_callback *debug,
5319 const char *name, FILE *file)
5320 {
5321 char *line, *p;
5322 unsigned i, count;
5323
5324 if (binary->disasm_string) {
5325 fprintf(file, "Shader %s disassembly:\n", name);
5326 fprintf(file, "%s", binary->disasm_string);
5327
5328 if (debug && debug->debug_message) {
5329 /* Very long debug messages are cut off, so send the
5330 * disassembly one line at a time. This causes more
5331 * overhead, but on the plus side it simplifies
5332 * parsing of resulting logs.
5333 */
5334 pipe_debug_message(debug, SHADER_INFO,
5335 "Shader Disassembly Begin");
5336
5337 line = binary->disasm_string;
5338 while (*line) {
5339 p = util_strchrnul(line, '\n');
5340 count = p - line;
5341
5342 if (count) {
5343 pipe_debug_message(debug, SHADER_INFO,
5344 "%.*s", count, line);
5345 }
5346
5347 if (!*p)
5348 break;
5349 line = p + 1;
5350 }
5351
5352 pipe_debug_message(debug, SHADER_INFO,
5353 "Shader Disassembly End");
5354 }
5355 } else {
5356 fprintf(file, "Shader %s binary:\n", name);
5357 for (i = 0; i < binary->code_size; i += 4) {
5358 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
5359 binary->code[i + 3], binary->code[i + 2],
5360 binary->code[i + 1], binary->code[i]);
5361 }
5362 }
5363 }
5364
/* Estimate the maximum number of waves per SIMD this shader can reach,
 * limited by SGPR, VGPR, and LDS usage, and store it in
 * shader->config.max_simd_waves.
 */
static void si_calculate_max_simd_waves(struct si_shader *shader)
{
	struct si_screen *sscreen = shader->selector->screen;
	struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector->info.num_inputs;
	/* LDS allocation granularity in bytes: 512 on CIK+, 256 before. */
	unsigned lds_increment = sscreen->info.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves;

	switch (sscreen->info.family) {
	/* These always have 8 waves: */
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		max_simd_waves = 8;
		break;
	default:
		max_simd_waves = 10;
	}

	/* Compute LDS usage for PS. */
	switch (shader->selector->type) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			/* Spread the thread-group LDS across its waves. */
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* SGPR file per SIMD: 800 on VI+, 512 before. */
		if (sscreen->info.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	conf->max_simd_waves = max_simd_waves;
}
5429
/* Emit a one-line machine-readable statistics record through the debug
 * callback for shader-db consumption.
 *
 * NOTE(review): the format string is parsed by external shader-db tooling;
 * do not change its wording or field order.
 */
void si_shader_dump_stats_for_shader_db(const struct si_shader *shader,
					struct pipe_debug_callback *debug)
{
	const struct si_shader_config *conf = &shader->config;

	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs,
			   si_get_shader_binary_size(shader),
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   conf->max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
5445
/* Print human-readable shader statistics (and, for fragment shaders, the
 * SPI PS input configuration) to `file`. When check_debug_option is set,
 * output is gated on the per-stage shader-dump debug flag.
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 const struct si_shader *shader,
				 unsigned processor,
				 FILE *file,
				 bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;

	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs,
			si_get_shader_binary_size(shader),
			conf->lds_size, conf->scratch_bytes_per_wave,
			conf->max_simd_waves);
	}
}
5482
5483 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
5484 {
5485 switch (processor) {
5486 case PIPE_SHADER_VERTEX:
5487 if (shader->key.as_es)
5488 return "Vertex Shader as ES";
5489 else if (shader->key.as_ls)
5490 return "Vertex Shader as LS";
5491 else
5492 return "Vertex Shader as VS";
5493 case PIPE_SHADER_TESS_CTRL:
5494 return "Tessellation Control Shader";
5495 case PIPE_SHADER_TESS_EVAL:
5496 if (shader->key.as_es)
5497 return "Tessellation Evaluation Shader as ES";
5498 else
5499 return "Tessellation Evaluation Shader as VS";
5500 case PIPE_SHADER_GEOMETRY:
5501 if (shader->is_gs_copy_shader)
5502 return "GS Copy Shader as VS";
5503 else
5504 return "Geometry Shader";
5505 case PIPE_SHADER_FRAGMENT:
5506 return "Pixel Shader";
5507 case PIPE_SHADER_COMPUTE:
5508 return "Compute Shader";
5509 default:
5510 return "Unknown Shader";
5511 }
5512 }
5513
/* Dump everything known about a shader to `file`: the shader key, the LLVM
 * IR (only when called unconditionally, i.e. check_debug_option == false),
 * the disassembly of every part in execution order, and the statistics.
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor))
		si_dump_shader_key(processor, shader, file);

	/* LLVM IR is only recorded when requested; dump it only on the
	 * unconditional path. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		if (shader->previous_stage &&
		    shader->previous_stage->binary.llvm_ir_string) {
			fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
				si_get_shader_name(shader, processor));
			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
		}

		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (si_can_dump_shader(sscreen, processor) &&
	     !(sscreen->debug_flags & DBG(NO_ASM)))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in the order they are executed. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, processor, file,
			     check_debug_option);
}
5561
5562 static int si_compile_llvm(struct si_screen *sscreen,
5563 struct ac_shader_binary *binary,
5564 struct si_shader_config *conf,
5565 LLVMTargetMachineRef tm,
5566 LLVMModuleRef mod,
5567 struct pipe_debug_callback *debug,
5568 unsigned processor,
5569 const char *name)
5570 {
5571 int r = 0;
5572 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
5573
5574 if (si_can_dump_shader(sscreen, processor)) {
5575 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
5576
5577 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
5578 fprintf(stderr, "%s LLVM IR:\n\n", name);
5579 ac_dump_module(mod);
5580 fprintf(stderr, "\n");
5581 }
5582 }
5583
5584 if (sscreen->record_llvm_ir) {
5585 char *ir = LLVMPrintModuleToString(mod);
5586 binary->llvm_ir_string = strdup(ir);
5587 LLVMDisposeMessage(ir);
5588 }
5589
5590 if (!si_replace_shader(count, binary)) {
5591 r = si_llvm_compile(mod, binary, tm, debug);
5592 if (r)
5593 return r;
5594 }
5595
5596 si_shader_binary_read_config(binary, conf, 0);
5597
5598 /* Enable 64-bit and 16-bit denormals, because there is no performance
5599 * cost.
5600 *
5601 * If denormals are enabled, all floating-point output modifiers are
5602 * ignored.
5603 *
5604 * Don't enable denormals for 32-bit floats, because:
5605 * - Floating-point output modifiers would be ignored by the hw.
5606 * - Some opcodes don't support denormals, such as v_mad_f32. We would
5607 * have to stop using those.
5608 * - SI & CI would be very slow.
5609 */
5610 conf->float_mode |= V_00B028_FP_64_DENORMS;
5611
5612 FREE(binary->config);
5613 FREE(binary->global_symbol_offsets);
5614 binary->config = NULL;
5615 binary->global_symbol_offsets = NULL;
5616
5617 /* Some shaders can't have rodata because their binaries can be
5618 * concatenated.
5619 */
5620 if (binary->rodata_size &&
5621 (processor == PIPE_SHADER_VERTEX ||
5622 processor == PIPE_SHADER_TESS_CTRL ||
5623 processor == PIPE_SHADER_TESS_EVAL ||
5624 processor == PIPE_SHADER_FRAGMENT)) {
5625 fprintf(stderr, "radeonsi: The shader can't have rodata.");
5626 return -EINVAL;
5627 }
5628
5629 return r;
5630 }
5631
5632 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5633 {
5634 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5635 LLVMBuildRetVoid(ctx->ac.builder);
5636 else
5637 LLVMBuildRet(ctx->ac.builder, ret);
5638 }
5639
/* Generate code for the hardware VS shader stage to go with a geometry shader.
 *
 * The GS copy shader reads per-vertex data that the GS wrote to the GSVS
 * ring, performs streamout if needed, and exports the stream-0 outputs
 * as a hardware VS. Returns NULL on allocation or compile failure.
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}

	/* We can leave the fence as permanently signaled because the GS copy
	 * shader only becomes visible globally after it has been compiled. */
	util_queue_fence_init(&shader->ready);

	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	/* The copy shader is compiled as a hardware VS stage. */
	ctx.type = PIPE_SHADER_VERTEX;

	builder = ctx.ac.builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Ring offset of this vertex: 4 bytes per dword, indexed by VertexID. */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, ctx.abi.vertex_id, 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* output_streams packs a 2-bit stream index per channel. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	/* Build a switch on the stream ID with one case block per stream. */
	end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams other than 0 are only useful for streamout. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels not written, or belonging to a
				 * different stream, become undef. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 is exported through the VS output path. */
		if (stream == 0)
			si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(ctx.ac.builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	/* On any failure, free the shader and return NULL. */
	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5791
5792 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5793 const struct si_vs_prolog_bits *prolog,
5794 const char *prefix, FILE *f)
5795 {
5796 fprintf(f, " %s.instance_divisor_is_one = %u\n",
5797 prefix, prolog->instance_divisor_is_one);
5798 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
5799 prefix, prolog->instance_divisor_is_fetched);
5800 fprintf(f, " %s.ls_vgpr_fix = %u\n",
5801 prefix, prolog->ls_vgpr_fix);
5802
5803 fprintf(f, " mono.vs.fix_fetch = {");
5804 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5805 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5806 fprintf(f, "}\n");
5807 }
5808
/* Print the parts of the shader key that are relevant for the given
 * processor type. Used by si_shader_dump.
 */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  as_ls = %u\n", key->as_ls);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9, the LS stage is merged into the TCS, so its
		 * prolog key lives under part.tcs. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, "  part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, "  mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no interesting key bits. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9, the ES stage is merged into the GS; dump the
		 * merged VS prolog key when the ES is a vertex shader. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, "  part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, "  part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, "  part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, "  part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, "  part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, "  part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, "  part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, "  part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, "  part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, "  part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, "  part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, "  part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, "  part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, "  part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Optimization bits only apply to hardware-VS stages. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, "  opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, "  opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5888
5889 static void si_init_shader_ctx(struct si_shader_context *ctx,
5890 struct si_screen *sscreen,
5891 LLVMTargetMachineRef tm)
5892 {
5893 struct lp_build_tgsi_context *bld_base;
5894
5895 si_llvm_context_init(ctx, sscreen, tm);
5896
5897 bld_base = &ctx->bld_base;
5898 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
5899
5900 bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
5901 bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
5902 bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;
5903
5904 bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;
5905
5906 bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;
5907
5908 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
5909 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
5910 bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
5911 bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;
5912
5913 bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
5914 bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
5915 bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
5916 bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
5917 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
5918 bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
5919 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
5920 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
5921 bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;
5922
5923 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_tgsi_emit_vertex;
5924 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_tgsi_emit_primitive;
5925 bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
5926 }
5927
5928 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5929 {
5930 struct si_shader *shader = ctx->shader;
5931 struct tgsi_shader_info *info = &shader->selector->info;
5932
5933 if ((ctx->type != PIPE_SHADER_VERTEX &&
5934 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5935 shader->key.as_ls ||
5936 shader->key.as_es)
5937 return;
5938
5939 ac_optimize_vs_outputs(&ctx->ac,
5940 ctx->main_fn,
5941 shader->info.vs_output_param_offset,
5942 info->num_outputs,
5943 &shader->info.nr_param_exports);
5944 }
5945
5946 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5947 {
5948 ctx->shader->config.private_mem_vgprs = 0;
5949
5950 /* Process all LLVM instructions. */
5951 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5952 while (bb) {
5953 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5954
5955 while (next) {
5956 LLVMValueRef inst = next;
5957 next = LLVMGetNextInstruction(next);
5958
5959 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5960 continue;
5961
5962 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5963 /* No idea why LLVM aligns allocas to 4 elements. */
5964 unsigned alignment = LLVMGetAlignment(inst);
5965 unsigned dw_size = align(ac_get_type_size(type) / 4, alignment);
5966 ctx->shader->config.private_mem_vgprs += dw_size;
5967 }
5968 bb = LLVMGetNextBasicBlock(bb);
5969 }
5970 }
5971
5972 static void si_init_exec_from_input(struct si_shader_context *ctx,
5973 unsigned param, unsigned bitoffset)
5974 {
5975 LLVMValueRef args[] = {
5976 LLVMGetParam(ctx->main_fn, param),
5977 LLVMConstInt(ctx->i32, bitoffset, 0),
5978 };
5979 lp_build_intrinsic(ctx->ac.builder,
5980 "llvm.amdgcn.init.exec.from.input",
5981 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5982 }
5983
5984 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
5985 const struct si_vs_prolog_bits *key)
5986 {
5987 /* VGPR initialization fixup for Vega10 and Raven is always done in the
5988 * VS prolog. */
5989 return sel->vs_needs_prolog || key->ls_vgpr_fix;
5990 }
5991
/**
 * Translate the main shader part from TGSI (or NIR) to LLVM IR.
 *
 * Sets up the per-stage ABI callbacks and fetch/epilogue hooks, creates the
 * LLVM function, handles the GFX9 merged-shader EXEC setup, and runs the
 * TGSI/NIR -> LLVM translation.
 *
 * \return false if the translation failed.
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	// TODO clean all this up!
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		/* The epilogue depends on whether the VS runs as LS, ES,
		 * or a hardware VS. */
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
		ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		ctx->abi.load_tess_varyings = si_nir_load_input_tes;
		ctx->abi.load_tess_coord = si_load_tess_coord;
		ctx->abi.load_tess_level = si_load_tess_level;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		ctx->abi.load_inputs = si_nir_load_input_gs;
		ctx->abi.emit_vertex = si_llvm_emit_vertex;
		ctx->abi.emit_primitive = si_llvm_emit_primitive;
		ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		ctx->abi.emit_outputs = si_llvm_return_fs_outputs;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		ctx->abi.lookup_interp_param = si_nir_lookup_interp_param;
		ctx->abi.load_sample_position = load_sample_position;
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	ctx->abi.load_ubo = load_ubo;
	ctx->abi.load_ssbo = load_ssbo;

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!is_monolithic &&
		    sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			if (!is_monolithic)
				ac_init_exec_full_mask(&ctx->ac);

			/* The barrier must execute for all shaders in a
			 * threadgroup.
			 */
			si_llvm_emit_barrier(NULL, bld_base, NULL);

			/* Disable lanes beyond this wave's thread count
			 * (bits 8..15 of merged_wave_info). */
			LLVMValueRef num_threads = unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
			LLVMValueRef ena =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), num_threads, "");
			lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);
		}
	}

	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->tcs_info.tessfactors_are_def_in_all_invocs) {
		/* Allocas for the invocation-0 tess factors (outer+inner). */
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				lp_build_alloca_undef(&ctx->gallivm, ctx->i32, "");
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* One emitted-vertex counter per vertex stream. */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = lp_build_alloca_undef(&ctx->gallivm, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, LLVMConstInt(ctx->i1, 1, 0),
			       ctx->postponed_kill);
	}

	/* Translate the shader body: TGSI tokens if present, NIR otherwise. */
	if (sel->tokens) {
		if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
			fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
			return false;
		}
	} else {
		if (!si_nir_build_llvm(ctx, sel->nir)) {
			fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
			return false;
		}
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6139
6140 /**
6141 * Compute the VS prolog key, which contains all the information needed to
6142 * build the VS prolog function, and set shader->info bits where needed.
6143 *
6144 * \param info Shader info of the vertex shader.
6145 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
6146 * \param prolog_key Key of the VS prolog
6147 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
6148 * \param key Output shader part key.
6149 */
6150 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
6151 unsigned num_input_sgprs,
6152 const struct si_vs_prolog_bits *prolog_key,
6153 struct si_shader *shader_out,
6154 union si_shader_part_key *key)
6155 {
6156 memset(key, 0, sizeof(*key));
6157 key->vs_prolog.states = *prolog_key;
6158 key->vs_prolog.num_input_sgprs = num_input_sgprs;
6159 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
6160 key->vs_prolog.as_ls = shader_out->key.as_ls;
6161 key->vs_prolog.as_es = shader_out->key.as_es;
6162
6163 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
6164 key->vs_prolog.as_ls = 1;
6165 key->vs_prolog.num_merged_next_stage_vgprs = 2;
6166 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
6167 key->vs_prolog.as_es = 1;
6168 key->vs_prolog.num_merged_next_stage_vgprs = 5;
6169 }
6170
6171 /* Enable loading the InstanceID VGPR. */
6172 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
6173
6174 if ((key->vs_prolog.states.instance_divisor_is_one |
6175 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
6176 shader_out->info.uses_instanceid = true;
6177 }
6178
/**
 * Compute the PS prolog key, which contains all the information needed to
 * build the PS prolog function, and set related bits in shader->config.
 *
 * This includes which color inputs are read, which interpolation VGPRs the
 * prolog must produce for each color, and whether the prolog needs WQM.
 */
static void si_get_ps_prolog_key(struct si_shader *shader,
				 union si_shader_part_key *key,
				 bool separate_prolog)
{
	struct tgsi_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.part.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* WQM is needed when the prolog interpolates and the shader uses
	 * derivatives. */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);
	key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.part.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* Determine the interpolation VGPR index for each of the two
		 * color inputs. */
		for (unsigned i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.part.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 = no interpolation VGPR needed. */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR assignment for non-monolithic shaders
				 * works because InitialPSInputAddr is set on the
				 * main shader and PERSP_PULL_MODEL is never used.
				 */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 6 : 9;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 8 : 11;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 10 : 13;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
6299
6300 /**
6301 * Check whether a PS prolog is required based on the key.
6302 */
6303 static bool si_need_ps_prolog(const union si_shader_part_key *key)
6304 {
6305 return key->ps_prolog.colors_read ||
6306 key->ps_prolog.states.force_persp_sample_interp ||
6307 key->ps_prolog.states.force_linear_sample_interp ||
6308 key->ps_prolog.states.force_persp_center_interp ||
6309 key->ps_prolog.states.force_linear_center_interp ||
6310 key->ps_prolog.states.bc_optimize_for_persp ||
6311 key->ps_prolog.states.bc_optimize_for_linear ||
6312 key->ps_prolog.states.poly_stipple ||
6313 key->ps_prolog.states.samplemask_log_ps_iter;
6314 }
6315
6316 /**
6317 * Compute the PS epilog key, which contains all the information needed to
6318 * build the PS epilog function.
6319 */
6320 static void si_get_ps_epilog_key(struct si_shader *shader,
6321 union si_shader_part_key *key)
6322 {
6323 struct tgsi_shader_info *info = &shader->selector->info;
6324 memset(key, 0, sizeof(*key));
6325 key->ps_epilog.colors_written = info->colors_written;
6326 key->ps_epilog.writes_z = info->writes_z;
6327 key->ps_epilog.writes_stencil = info->writes_stencil;
6328 key->ps_epilog.writes_samplemask = info->writes_samplemask;
6329 key->ps_epilog.states = shader->key.part.ps.epilog;
6330 }
6331
/**
 * Build the GS prolog function. Rotate the input vertices for triangle strips
 * with adjacency.
 *
 * The prolog copies all SGPR/VGPR inputs to the return value and, when
 * tri_strip_adj_fix is set, swaps the vertex indices for every other
 * primitive (selected by the low bit of PrimitiveID).
 */
static void si_build_gs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	unsigned num_sgprs, num_vgprs;
	struct si_function_info fninfo;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef returns[48];
	LLVMValueRef func, ret;

	si_init_function_info(&fninfo);

	if (ctx->screen->info.chip_class >= GFX9) {
		num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
		num_vgprs = 5; /* ES inputs are not needed by GS */
	} else {
		num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
		num_vgprs = 8;
	}

	/* Declare SGPR args, returned as i32. */
	for (unsigned i = 0; i < num_sgprs; ++i) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[i] = ctx->i32;
	}

	/* Declare VGPR args, returned as f32. */
	for (unsigned i = 0; i < num_vgprs; ++i) {
		add_arg(&fninfo, ARG_VGPR, ctx->i32);
		returns[num_sgprs + i] = ctx->f32;
	}

	/* Create the function. */
	si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Set the full EXEC mask for the prolog, because we are only fiddling
	 * with registers here. The main shader part will set the correct EXEC
	 * mask.
	 */
	if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
		ac_init_exec_full_mask(&ctx->ac);

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (unsigned i = 0; i < num_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(builder, ret, p, i, "");
	}
	for (unsigned i = 0; i < num_vgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
	}

	if (key->gs_prolog.states.tri_strip_adj_fix) {
		/* Remap the input vertices for every other primitive. */
		const unsigned gfx6_vtx_params[6] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 3,
			num_sgprs + 4,
			num_sgprs + 5,
			num_sgprs + 6
		};
		const unsigned gfx9_vtx_params[3] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 4,
		};
		LLVMValueRef vtx_in[6], vtx_out[6];
		LLVMValueRef prim_id, rotate;

		/* On GFX9, two 16-bit vertex indices are packed per VGPR;
		 * unpack them into six separate values. */
		if (ctx->screen->info.chip_class >= GFX9) {
			for (unsigned i = 0; i < 3; i++) {
				vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
				vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
			}
		} else {
			for (unsigned i = 0; i < 6; i++)
				vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
		}

		/* Rotate when the low bit of PrimitiveID is set. */
		prim_id = LLVMGetParam(func, num_sgprs + 2);
		rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");

		/* Select between the original and rotated (by 4) order. */
		for (unsigned i = 0; i < 6; ++i) {
			LLVMValueRef base, rotated;
			base = vtx_in[i];
			rotated = vtx_in[(i + 4) % 6];
			vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
		}

		/* Write the (possibly rotated) indices back to the return
		 * value, re-packing them on GFX9. */
		if (ctx->screen->info.chip_class >= GFX9) {
			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef hi, out;

				hi = LLVMBuildShl(builder, vtx_out[i*2+1],
						  LLVMConstInt(ctx->i32, 16, 0), "");
				out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
				out = ac_to_float(&ctx->ac, out);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx9_vtx_params[i], "");
			}
		} else {
			for (unsigned i = 0; i < 6; i++) {
				LLVMValueRef out;

				out = ac_to_float(&ctx->ac, vtx_out[i]);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx6_vtx_params[i], "");
			}
		}
	}

	LLVMBuildRet(builder, ret);
}
6453
/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 *
 * \param parts                  part functions (prologs, main parts, epilogs)
 *                               in execution order
 * \param num_parts              number of entries in \p parts
 * \param main_part              index of the main part, whose parameter types
 *                               define the wrapper's signature
 * \param next_shader_first_part index of the first part of the second half of
 *                               a GFX9 merged shader; 0 when not merged
 */
static void si_build_wrapper_function(struct si_shader_context *ctx,
				      LLVMValueRef *parts,
				      unsigned num_parts,
				      unsigned main_part,
				      unsigned next_shader_first_part)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	/* PS epilog has one arg per color component; gfx9 merged shader
	 * prologs need to forward 32 user SGPRs.
	 */
	struct si_function_info fninfo;
	LLVMValueRef initial[64], out[64];
	LLVMTypeRef function_type;
	unsigned num_first_params;
	unsigned num_out, initial_num_out;
	MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
	MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
	unsigned num_sgprs, num_vgprs;
	unsigned gprs;
	struct lp_build_if_state if_state;

	si_init_function_info(&fninfo);

	/* All parts are inlined into the wrapper, so hide them from the
	 * final binary. */
	for (unsigned i = 0; i < num_parts; ++i) {
		lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
		LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
	}

	/* The parameters of the wrapper function correspond to those of the
	 * first part in terms of SGPRs and VGPRs, but we use the types of the
	 * main part to get the right types. This is relevant for the
	 * dereferenceable attribute on descriptor table pointers.
	 */
	num_sgprs = 0;
	num_vgprs = 0;

	function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
	num_first_params = LLVMCountParamTypes(function_type);

	/* Count the first part's SGPRs and VGPRs in dword units. */
	for (unsigned i = 0; i < num_first_params; ++i) {
		LLVMValueRef param = LLVMGetParam(parts[0], i);

		if (ac_is_sgpr_param(param)) {
			assert(num_vgprs == 0); /* SGPRs must precede VGPRs */
			num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		} else {
			num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		}
	}

	/* Declare wrapper arguments using the main part's parameter types
	 * until the first part's SGPR+VGPR dword count is covered. */
	gprs = 0;
	while (gprs < num_sgprs + num_vgprs) {
		LLVMValueRef param = LLVMGetParam(parts[main_part], fninfo.num_params);
		LLVMTypeRef type = LLVMTypeOf(param);
		unsigned size = ac_get_type_size(type) / 4;

		add_arg(&fninfo, gprs < num_sgprs ? ARG_SGPR : ARG_VGPR, type);

		assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
		/* No parameter may straddle the SGPR/VGPR boundary. */
		assert(gprs + size <= num_sgprs + num_vgprs &&
		       (gprs >= num_sgprs || gprs + size <= num_sgprs));

		gprs += size;
	}

	si_create_function(ctx, "wrapper", NULL, 0, &fninfo,
			   si_get_max_workgroup_size(ctx->shader));

	if (is_merged_shader(ctx->shader))
		ac_init_exec_full_mask(&ctx->ac);

	/* Record the arguments of the function as if they were an output of
	 * a previous part.
	 */
	num_out = 0;
	num_out_sgpr = 0;

	for (unsigned i = 0; i < fninfo.num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
		LLVMTypeRef param_type = LLVMTypeOf(param);
		/* Outputs are stored as i32 dwords (SGPRs) or f32 dwords
		 * (VGPRs). */
		LLVMTypeRef out_type = i < fninfo.num_sgpr_params ? ctx->i32 : ctx->f32;
		unsigned size = ac_get_type_size(param_type) / 4;

		if (size == 1) {
			if (param_type != out_type)
				param = LLVMBuildBitCast(builder, param, out_type, "");
			out[num_out++] = param;
		} else {
			LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

			/* Pointers are converted via i64 first; a direct
			 * pointer->vector bitcast is not possible. */
			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
				param_type = ctx->i64;
			}

			if (param_type != vector_type)
				param = LLVMBuildBitCast(builder, param, vector_type, "");

			/* Split multi-dword parameters into single dwords. */
			for (unsigned j = 0; j < size; ++j)
				out[num_out++] = LLVMBuildExtractElement(
					builder, param, LLVMConstInt(ctx->i32, j, 0), "");
		}

		if (i < fninfo.num_sgpr_params)
			num_out_sgpr = num_out;
	}

	/* Keep a copy of the wrapper inputs; the second half of a merged
	 * shader restarts from these (see below). */
	memcpy(initial, out, sizeof(out));
	initial_num_out = num_out;
	initial_num_out_sgpr = num_out_sgpr;

	/* Now chain the parts. */
	for (unsigned part = 0; part < num_parts; ++part) {
		LLVMValueRef in[48];
		LLVMValueRef ret;
		LLVMTypeRef ret_type;
		unsigned out_idx = 0;
		unsigned num_params = LLVMCountParams(parts[part]);

		/* Merged shaders are executed conditionally depending
		 * on the number of enabled threads passed in the input SGPRs. */
		if (is_merged_shader(ctx->shader) && part == 0) {
			/* The thread count is in the low 7 bits of input
			 * SGPR dword 3. */
			LLVMValueRef ena, count = initial[3];

			count = LLVMBuildAnd(builder, count,
					     LLVMConstInt(ctx->i32, 0x7f, 0), "");
			ena = LLVMBuildICmp(builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), count, "");
			lp_build_if(&if_state, &ctx->gallivm, ena);
		}

		/* Derive arguments for the next part from outputs of the
		 * previous one.
		 */
		for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
			LLVMValueRef param;
			LLVMTypeRef param_type;
			bool is_sgpr;
			unsigned param_size;
			LLVMValueRef arg = NULL;

			param = LLVMGetParam(parts[part], param_idx);
			param_type = LLVMTypeOf(param);
			param_size = ac_get_type_size(param_type) / 4;
			is_sgpr = ac_is_sgpr_param(param);

			if (is_sgpr)
				lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);

			assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
			assert(is_sgpr || out_idx >= num_out_sgpr);

			/* Re-assemble multi-dword parameters from the dword
			 * outputs of the previous part. */
			if (param_size == 1)
				arg = out[out_idx];
			else
				arg = lp_build_gather_values(&ctx->gallivm, &out[out_idx], param_size);

			if (LLVMTypeOf(arg) != param_type) {
				if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
					/* Pointers go through i64 to avoid an
					 * invalid direct bitcast. */
					arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
					arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
				} else {
					arg = LLVMBuildBitCast(builder, arg, param_type, "");
				}
			}

			in[param_idx] = arg;
			out_idx += param_size;
		}

		ret = LLVMBuildCall(builder, parts[part], in, num_params, "");

		if (is_merged_shader(ctx->shader) &&
		    part + 1 == next_shader_first_part) {
			lp_build_endif(&if_state);

			/* The second half of the merged shader should use
			 * the inputs from the toplevel (wrapper) function,
			 * not the return value from the last call.
			 *
			 * That's because the last call was executed condi-
			 * tionally, so we can't consume it in the main
			 * block.
			 */
			memcpy(out, initial, sizeof(initial));
			num_out = initial_num_out;
			num_out_sgpr = initial_num_out_sgpr;
			continue;
		}

		/* Extract the returned GPRs. */
		ret_type = LLVMTypeOf(ret);
		num_out = 0;
		num_out_sgpr = 0;

		if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
			assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

			unsigned ret_size = LLVMCountStructElementTypes(ret_type);

			for (unsigned i = 0; i < ret_size; ++i) {
				LLVMValueRef val =
					LLVMBuildExtractValue(builder, ret, i, "");

				assert(num_out < ARRAY_SIZE(out));
				out[num_out++] = val;

				/* i32 return members count as SGPRs; the
				 * assert checks they precede all f32 (VGPR)
				 * members. */
				if (LLVMTypeOf(val) == ctx->i32) {
					assert(num_out_sgpr + 1 == num_out);
					num_out_sgpr = num_out;
				}
			}
		}
	}

	LLVMBuildRetVoid(builder);
}
6675
/**
 * Compile the main part (or a whole monolithic binary) of a shader from
 * TGSI/NIR into shader->binary.
 *
 * For monolithic shaders, the prolog/epilog parts of this stage — and on
 * GFX9 the main part of the merged previous stage — are also built here and
 * joined into a single wrapper function.
 *
 * \param sscreen       screen
 * \param tm            LLVM target machine
 * \param shader        the shader to compile; results are written into
 *                      shader->binary, shader->config and shader->info
 * \param is_monolithic whether to emit a monolithic binary (all parts
 *                      combined) instead of just the main part
 * \param debug         debug callback
 * \return 0 on success, negative on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (si_can_dump_shader(sscreen, sel->info.processor) &&
	    !(sscreen->debug_flags & DBG(NO_TGSI))) {
		if (sel->tokens)
			tgsi_dump(sel->tokens, 0);
		else
			nir_print_shader(sel->nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS = optional prolog + main part. */
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			/* GFX9 merged LS-HS: optional VS prolog +
			 * VS-as-LS main + TCS main + TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part: compile the LS selector with a
			 * temporary shader struct carrying this shader's key. */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, 0,
						  vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9 TCS: main part + epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			/* GFX9 merged ES-GS: optional ES prolog + ES main +
			 * GS prolog + GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part: compile the ES selector with a
			 * temporary shader struct carrying this shader's key. */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9 GS: prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS = optional prolog + main part + epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	/* Scratch (private memory) counting is only needed for dumps and
	 * the debug callback. */
	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->info.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Per-wave limits shrink when more waves must fit on a SIMD
		 * to accommodate the workgroup. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;
		shader->info.ancillary_vgpr_index = -1;

		/* Each enabled SPI_PS_INPUT_ADDR field occupies a fixed
		 * number of input VGPRs; sum them in hardware order. */
		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.ancillary_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	si_calculate_max_simd_waves(shader);
	si_shader_dump_stats_for_shader_db(shader, debug);
	return 0;
}
6992
6993 /**
6994 * Create, compile and return a shader part (prolog or epilog).
6995 *
6996 * \param sscreen screen
6997 * \param list list of shader parts of the same category
6998 * \param type shader type
6999 * \param key shader part key
7000 * \param prolog whether the part being requested is a prolog
7001 * \param tm LLVM target machine
7002 * \param debug debug callback
7003 * \param build the callback responsible for building the main function
7004 * \return non-NULL on success
7005 */
7006 static struct si_shader_part *
7007 si_get_shader_part(struct si_screen *sscreen,
7008 struct si_shader_part **list,
7009 enum pipe_shader_type type,
7010 bool prolog,
7011 union si_shader_part_key *key,
7012 LLVMTargetMachineRef tm,
7013 struct pipe_debug_callback *debug,
7014 void (*build)(struct si_shader_context *,
7015 union si_shader_part_key *),
7016 const char *name)
7017 {
7018 struct si_shader_part *result;
7019
7020 mtx_lock(&sscreen->shader_parts_mutex);
7021
7022 /* Find existing. */
7023 for (result = *list; result; result = result->next) {
7024 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
7025 mtx_unlock(&sscreen->shader_parts_mutex);
7026 return result;
7027 }
7028 }
7029
7030 /* Compile a new one. */
7031 result = CALLOC_STRUCT(si_shader_part);
7032 result->key = *key;
7033
7034 struct si_shader shader = {};
7035 struct si_shader_context ctx;
7036
7037 si_init_shader_ctx(&ctx, sscreen, tm);
7038 ctx.shader = &shader;
7039 ctx.type = type;
7040
7041 switch (type) {
7042 case PIPE_SHADER_VERTEX:
7043 shader.key.as_ls = key->vs_prolog.as_ls;
7044 shader.key.as_es = key->vs_prolog.as_es;
7045 break;
7046 case PIPE_SHADER_TESS_CTRL:
7047 assert(!prolog);
7048 shader.key.part.tcs.epilog = key->tcs_epilog.states;
7049 break;
7050 case PIPE_SHADER_GEOMETRY:
7051 assert(prolog);
7052 break;
7053 case PIPE_SHADER_FRAGMENT:
7054 if (prolog)
7055 shader.key.part.ps.prolog = key->ps_prolog.states;
7056 else
7057 shader.key.part.ps.epilog = key->ps_epilog.states;
7058 break;
7059 default:
7060 unreachable("bad shader part");
7061 }
7062
7063 build(&ctx, key);
7064
7065 /* Compile. */
7066 si_llvm_optimize_module(&ctx);
7067
7068 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
7069 ctx.ac.module, debug, ctx.type, name)) {
7070 FREE(result);
7071 result = NULL;
7072 goto out;
7073 }
7074
7075 result->next = *list;
7076 *list = result;
7077
7078 out:
7079 si_llvm_dispose(&ctx);
7080 mtx_unlock(&sscreen->shader_parts_mutex);
7081 return result;
7082 }
7083
7084 static LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
7085 {
7086 LLVMValueRef ptr[2], list;
7087 bool is_merged_shader =
7088 ctx->screen->info.chip_class >= GFX9 &&
7089 (ctx->type == PIPE_SHADER_TESS_CTRL ||
7090 ctx->type == PIPE_SHADER_GEOMETRY ||
7091 ctx->shader->key.as_ls || ctx->shader->key.as_es);
7092
7093 /* Get the pointer to rw buffers. */
7094 ptr[0] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
7095 ptr[1] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS_HI);
7096 list = lp_build_gather_values(&ctx->gallivm, ptr, 2);
7097 list = LLVMBuildBitCast(ctx->ac.builder, list, ctx->i64, "");
7098 list = LLVMBuildIntToPtr(ctx->ac.builder, list,
7099 ac_array_in_const_addr_space(ctx->v4i32), "");
7100 return list;
7101 }
7102
/**
 * Build the vertex shader prolog function.
 *
 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
 * All inputs are returned unmodified. The vertex load indices are
 * stored after them, which will be used by the API VS for fetching inputs.
 *
 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
 *   input_v0,
 *   input_v1,
 *   input_v2,
 *   input_v3,
 *   (VertexID + BaseVertex),
 *   (InstanceID + StartInstance),
 *   (InstanceID / 2 + StartInstance)
 */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMTypeRef *returns;
	LLVMValueRef ret, func;
	int num_returns, i;
	/* When merged with the next stage (GFX9), the next stage's VGPRs
	 * come first, so VS VGPRs start at that offset. */
	unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
	unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
	LLVMValueRef input_vgprs[9];
	unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
				      num_input_vgprs;
	/* Merged shaders have 8 system SGPRs in front of the user SGPRs. */
	unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;

	si_init_function_info(&fninfo);

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	num_returns = 0;

	/* Declare input and output SGPRs. */
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[num_returns++] = ctx->i32;
	}

	/* Preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < num_input_vgprs; i++) {
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &input_vgprs[i]);
		returns[num_returns++] = ctx->f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "vs_prolog", returns, num_returns, &fninfo, 0);
	func = ctx->main_fn;

	if (key->vs_prolog.num_merged_next_stage_vgprs) {
		/* Non-monolithic merged shaders set EXEC from the thread
		 * count in input SGPR 3 (monolithic parts get it from the
		 * wrapper). */
		if (!key->vs_prolog.is_monolithic)
			si_init_exec_from_input(ctx, 3, 0);

		if (key->vs_prolog.as_ls &&
		    ctx->screen->has_ls_vgpr_init_bug) {
			/* If there are no HS threads, SPI loads the LS VGPRs
			 * starting at VGPR 0. Shift them back to where they
			 * belong.
			 */
			LLVMValueRef has_hs_threads =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
					      unpack_param(ctx, 3, 8, 8),
					      ctx->i32_0, "");

			for (i = 4; i > 0; --i) {
				input_vgprs[i + 1] =
					LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
							input_vgprs[i + 1],
							input_vgprs[i - 1], "");
			}
		}
	}

	ctx->abi.vertex_id = input_vgprs[first_vs_vgpr];
	/* As LS, the instance ID sits 2 VGPRs after the vertex ID,
	 * otherwise 1. */
	ctx->abi.instance_id = input_vgprs[first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1)];

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}
	for (i = 0; i < num_input_vgprs; i++) {
		LLVMValueRef p = input_vgprs[i];
		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
					   key->vs_prolog.num_input_sgprs + i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	LLVMValueRef instance_divisor_constbuf = NULL;

	if (key->vs_prolog.states.instance_divisor_is_fetched) {
		/* Divisors that aren't 0 or 1 are read from a constant
		 * buffer. */
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
		LLVMValueRef buf_index =
			LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
		instance_divisor_constbuf =
			ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
	}

	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		bool divisor_is_one =
			key->vs_prolog.states.instance_divisor_is_one & (1u << i);
		bool divisor_is_fetched =
			key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
		LLVMValueRef index;

		if (divisor_is_one || divisor_is_fetched) {
			LLVMValueRef divisor = ctx->i32_1;

			if (divisor_is_fetched) {
				/* Each divisor occupies one dword in the
				 * constant buffer. */
				divisor = buffer_load_const(ctx, instance_divisor_constbuf,
							    LLVMConstInt(ctx->i32, i * 4, 0));
				divisor = ac_to_integer(&ctx->ac, divisor);
			}

			/* InstanceID / Divisor + StartInstance */
			index = get_instance_index_for_fetch(ctx,
							     user_sgpr_base +
							     SI_SGPR_START_INSTANCE,
							     divisor);
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(ctx->ac.builder,
					     ctx->abi.vertex_id,
					     LLVMGetParam(func, user_sgpr_base +
							  SI_SGPR_BASE_VERTEX), "");
		}

		/* Load indices are returned as floats after all inputs. */
		index = ac_to_float(&ctx->ac, index);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
					   fninfo.num_params + i, "");
	}

	si_llvm_build_ret(ctx, ret);
}
7249
7250 static bool si_get_vs_prolog(struct si_screen *sscreen,
7251 LLVMTargetMachineRef tm,
7252 struct si_shader *shader,
7253 struct pipe_debug_callback *debug,
7254 struct si_shader *main_part,
7255 const struct si_vs_prolog_bits *key)
7256 {
7257 struct si_shader_selector *vs = main_part->selector;
7258
7259 if (!si_vs_needs_prolog(vs, key))
7260 return true;
7261
7262 /* Get the prolog. */
7263 union si_shader_part_key prolog_key;
7264 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
7265 key, shader, &prolog_key);
7266
7267 shader->prolog =
7268 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7269 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
7270 debug, si_build_vs_prolog_function,
7271 "Vertex Shader Prolog");
7272 return shader->prolog != NULL;
7273 }
7274
7275 /**
7276 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7277 */
7278 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7279 LLVMTargetMachineRef tm,
7280 struct si_shader *shader,
7281 struct pipe_debug_callback *debug)
7282 {
7283 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
7284 &shader->key.part.vs.prolog);
7285 }
7286
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef func;

	si_init_function_info(&fninfo);

	/* Declare the SGPR inputs. Only the params whose indices are saved
	 * into ctx are read by the epilog; the anonymous add_arg() calls are
	 * placeholders that keep the parameter offsets in line with the TCS
	 * main part. */
	if (ctx->screen->info.chip_class >= GFX9) {
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* wave info */
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	} else {
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	}

	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	unsigned tess_factors_idx =
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* patch index within the wave (REL_PATCH_ID) */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* invocation ID within the patch */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* LDS offset where tess factors should be loaded from */

	for (unsigned i = 0; i < 6; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* tess factors */

	/* Create the function. The last argument is the max workgroup size:
	 * 128 on CIK and newer, 64 before. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, &fninfo,
			   ctx->screen->info.chip_class >= CIK ? 128 : 64);
	ac_declare_lds_as_pointer(&ctx->ac);
	func = ctx->main_fn;

	/* The six tess-factor VGPRs start 3 params past REL_PATCH_ID
	 * (after invocation ID and the LDS offset). */
	LLVMValueRef invoc0_tess_factors[6];
	for (unsigned i = 0; i < 6; i++)
		invoc0_tess_factors[i] = LLVMGetParam(func, tess_factors_idx + 3 + i);

	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, tess_factors_idx),
			      LLVMGetParam(func, tess_factors_idx + 1),
			      LLVMGetParam(func, tess_factors_idx + 2),
			      invoc0_tess_factors, invoc0_tess_factors + 4);

	LLVMBuildRetVoid(ctx->ac.builder);
}
7365
7366 /**
7367 * Select and compile (or reuse) TCS parts (epilog).
7368 */
7369 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7370 LLVMTargetMachineRef tm,
7371 struct si_shader *shader,
7372 struct pipe_debug_callback *debug)
7373 {
7374 if (sscreen->info.chip_class >= GFX9) {
7375 struct si_shader *ls_main_part =
7376 shader->key.part.tcs.ls->main_shader_part_ls;
7377
7378 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
7379 &shader->key.part.tcs.ls_prolog))
7380 return false;
7381
7382 shader->previous_stage = ls_main_part;
7383 }
7384
7385 /* Get the epilog. */
7386 union si_shader_part_key epilog_key;
7387 memset(&epilog_key, 0, sizeof(epilog_key));
7388 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
7389
7390 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7391 PIPE_SHADER_TESS_CTRL, false,
7392 &epilog_key, tm, debug,
7393 si_build_tcs_epilog_function,
7394 "Tessellation Control Shader Epilog");
7395 return shader->epilog != NULL;
7396 }
7397
7398 /**
7399 * Select and compile (or reuse) GS parts (prolog).
7400 */
7401 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
7402 LLVMTargetMachineRef tm,
7403 struct si_shader *shader,
7404 struct pipe_debug_callback *debug)
7405 {
7406 if (sscreen->info.chip_class >= GFX9) {
7407 struct si_shader *es_main_part =
7408 shader->key.part.gs.es->main_shader_part_es;
7409
7410 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
7411 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
7412 &shader->key.part.gs.vs_prolog))
7413 return false;
7414
7415 shader->previous_stage = es_main_part;
7416 }
7417
7418 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
7419 return true;
7420
7421 union si_shader_part_key prolog_key;
7422 memset(&prolog_key, 0, sizeof(prolog_key));
7423 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
7424
7425 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
7426 PIPE_SHADER_GEOMETRY, true,
7427 &prolog_key, tm, debug,
7428 si_build_gs_prolog_function,
7429 "Geometry Shader Prolog");
7430 return shader->prolog2 != NULL;
7431 }
7432
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMValueRef ret, func;
	int num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	si_init_function_info(&fninfo);

	/* Declare inputs. */
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		add_arg(&fninfo, ARG_SGPR, ctx->i32);

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Declare outputs (same as inputs + add colors if needed) */
	num_returns = fninfo.num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		fninfo.types[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", fninfo.types, num_returns,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < fninfo.num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		/* NOTE: this inner `i` shadows the outer `int i` above. */
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(ctx->ac.builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(ctx->ac.builder, bc_optimize,
					     ctx->i1, "");

		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors.
	 * Up to two color inputs; each contributes its written channels as
	 * extra return values appended after the pass-through parameters.
	 */
	unsigned color_out_idx = 0;
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(&ctx->gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = ac_to_integer(&ctx->ac, face);
		}

		/* interp_fs_input fills color[]; only the channels in
		 * writemask are consumed below.
		 */
		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret, color[chan],
						   fninfo.num_params + color_out_idx++, "");
		}
	}

	/* Section 15.2.2 (Shader Inputs) of the OpenGL 4.5 (Core Profile) spec
	 * says:
	 *
	 *    "When per-sample shading is active due to the use of a fragment
	 *     input qualified by sample or due to the use of the gl_SampleID
	 *     or gl_SamplePosition variables, only the bit for the current
	 *     sample is set in gl_SampleMaskIn. When state specifies multiple
	 *     fragment shader invocations for a given fragment, the sample
	 *     mask for any single fragment shader invocation may specify a
	 *     subset of the covered samples for the fragment. In this case,
	 *     the bit corresponding to each covered sample will be set in
	 *     exactly one fragment shader invocation."
	 *
	 * The samplemask loaded by hardware is always the coverage of the
	 * entire pixel/fragment, so mask bits out based on the sample ID.
	 */
	if (key->ps_prolog.states.samplemask_log_ps_iter) {
		/* The bit pattern matches that used by fixed function fragment
		 * processing. */
		static const uint16_t ps_iter_masks[] = {
			0xffff, /* not used */
			0x5555,
			0x1111,
			0x0101,
			0x0001,
		};
		assert(key->ps_prolog.states.samplemask_log_ps_iter < ARRAY_SIZE(ps_iter_masks));

		uint32_t ps_iter_mask = ps_iter_masks[key->ps_prolog.states.samplemask_log_ps_iter];
		unsigned ancillary_vgpr = key->ps_prolog.num_input_sgprs +
					  key->ps_prolog.ancillary_vgpr_index;
		/* Sample ID is bits [11:8] of the ancillary VGPR. */
		LLVMValueRef sampleid = unpack_param(ctx, ancillary_vgpr, 8, 4);
		LLVMValueRef samplemask = LLVMGetParam(func, ancillary_vgpr + 1);

		/* samplemask &= ps_iter_mask << sampleid */
		samplemask = ac_to_integer(&ctx->ac, samplemask);
		samplemask = LLVMBuildAnd(
			ctx->ac.builder,
			samplemask,
			LLVMBuildShl(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, ps_iter_mask, false),
				     sampleid, ""),
			"");
		samplemask = ac_to_float(&ctx->ac, samplemask);

		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, samplemask,
					   ancillary_vgpr + 1, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7711
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int i;
	struct si_ps_exports exp = {};

	si_init_function_info(&fninfo);

	/* Declare input SGPRs. */
	ctx->param_rw_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_bindless_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_const_and_shader_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);

	/* Declare input VGPRs: 4 channels per written color, plus one each
	 * for Z, stencil, and samplemask if written.
	 */
	unsigned required_num_params =
		     fninfo.num_sgpr_params +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Reserve at least up to the samplemask slot so the VGPR layout
	 * matches the main shader part. */
	required_num_params = MAX2(required_num_params,
				   fninfo.num_sgpr_params + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	while (fninfo.num_params < required_num_params)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, &fninfo, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = fninfo.num_sgpr_params;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so that export can be marked "done".
	 * Only applies when no Z/stencil/samplemask export will follow. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1ull << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written color (4 consecutive VGPRs per MRT). */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    fninfo.num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		/* The hw requires at least one export; emit a null export. */
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(ctx->ac.builder);
}
7810
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also fixes up SPI_PS_INPUT_ENA so the enabled inputs match what the
 * selected prolog/epilog and hardware require.
 *
 * \return true on success, false if any part failed to compile.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed:
	 * replace the center/centroid enables with the sample enable,
	 * matching the prolog's overwriting of those VGPRs.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	/* Likewise for forced center interpolation. */
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID. */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7913
7914 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7915 unsigned *lds_size)
7916 {
7917 /* SPI barrier management bug:
7918 * Make sure we have at least 4k of LDS in use to avoid the bug.
7919 * It applies to workgroup sizes of more than one wavefront.
7920 */
7921 if (sscreen->info.family == CHIP_BONAIRE ||
7922 sscreen->info.family == CHIP_KABINI ||
7923 sscreen->info.family == CHIP_MULLINS)
7924 *lds_size = MAX2(*lds_size, 8);
7925 }
7926
7927 static void si_fix_resource_usage(struct si_screen *sscreen,
7928 struct si_shader *shader)
7929 {
7930 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7931
7932 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7933
7934 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7935 si_get_max_workgroup_size(shader) > 64) {
7936 si_multiwave_lds_size_workaround(sscreen,
7937 &shader->config.lds_size);
7938 }
7939 }
7940
/**
 * Create a shader variant: either compile the whole shader monolithically,
 * or reuse the precompiled main part and attach the selected prolog/epilog
 * parts, then fix resource usage, dump stats, and upload the binary.
 *
 * \return 0 on success, negative on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return -1;

		/* Copy the compiled TGSI shader data over. The binary is
		 * shared, not duplicated (see is_binary_shared).
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			/* TES has no prolog/epilog parts. */
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader must be
		 * able to run every part, so take the maximum of all parts.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
		si_calculate_max_simd_waves(shader);
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8079
8080 void si_shader_destroy(struct si_shader *shader)
8081 {
8082 if (shader->scratch_bo)
8083 r600_resource_reference(&shader->scratch_bo, NULL);
8084
8085 r600_resource_reference(&shader->bo, NULL);
8086
8087 if (!shader->is_binary_shared)
8088 ac_shader_binary_clean(&shader->binary);
8089
8090 free(shader->shader_log);
8091 }