ac/radeonsi: add tcs_rel_ids to the abi
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "gallivm/lp_bld_const.h"
25 #include "gallivm/lp_bld_gather.h"
26 #include "gallivm/lp_bld_intr.h"
27 #include "gallivm/lp_bld_logic.h"
28 #include "gallivm/lp_bld_arit.h"
29 #include "gallivm/lp_bld_flow.h"
30 #include "gallivm/lp_bld_misc.h"
31 #include "util/u_memory.h"
32 #include "util/u_string.h"
33 #include "tgsi/tgsi_build.h"
34 #include "tgsi/tgsi_util.h"
35 #include "tgsi/tgsi_dump.h"
36
37 #include "ac_binary.h"
38 #include "ac_llvm_util.h"
39 #include "ac_exp_param.h"
40 #include "ac_shader_util.h"
41 #include "si_shader_internal.h"
42 #include "si_pipe.h"
43 #include "sid.h"
44
45 #include "compiler/nir/nir.h"
46
/* Names of the two external symbols that stand in for the scratch buffer
 * resource descriptor dwords; presumably resolved/patched after compilation
 * by the scratch relocation code — see uses elsewhere in this file. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
52
/* One shader output: up to 4 channel values plus the TGSI semantic that
 * identifies it and a per-channel stream index (used for GS streamout). */
struct si_shader_output_values
{
	LLVMValueRef values[4];    /* one LLVM value per channel (x,y,z,w) */
	unsigned semantic_name;    /* TGSI_SEMANTIC_* */
	unsigned semantic_index;   /* index for array-like semantics (e.g. GENERIC[i]) */
	ubyte vertex_stream[4];    /* stream each channel belongs to */
};
60
/**
 * Used to collect types and other info about arguments of the LLVM function
 * before the function is created.
 */
struct si_function_info {
	LLVMTypeRef types[100];     /* argument types, in declaration order */
	LLVMValueRef *assign[100];  /* optional out-pointer per argument; the created
				     * LLVM parameter value is stored through it later */
	unsigned num_sgpr_params;   /* SGPR args always precede VGPR args */
	unsigned num_params;        /* total number of arguments added so far */
};
71
/* Which hardware register file a function argument is passed in. */
enum si_arg_regfile {
	ARG_SGPR,
	ARG_VGPR
};
76
77 static void si_init_shader_ctx(struct si_shader_context *ctx,
78 struct si_screen *sscreen,
79 LLVMTargetMachineRef tm);
80
81 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
82 struct lp_build_tgsi_context *bld_base,
83 struct lp_build_emit_data *emit_data);
84
85 static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
86 FILE *f);
87
88 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
89 union si_shader_part_key *key);
90 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
91 union si_shader_part_key *key);
92 static void si_build_ps_prolog_function(struct si_shader_context *ctx,
93 union si_shader_part_key *key);
94 static void si_build_ps_epilog_function(struct si_shader_context *ctx,
95 union si_shader_part_key *key);
96
97 /* Ideally pass the sample mask input to the PS epilog as v14, which
98 * is its usual location, so that the shader doesn't have to add v_mov.
99 */
100 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 14
101
/* LLVM address-space numbers used by the AMDGPU backend:
 * constant memory and local (LDS) memory. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};
106
107 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
108 LLVMTypeRef type)
109 {
110 if (type == ctx->ac.i64 || type == ctx->ac.f64)
111 return true;
112
113 return false;
114 }
115
116 static bool is_merged_shader(struct si_shader *shader)
117 {
118 if (shader->selector->screen->info.chip_class <= VI)
119 return false;
120
121 return shader->key.as_ls ||
122 shader->key.as_es ||
123 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
124 shader->selector->type == PIPE_SHADER_GEOMETRY;
125 }
126
127 static void si_init_function_info(struct si_function_info *fninfo)
128 {
129 fninfo->num_params = 0;
130 fninfo->num_sgpr_params = 0;
131 }
132
133 static unsigned add_arg_assign(struct si_function_info *fninfo,
134 enum si_arg_regfile regfile, LLVMTypeRef type,
135 LLVMValueRef *assign)
136 {
137 assert(regfile != ARG_SGPR || fninfo->num_sgpr_params == fninfo->num_params);
138
139 unsigned idx = fninfo->num_params++;
140 assert(idx < ARRAY_SIZE(fninfo->types));
141
142 if (regfile == ARG_SGPR)
143 fninfo->num_sgpr_params = fninfo->num_params;
144
145 fninfo->types[idx] = type;
146 fninfo->assign[idx] = assign;
147 return idx;
148 }
149
/* Append one argument without an assignment destination. */
static unsigned add_arg(struct si_function_info *fninfo,
			enum si_arg_regfile regfile, LLVMTypeRef type)
{
	return add_arg_assign(fninfo, regfile, type, NULL);
}
155
/* Like add_arg_assign, but assert that the argument lands at index "idx".
 * Used when later code hard-codes argument positions. */
static void add_arg_assign_checked(struct si_function_info *fninfo,
				   enum si_arg_regfile regfile, LLVMTypeRef type,
				   LLVMValueRef *assign, unsigned idx)
{
	MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
	assert(actual == idx);
}
163
/* Like add_arg, but assert that the argument lands at index "idx". */
static void add_arg_checked(struct si_function_info *fninfo,
			    enum si_arg_regfile regfile, LLVMTypeRef type,
			    unsigned idx)
{
	add_arg_assign_checked(fninfo, regfile, type, NULL, idx);
}
170
171 /**
172 * Returns a unique index for a per-patch semantic name and index. The index
173 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
174 * can be calculated.
175 */
176 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
177 {
178 switch (semantic_name) {
179 case TGSI_SEMANTIC_TESSOUTER:
180 return 0;
181 case TGSI_SEMANTIC_TESSINNER:
182 return 1;
183 case TGSI_SEMANTIC_PATCH:
184 assert(index < 30);
185 return 2 + index;
186
187 default:
188 assert(!"invalid semantic name");
189 return 0;
190 }
191 }
192
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), so GENERIC should be placed
		 * right after POSITION to make that size as small as
		 * possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index <= 1);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 4;
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 5;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6;
	case TGSI_SEMANTIC_PRIMID:
		return SI_MAX_IO_GENERIC + 7;
	case TGSI_SEMANTIC_COLOR: /* these alias */
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 8 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		/* Guard the 64-entry limit documented above. */
		assert(SI_MAX_IO_GENERIC + 10 + index < 64);
		return SI_MAX_IO_GENERIC + 10 + index;
	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
240
241 /**
242 * Get the value of a shader input parameter and extract a bitfield.
243 */
244 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
245 LLVMValueRef value, unsigned rshift,
246 unsigned bitwidth)
247 {
248 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
249 value = ac_to_integer(&ctx->ac, value);
250
251 if (rshift)
252 value = LLVMBuildLShr(ctx->ac.builder, value,
253 LLVMConstInt(ctx->i32, rshift, 0), "");
254
255 if (rshift + bitwidth < 32) {
256 unsigned mask = (1 << bitwidth) - 1;
257 value = LLVMBuildAnd(ctx->ac.builder, value,
258 LLVMConstInt(ctx->i32, mask, 0), "");
259 }
260
261 return value;
262 }
263
/* Like unpack_llvm_param, but reads the value from main function
 * parameter "param" first. */
static LLVMValueRef unpack_param(struct si_shader_context *ctx,
				 unsigned param, unsigned rshift,
				 unsigned bitwidth)
{
	LLVMValueRef value = LLVMGetParam(ctx->main_fn, param);

	return unpack_llvm_param(ctx, value, rshift, bitwidth);
}
272
/* Return the relative patch ID for the current tessellation thread.
 * For TCS it lives in bits [0:7] of the tcs_rel_ids ABI input; for TES it
 * is a dedicated function parameter. Other stages have no patch ID. */
static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		return unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}
288
289 /* Tessellation shaders pass outputs to the next shader using LDS.
290 *
291 * LS outputs = TCS inputs
292 * TCS outputs = TES inputs
293 *
294 * The LDS layout is:
295 * - TCS inputs for patch 0
296 * - TCS inputs for patch 1
297 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
298 * - ...
299 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
300 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
301 * - TCS outputs for patch 1
302 * - Per-patch TCS outputs for patch 1
303 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
304 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
305 * - ...
306 *
307 * All three shaders VS(LS), TCS, TES share the same LDS space.
308 */
309
/* Stride of one patch of TCS inputs in LDS, in dwords
 * (packed in bits [8:20] of vs_state_bits). */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
315
/* Dword stride of one TCS output vertex: 4 dwords per written output.
 * The fixed-func TCS copies only the inputs listed in the shader key. */
static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
{
	assert(ctx->type == PIPE_SHADER_TESS_CTRL);

	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;

	return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
}
325
326 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
327 {
328 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
329
330 return LLVMConstInt(ctx->i32, stride, 0);
331 }
332
/* Dword stride of one patch of TCS outputs in LDS:
 * per-vertex outputs for all output vertices plus the per-patch outputs. */
static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	/* The fixed-func TCS gets the stride from a shader argument instead. */
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);

	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}
346
/* LDS dword offset of the TCS outputs of patch 0
 * (bits [0:16] of tcs_out_lds_offsets, stored in units of 4 dwords). */
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     0, 16),
				4);
}
356
/* LDS dword offset of the per-patch TCS outputs of patch 0
 * (bits [16:32] of tcs_out_lds_offsets, stored in units of 4 dwords). */
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->bld_base.uint_bld,
				unpack_param(ctx,
					     ctx->param_tcs_out_lds_offsets,
					     16, 16),
				4);
}
366
367 static LLVMValueRef
368 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
369 {
370 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
371 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
372
373 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
374 }
375
/* LDS dword offset of the TCS outputs of the current patch:
 * patch0 offset + rel_patch_id * output patch stride. */
static LLVMValueRef
get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}
388
/* LDS dword offset of the per-patch TCS outputs of the current patch:
 * patch0 per-patch-data offset + rel_patch_id * output patch stride. */
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}
402
/* Number of TCS output vertices per patch, as an i32 value.
 * Known at compile time for a real TCS; otherwise read from
 * tcs_offchip_layout bits [6:12]. */
static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	return unpack_param(ctx, ctx->param_tcs_offchip_layout, 6, 6);
}
415
/* Dword stride of one TCS input vertex (= one LS output vertex).
 * Constant when the LS outputs are known at compile time, otherwise read
 * from vs_state_bits [24:32]. */
static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		/* VS-as-LS: the stride is our own outputs. */
		stride = util_last_bit64(ctx->shader->selector->outputs_written);
		return LLVMConstInt(ctx->i32, stride * 4, 0);

	case PIPE_SHADER_TESS_CTRL:
		/* Monolithic GFX9 TCS knows the merged LS part's outputs. */
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = util_last_bit64(ctx->shader->key.part.tcs.ls->outputs_written);
			return LLVMConstInt(ctx->i32, stride * 4, 0);
		}
		return unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}
438
/* Compute the index used for instanced vertex fetches:
 * instance_id / divisor + start_instance. */
static LLVMValueRef get_instance_index_for_fetch(
	struct si_shader_context *ctx,
	unsigned param_start_instance, LLVMValueRef divisor)
{
	LLVMValueRef result = ctx->abi.instance_id;

	/* The division must be done before START_INSTANCE is added. */
	if (divisor != ctx->i32_1)
		result = LLVMBuildUDiv(ctx->ac.builder, result, divisor, "");

	return LLVMBuildAdd(ctx->ac.builder, result,
			    LLVMGetParam(ctx->main_fn, param_start_instance), "");
}
452
453 /* Bitcast <4 x float> to <2 x double>, extract the component, and convert
454 * to float. */
455 static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
456 LLVMValueRef vec4,
457 unsigned double_index)
458 {
459 LLVMBuilderRef builder = ctx->ac.builder;
460 LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->ac.context);
461 LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
462 LLVMVectorType(f64, 2), "");
463 LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
464 LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
465 return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
466 }
467
468 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
469 LLVMValueRef i32, unsigned index)
470 {
471 assert(index <= 1);
472
473 if (index == 1)
474 return LLVMBuildAShr(ctx->ac.builder, i32,
475 LLVMConstInt(ctx->i32, 16, 0), "");
476
477 return LLVMBuildSExt(ctx->ac.builder,
478 LLVMBuildTrunc(ctx->ac.builder, i32,
479 ctx->ac.i16, ""),
480 ctx->i32, "");
481 }
482
/**
 * Load a vertex shader input into out[0..3] as f32 values.
 *
 * Blit shaders (TGSI_PROPERTY_VS_BLIT_SGPRS set) read their "attributes"
 * from SGPR inputs instead of vertex buffers. All other shaders fetch from
 * the vertex buffer descriptor list and then apply per-format fixups
 * selected by the shader key (key.mono.vs_fix_fetch).
 *
 * \param input_index  index of the vertex attribute to load
 * \param out          receives the 4 loaded/converted channels
 */
void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	unsigned vs_blit_property =
		ctx->shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

	if (vs_blit_property) {
		/* Blit inputs come from SGPRs. Vertices 0..2 form the blit
		 * triangle; select between (x1,y1) and (x2,y2) per vertex. */
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 ctx->param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      ctx->param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       ctx->param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      ctx->param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned chan;
	unsigned fix_fetch;
	unsigned num_fetches;
	unsigned fetch_stride;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef input[3];

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->param_vertex_index0 +
				    input_index);

	fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];

	/* Do multiple loads for special formats. */
	switch (fix_fetch) {
	case SI_FIX_FETCH_RGB_64_FLOAT:
		num_fetches = 3; /* 3 2-dword loads */
		fetch_stride = 8;
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		num_fetches = 2; /* 2 4-dword loads */
		fetch_stride = 16;
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
		num_fetches = 3;
		fetch_stride = 1;
		break;
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		num_fetches = 3;
		fetch_stride = 2;
		break;
	default:
		num_fetches = 1;
		fetch_stride = 0;
	}

	for (unsigned i = 0; i < num_fetches; i++) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);

		input[i] = ac_build_buffer_load_format(&ctx->ac, t_list,
						       vertex_index, voffset,
						       true);
	}

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, 0);
		out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
						    input[0], llvm_chan, "");
	}

	switch (fix_fetch) {
	case SI_FIX_FETCH_A2_SNORM:
	case SI_FIX_FETCH_A2_SSCALED:
	case SI_FIX_FETCH_A2_SINT: {
		/* The hardware returns an unsigned value; convert it to a
		 * signed one.
		 */
		LLVMValueRef tmp = out[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch == SI_FIX_FETCH_A2_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch == SI_FIX_FETCH_A2_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch == SI_FIX_FETCH_A2_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			/* Clamp -2 (the sign-extended minimum) to -1.0. */
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch == SI_FIX_FETCH_A2_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		out[3] = tmp;
		break;
	}
	case SI_FIX_FETCH_RGBA_32_UNORM:
	case SI_FIX_FETCH_RGBX_32_UNORM:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
		}
		/* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_32_SNORM:
	case SI_FIX_FETCH_RGBX_32_SNORM:
	case SI_FIX_FETCH_RGBA_32_FIXED:
	case SI_FIX_FETCH_RGBX_32_FIXED: {
		double scale;
		if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
			scale = 1.0 / 0x10000; /* FIXED is 16.16 fixed point */
		else
			scale = 1.0 / INT_MAX;

		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
			out[chan] = LLVMBuildFMul(ctx->ac.builder, out[chan],
						  LLVMConstReal(ctx->f32, scale), "");
		}
		/* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
		if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
		    fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
			out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	}
	case SI_FIX_FETCH_RGBA_32_USCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildUIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RGBA_32_SSCALED:
		for (chan = 0; chan < 4; chan++) {
			out[chan] = ac_to_integer(&ctx->ac, out[chan]);
			out[chan] = LLVMBuildSIToFP(ctx->ac.builder,
						    out[chan], ctx->f32, "");
		}
		break;
	case SI_FIX_FETCH_RG_64_FLOAT:
		for (chan = 0; chan < 2; chan++)
			out[chan] = extract_double_to_float(ctx, input[0], chan);

		out[2] = LLVMConstReal(ctx->f32, 0);
		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGB_64_FLOAT:
		/* One 2-dword load per double component. */
		for (chan = 0; chan < 3; chan++)
			out[chan] = extract_double_to_float(ctx, input[chan], 0);

		out[3] = LLVMConstReal(ctx->f32, 1);
		break;
	case SI_FIX_FETCH_RGBA_64_FLOAT:
		/* Two doubles per 4-dword load. */
		for (chan = 0; chan < 4; chan++) {
			out[chan] = extract_double_to_float(ctx, input[chan / 2],
							    chan % 2);
		}
		break;
	case SI_FIX_FETCH_RGB_8:
	case SI_FIX_FETCH_RGB_8_INT:
	case SI_FIX_FETCH_RGB_16:
	case SI_FIX_FETCH_RGB_16_INT:
		/* One load per component; take .x of each. */
		for (chan = 0; chan < 3; chan++) {
			out[chan] = LLVMBuildExtractElement(ctx->ac.builder,
							    input[chan],
							    ctx->i32_0, "");
		}
		if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
		    fix_fetch == SI_FIX_FETCH_RGB_16) {
			out[3] = LLVMConstReal(ctx->f32, 1);
		} else {
			/* Integer formats: alpha is the integer 1 bit pattern. */
			out[3] = ac_to_float(&ctx->ac, ctx->i32_1);
		}
		break;
	}
}
751
/* TGSI input-declaration hook for VS; the declaration itself is unused,
 * all work happens in si_llvm_load_input_vs. */
static void declare_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}
760
/* Return the primitive ID for the current stage (x channel only;
 * other swizzles read as 0). */
static LLVMValueRef get_primitive_id(struct si_shader_context *ctx,
				     unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return LLVMGetParam(ctx->main_fn,
				    ctx->param_vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ctx->abi.tcs_patch_id;
	case PIPE_SHADER_TESS_EVAL:
		return ctx->abi.tes_patch_id;
	case PIPE_SHADER_GEOMETRY:
		return ctx->abi.gs_prim_id;
	default:
		assert(0);
		return ctx->i32_0;
	}
}
782
/**
 * Return the value of tgsi_ind_register for indexing.
 * This is the indirect index with the constant offset added to it.
 *
 * The result is: fetched_index * addr_mul + rel_index.
 */
LLVMValueRef si_get_indirect_index(struct si_shader_context *ctx,
				   const struct tgsi_ind_register *ind,
				   unsigned addr_mul,
				   int rel_index)
{
	LLVMValueRef result;

	if (ind->File == TGSI_FILE_ADDRESS) {
		/* Address registers live in ctx->addrs; load the current value. */
		result = ctx->addrs[ind->Index][ind->Swizzle];
		result = LLVMBuildLoad(ctx->ac.builder, result, "");
	} else {
		/* Any other file: fetch through the register file's fetch func. */
		struct tgsi_full_src_register src = {};

		src.Register.File = ind->File;
		src.Register.Index = ind->Index;

		/* Set the second index to 0 for constants. */
		if (ind->File == TGSI_FILE_CONSTANT)
			src.Register.Dimension = 1;

		result = ctx->bld_base.emit_fetch_funcs[ind->File](&ctx->bld_base, &src,
								   TGSI_TYPE_SIGNED,
								   ind->Swizzle);
		result = ac_to_integer(&ctx->ac, result);
	}

	if (addr_mul != 1)
		result = LLVMBuildMul(ctx->ac.builder, result,
				      LLVMConstInt(ctx->i32, addr_mul, 0), "");
	result = LLVMBuildAdd(ctx->ac.builder, result,
			      LLVMConstInt(ctx->i32, rel_index, 0), "");
	return result;
}
820
/**
 * Like si_get_indirect_index, but restricts the return value to a (possibly
 * undefined) value inside [0..num).
 */
LLVMValueRef si_get_bounded_indirect_index(struct si_shader_context *ctx,
					   const struct tgsi_ind_register *ind,
					   int rel_index, unsigned num)
{
	LLVMValueRef result = si_get_indirect_index(ctx, ind, 1, rel_index);

	return si_llvm_bound_index(ctx, result, num);
}
833
/* Compute an LDS dword address from already-resolved indices:
 *   base_addr
 *   + vertex_index * vertex_dw_stride   (if per-vertex)
 *   + param_index * 4                   (if indirectly indexed)
 *   + unique_index(name, index) * 4
 *
 * "name"/"index" are the semantic arrays from tgsi_shader_info; the entry
 * at input_index selects the IO slot. */
static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							unsigned input_index,
							ubyte *name,
							ubyte *index,
							bool is_patch)
{
	if (vertex_dw_stride) {
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, vertex_index,
						      vertex_dw_stride, ""), "");
	}

	if (param_index) {
		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 LLVMBuildMul(ctx->ac.builder, param_index,
						      LLVMConstInt(ctx->i32, 4, 0), ""), "");
	}

	int param = is_patch ?
		si_shader_io_get_unique_index_patch(name[input_index],
						    index[input_index]) :
		si_shader_io_get_unique_index(name[input_index],
					      index[input_index]);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}
866
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * Exactly one of "dst"/"src" is non-NULL; it names the TGSI register whose
 * LDS address is wanted. Indirect and 2-dimensional (per-vertex) addressing
 * are resolved here, then delegated to
 * get_dw_address_from_generic_indices().
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int input_index;
	struct tgsi_full_dst_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef ind_index = NULL;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		if (reg.Dimension.Indirect)
			vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
							     1, reg.Dimension.Index);
		else
			vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		if (reg.Indirect.ArrayID)
			input_index = array_first[reg.Indirect.ArrayID];
		else
			input_index = reg.Register.Index;

		ind_index = si_get_indirect_index(ctx, &reg.Indirect,
						  1, reg.Register.Index - input_index);
	} else {
		input_index = reg.Register.Index;
	}

	return get_dw_address_from_generic_indices(ctx, vertex_dw_stride,
						   base_addr, vertex_index,
						   ind_index, input_index,
						   name, index,
						   !reg.Register.Dimension);
}
939
940 /* The offchip buffer layout for TCS->TES is
941 *
942 * - attribute 0 of patch 0 vertex 0
943 * - attribute 0 of patch 0 vertex 1
944 * - attribute 0 of patch 0 vertex 2
945 * ...
946 * - attribute 0 of patch 1 vertex 0
947 * - attribute 0 of patch 1 vertex 1
948 * ...
949 * - attribute 1 of patch 0 vertex 0
950 * - attribute 1 of patch 0 vertex 1
951 * ...
952 * - per patch attribute 0 of patch 0
953 * - per patch attribute 0 of patch 1
954 * ...
955 *
956 * Note that every attribute has 4 components.
957 */
/* Compute a byte address in the off-chip TCS->TES buffer (layout described
 * in the comment above). With a vertex_index the address targets a
 * per-vertex attribute; without one it targets a per-patch attribute.
 * Attributes are 16 bytes (vec4) each. */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: index = rel_patch_id * verts/patch + vertex;
		 * consecutive values of one attribute span all vertices. */
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: one slot per patch. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes;
		 * the offset is packed in tcs_offchip_layout [12:32]. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, ctx->param_tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
1000
/* This is a generic helper that can be shared by the NIR and TGSI backends.
 * It folds the semantic's unique slot index into param_index and resolves
 * the off-chip buffer address for the current patch. */
static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
					struct si_shader_context *ctx,
					LLVMValueRef vertex_index,
					LLVMValueRef param_index,
					unsigned param_base,
					ubyte *name,
					ubyte *index,
					bool is_patch)
{
	unsigned param_index_base;

	param_index_base = is_patch ?
		si_shader_io_get_unique_index_patch(name[param_base], index[param_base]) :
		si_shader_io_get_unique_index(name[param_base], index[param_base]);

	if (param_index) {
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, param_index_base, 0),
					   "");
	} else {
		param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
	}

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}
1028
1029 static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
1030 struct si_shader_context *ctx,
1031 const struct tgsi_full_dst_register *dst,
1032 const struct tgsi_full_src_register *src)
1033 {
1034 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1035 ubyte *name, *index, *array_first;
1036 struct tgsi_full_src_register reg;
1037 LLVMValueRef vertex_index = NULL;
1038 LLVMValueRef param_index = NULL;
1039 unsigned param_base;
1040
1041 reg = src ? *src : tgsi_full_src_register_from_dst(dst);
1042
1043 if (reg.Register.Dimension) {
1044
1045 if (reg.Dimension.Indirect)
1046 vertex_index = si_get_indirect_index(ctx, &reg.DimIndirect,
1047 1, reg.Dimension.Index);
1048 else
1049 vertex_index = LLVMConstInt(ctx->i32, reg.Dimension.Index, 0);
1050 }
1051
1052 /* Get information about the register. */
1053 if (reg.Register.File == TGSI_FILE_INPUT) {
1054 name = info->input_semantic_name;
1055 index = info->input_semantic_index;
1056 array_first = info->input_array_first;
1057 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
1058 name = info->output_semantic_name;
1059 index = info->output_semantic_index;
1060 array_first = info->output_array_first;
1061 } else {
1062 assert(0);
1063 return NULL;
1064 }
1065
1066 if (reg.Register.Indirect) {
1067 if (reg.Indirect.ArrayID)
1068 param_base = array_first[reg.Indirect.ArrayID];
1069 else
1070 param_base = reg.Register.Index;
1071
1072 param_index = si_get_indirect_index(ctx, &reg.Indirect,
1073 1, reg.Register.Index - param_base);
1074
1075 } else {
1076 param_base = reg.Register.Index;
1077 }
1078
1079 return get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
1080 param_index, param_base,
1081 name, index, !reg.Register.Dimension);
1082 }
1083
/**
 * Load 1, 2 (64-bit) or 4 components from a TCS/TES offchip buffer.
 *
 * \param type     element type of the result
 * \param swizzle  component to load (0..3), or ~0 to load a whole vec4
 * \param buffer   buffer resource descriptor
 * \param offset/base  forwarded to ac_build_buffer_load as its two offset
 *                     operands (base first, then offset)
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
			        LLVMTypeRef type, unsigned swizzle,
			        LLVMValueRef buffer, LLVMValueRef offset,
			        LLVMValueRef base, bool can_speculate)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	/* Whole vec4 requested: one 4-dword load, bitcast to the result type. */
	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	/* 32-bit scalar: load the vec4 and extract the requested component. */
	if (!llvm_type_is_64bit(ctx, type)) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, 1, 0, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit scalar: load the low and high dwords separately and merge. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, 1, 0, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, 1, 0, can_speculate, false);

	return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
1117
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     LLVMTypeRef type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef value;

	/* Vec4 request: load each channel recursively and gather them. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(&ctx->gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		/* Two consecutive dword loads, merged into one 64-bit value. */
		lo = lds_load(bld_base, ctx->i32, swizzle, dw_addr);
		hi = lds_load(bld_base, ctx->i32, swizzle + 1, dw_addr);
		return si_llvm_emit_fetch_64bit(bld_base, type, lo, hi);
	}

	/* Base case: one dword at dw_addr + swizzle. */
	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0));

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1158
/**
 * Store to LDS.
 *
 * \param dw_offset_imm  immediate offset in dwords added to \p dw_addr
 * \param dw_addr        address in dwords
 * \param value          value to store
 */
static void lds_store(struct si_shader_context *ctx,
		      unsigned dw_offset_imm, LLVMValueRef dw_addr,
		      LLVMValueRef value)
{
	dw_addr = lp_build_add(&ctx->bld_base.uint_bld, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0));

	ac_lds_store(&ctx->ac, dw_addr, value);
}
1175
/**
 * Build a v4i32 buffer resource descriptor from a 64K-aligned address.
 *
 * \param param  shader argument holding the address in 64K units; it is
 *               zero-extended and shifted left by 16 to form the base
 *               address in dwords 0-1 of the descriptor
 */
static LLVMValueRef desc_from_addr_base64k(struct si_shader_context *ctx,
					   unsigned param)
{
	LLVMBuilderRef builder = ctx->ac.builder;

	LLVMValueRef addr = LLVMGetParam(ctx->main_fn, param);
	addr = LLVMBuildZExt(builder, addr, ctx->i64, "");
	addr = LLVMBuildShl(builder, addr, LLVMConstInt(ctx->i64, 16, 0), "");

	/* desc2 = num_records (unbounded); desc3 selects an XYZW-swizzled
	 * 32-bit float format. */
	uint64_t desc2 = 0xffffffff;
	uint64_t desc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	LLVMValueRef hi = LLVMConstInt(ctx->i64, desc2 | (desc3 << 32), 0);

	/* Assemble as <2 x i64>, then bitcast to the v4i32 descriptor type. */
	LLVMValueRef desc = LLVMGetUndef(LLVMVectorType(ctx->i64, 2));
	desc = LLVMBuildInsertElement(builder, desc, addr, ctx->i32_0, "");
	desc = LLVMBuildInsertElement(builder, desc, hi, ctx->i32_1, "");
	return LLVMBuildBitCast(builder, desc, ctx->v4i32, "");
}
1199
1200 static LLVMValueRef fetch_input_tcs(
1201 struct lp_build_tgsi_context *bld_base,
1202 const struct tgsi_full_src_register *reg,
1203 enum tgsi_opcode_type type, unsigned swizzle)
1204 {
1205 struct si_shader_context *ctx = si_shader_context(bld_base);
1206 LLVMValueRef dw_addr, stride;
1207
1208 stride = get_tcs_in_vertex_dw_stride(ctx);
1209 dw_addr = get_tcs_in_current_patch_offset(ctx);
1210 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1211
1212 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1213 }
1214
1215 static LLVMValueRef si_nir_load_input_tcs(struct ac_shader_abi *abi,
1216 LLVMValueRef vertex_index,
1217 LLVMValueRef param_index,
1218 unsigned const_index,
1219 unsigned location,
1220 unsigned driver_location,
1221 unsigned component,
1222 unsigned num_components,
1223 bool is_patch,
1224 bool is_compact)
1225 {
1226 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1227 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1228 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
1229 LLVMValueRef dw_addr, stride;
1230
1231 driver_location = driver_location / 4;
1232
1233 stride = get_tcs_in_vertex_dw_stride(ctx);
1234 dw_addr = get_tcs_in_current_patch_offset(ctx);
1235
1236 if (param_index) {
1237 /* Add the constant index to the indirect index */
1238 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
1239 LLVMConstInt(ctx->i32, const_index, 0), "");
1240 } else {
1241 param_index = LLVMConstInt(ctx->i32, const_index, 0);
1242 }
1243
1244 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
1245 vertex_index, param_index,
1246 driver_location,
1247 info->input_semantic_name,
1248 info->input_semantic_index,
1249 is_patch);
1250
1251 LLVMValueRef value[4];
1252 for (unsigned i = 0; i < num_components + component; i++) {
1253 value[i] = lds_load(bld_base, ctx->i32, i, dw_addr);
1254 }
1255
1256 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1257 }
1258
1259 static LLVMValueRef fetch_output_tcs(
1260 struct lp_build_tgsi_context *bld_base,
1261 const struct tgsi_full_src_register *reg,
1262 enum tgsi_opcode_type type, unsigned swizzle)
1263 {
1264 struct si_shader_context *ctx = si_shader_context(bld_base);
1265 LLVMValueRef dw_addr, stride;
1266
1267 if (reg->Register.Dimension) {
1268 stride = get_tcs_out_vertex_dw_stride(ctx);
1269 dw_addr = get_tcs_out_current_patch_offset(ctx);
1270 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1271 } else {
1272 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1273 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1274 }
1275
1276 return lds_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle, dw_addr);
1277 }
1278
1279 static LLVMValueRef fetch_input_tes(
1280 struct lp_build_tgsi_context *bld_base,
1281 const struct tgsi_full_src_register *reg,
1282 enum tgsi_opcode_type type, unsigned swizzle)
1283 {
1284 struct si_shader_context *ctx = si_shader_context(bld_base);
1285 LLVMValueRef buffer, base, addr;
1286
1287 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
1288
1289 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1290 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1291
1292 return buffer_load(bld_base, tgsi2llvmtype(bld_base, type), swizzle,
1293 buffer, base, addr, true);
1294 }
1295
/**
 * NIR ABI callback: load a TES input from the offchip TCS->TES buffer.
 *
 * \param const_index  constant attribute offset, folded into param_index
 * \param component    first component consumed by the caller
 */
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef buffer, base, addr;

	/* driver_location arrives in units of 4 — presumably scalar slots;
	 * divided down to a vec4 slot index (TODO confirm against callers). */
	driver_location = driver_location / 4;

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	if (param_index) {
		/* Add the constant index to the indirect index */
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, const_index, 0), "");
	} else {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, driver_location,
							       info->input_semantic_name,
							       info->input_semantic_index,
							       is_patch);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load(), but for now this maximises code sharing
	 * between the NIR and TGSI backends.
	 */
	/* Load only the consumed channels [component, component+num_components). */
	LLVMValueRef value[4];
	for (unsigned i = component; i < num_components + component; i++) {
		value[i] = buffer_load(&ctx->bld_base, ctx->i32, i, buffer, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
1343
/**
 * TGSI store for TCS outputs: write the value to LDS (for readback inside
 * the patch) and to the offchip TCS->TES buffer, and capture tess factors
 * in VGPRs for the TCS epilog.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     unsigned index,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[index];
	const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, buf_addr;
	LLVMValueRef values[4];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, index, dst);
		return;
	}

	if (reg->Register.Dimension) {
		/* Per-vertex output: address by vertex stride. */
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
		skip_lds_store = !sh_info->reads_pervertex_outputs;
	} else {
		/* Per-patch output: separate data region of the patch. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
		skip_lds_store = !sh_info->reads_perpatch_outputs;

		if (!reg->Register.Indirect) {
			int name = sh_info->output_semantic_name[reg->Register.Index];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !sh_info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

	base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);

	uint32_t writemask = reg->Register.WriteMask;
	while (writemask) {
		chan_index = u_bit_scan(&writemask);
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = ac_build_clamp(&ctx->ac, value);

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lds_store(ctx, chan_index, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan_index] = value;

		/* Partial writemask: store channels one dword at a time; a
		 * full vec4 is stored in one go after the loop instead. */
		if (reg->Register.WriteMask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    buf_addr, base,
						    4 * chan_index, 1, 0, true, false);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan_index]);
			} else if (chan_index < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan_index]);
			}
		}
	}

	/* Full vec4 write: emit one 4-dword store. */
	if (reg->Register.WriteMask == 0xF && !is_tess_factor) {
		LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
					    base, 0, 1, 0, true, false);
	}
}
1440
1441 static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
1442 LLVMValueRef vertex_index,
1443 LLVMValueRef param_index,
1444 unsigned const_index,
1445 unsigned location,
1446 unsigned driver_location,
1447 LLVMValueRef src,
1448 unsigned component,
1449 bool is_patch,
1450 bool is_compact,
1451 unsigned writemask)
1452 {
1453 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1454 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1455 LLVMValueRef dw_addr, stride;
1456 LLVMValueRef buffer, base, addr;
1457 LLVMValueRef values[4];
1458 bool skip_lds_store;
1459 bool is_tess_factor = false, is_tess_inner = false;
1460
1461 driver_location = driver_location / 4;
1462
1463 if (param_index) {
1464 /* Add the constant index to the indirect index */
1465 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
1466 LLVMConstInt(ctx->i32, const_index, 0), "");
1467 } else {
1468 if (const_index != 0)
1469 param_index = LLVMConstInt(ctx->i32, const_index, 0);
1470 }
1471
1472 if (!is_patch) {
1473 stride = get_tcs_out_vertex_dw_stride(ctx);
1474 dw_addr = get_tcs_out_current_patch_offset(ctx);
1475 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
1476 vertex_index, param_index,
1477 driver_location,
1478 info->output_semantic_name,
1479 info->output_semantic_index,
1480 is_patch);
1481
1482 skip_lds_store = !info->reads_pervertex_outputs;
1483 } else {
1484 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1485 dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
1486 vertex_index, param_index,
1487 driver_location,
1488 info->output_semantic_name,
1489 info->output_semantic_index,
1490 is_patch);
1491
1492 skip_lds_store = !info->reads_perpatch_outputs;
1493
1494 if (!param_index) {
1495 int name = info->output_semantic_name[driver_location];
1496
1497 /* Always write tess factors into LDS for the TCS epilog. */
1498 if (name == TGSI_SEMANTIC_TESSINNER ||
1499 name == TGSI_SEMANTIC_TESSOUTER) {
1500 /* The epilog doesn't read LDS if invocation 0 defines tess factors. */
1501 skip_lds_store = !info->reads_tessfactor_outputs &&
1502 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
1503 is_tess_factor = true;
1504 is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
1505 }
1506 }
1507 }
1508
1509 buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
1510
1511 base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
1512
1513 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
1514 param_index, driver_location,
1515 info->output_semantic_name,
1516 info->output_semantic_index,
1517 is_patch);
1518
1519 for (unsigned chan = 0; chan < 4; chan++) {
1520 if (!(writemask & (1 << chan)))
1521 continue;
1522 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
1523
1524 /* Skip LDS stores if there is no LDS read of this output. */
1525 if (!skip_lds_store)
1526 ac_lds_store(&ctx->ac, dw_addr, value);
1527
1528 value = ac_to_integer(&ctx->ac, value);
1529 values[chan] = value;
1530
1531 if (writemask != 0xF && !is_tess_factor) {
1532 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
1533 addr, base,
1534 4 * chan, 1, 0, true, false);
1535 }
1536
1537 /* Write tess factors into VGPRs for the epilog. */
1538 if (is_tess_factor &&
1539 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
1540 if (!is_tess_inner) {
1541 LLVMBuildStore(ctx->ac.builder, value, /* outer */
1542 ctx->invoc0_tess_factors[chan]);
1543 } else if (chan < 2) {
1544 LLVMBuildStore(ctx->ac.builder, value, /* inner */
1545 ctx->invoc0_tess_factors[4 + chan]);
1546 }
1547 }
1548 }
1549
1550 if (writemask == 0xF && !is_tess_factor) {
1551 LLVMValueRef value = lp_build_gather_values(&ctx->gallivm,
1552 values, 4);
1553 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
1554 base, 0, 1, 0, true, false);
1555 }
1556 }
1557
/**
 * Load one component of a GS input for the given vertex.
 *
 * On GFX9 the ESGS ring lives in LDS and the per-vertex dword offsets are
 * packed two to an SGPR (16 bits each); on older chips the input is
 * fetched from the ESGS ring buffer in memory using a per-vertex offset
 * VGPR.
 *
 * \param swizzle  component to load (0..3), or ~0 to load a whole vec4
 */
LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
				   unsigned input_index,
				   unsigned vtx_offset_param,
				   LLVMTypeRef type,
				   unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		/* Unpack the 16-bit vertex offset from the packed SGPR pair. */
		switch (index / 2) {
		case 0:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx01_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx23_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = unpack_param(ctx, ctx->param_gs_vtx45_offset,
						  index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		/* Attributes are stored as vec4s: offset by param * 4 dwords. */
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, param * 4, 0), "");
		return lds_load(bld_base, type, swizzle, vtx_offset);
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Vec4 request: fetch each channel and gather. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return lp_build_gather_values(&ctx->gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ctx->gs_vtx_offset[vtx_offset_param];

	vtx_offset = lp_build_mul_imm(uint, gs_vtx_offset, 4);

	/* NOTE(review): the *256 scale presumably reflects the per-dword-slot
	 * stride of the ESGS ring layout — confirm against the ring setup. */
	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, 1, 0, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		/* 64-bit types: load the second dword from the next slot. */
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, 1, 0, true, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1636
1637 static LLVMValueRef fetch_input_gs(
1638 struct lp_build_tgsi_context *bld_base,
1639 const struct tgsi_full_src_register *reg,
1640 enum tgsi_opcode_type type,
1641 unsigned swizzle)
1642 {
1643 struct si_shader_context *ctx = si_shader_context(bld_base);
1644 struct tgsi_shader_info *info = &ctx->shader->selector->info;
1645
1646 unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
1647 if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
1648 return get_primitive_id(ctx, swizzle);
1649
1650 if (!reg->Register.Dimension)
1651 return NULL;
1652
1653 return si_llvm_load_input_gs(&ctx->abi, reg->Register.Index,
1654 reg->Dimension.Index,
1655 tgsi2llvmtype(bld_base, type),
1656 swizzle);
1657 }
1658
1659 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1660 {
1661 switch (interpolate) {
1662 case TGSI_INTERPOLATE_CONSTANT:
1663 return 0;
1664
1665 case TGSI_INTERPOLATE_LINEAR:
1666 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1667 return SI_PARAM_LINEAR_SAMPLE;
1668 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1669 return SI_PARAM_LINEAR_CENTROID;
1670 else
1671 return SI_PARAM_LINEAR_CENTER;
1672 break;
1673 case TGSI_INTERPOLATE_COLOR:
1674 case TGSI_INTERPOLATE_PERSPECTIVE:
1675 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1676 return SI_PARAM_PERSP_SAMPLE;
1677 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1678 return SI_PARAM_PERSP_CENTROID;
1679 else
1680 return SI_PARAM_PERSP_CENTER;
1681 break;
1682 default:
1683 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1684 return -1;
1685 }
1686 }
1687
1688 static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx,
1689 unsigned attr_index, unsigned chan,
1690 LLVMValueRef prim_mask,
1691 LLVMValueRef i, LLVMValueRef j)
1692 {
1693 if (i || j) {
1694 return ac_build_fs_interp(&ctx->ac,
1695 LLVMConstInt(ctx->i32, chan, 0),
1696 LLVMConstInt(ctx->i32, attr_index, 0),
1697 prim_mask, i, j);
1698 }
1699 return ac_build_fs_interp_mov(&ctx->ac,
1700 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1701 LLVMConstInt(ctx->i32, chan, 0),
1702 LLVMConstInt(ctx->i32, attr_index, 0),
1703 prim_mask);
1704 }
1705
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index		semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face		SI_PARAM_FRONT_FACE
 * @param result	the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	LLVMValueRef i = NULL, j = NULL;
	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	if (interp) {
		/* Split the (i, j) weight vector into its two components. */
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->i32_1, "");
	}

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.part.ps.prolog.color_two_side) {
		LLVMValueRef is_face_positive;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		is_face_positive = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		/* Interpolate both colors and select per-channel by facing. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef front, back;

			front = si_build_fs_interp(ctx,
						   input_index, chan,
						   prim_mask, i, j);
			back = si_build_fs_interp(ctx,
						  back_attr_offset, chan,
						  prim_mask, i, j);

			result[chan] = LLVMBuildSelect(ctx->ac.builder,
						       is_face_positive,
						       front,
						       back,
						       "");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Only the fog coordinate (x) is interpolated; yzw = 0,0,1. */
		result[0] = si_build_fs_interp(ctx, input_index,
					       0, prim_mask, i, j);
		result[1] =
		result[2] = LLVMConstReal(ctx->f32, 0.0f);
		result[3] = LLVMConstReal(ctx->f32, 1.0f);
	} else {
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			result[chan] = si_build_fs_interp(ctx,
							  input_index, chan,
							  prim_mask, i, j);
		}
	}
}
1803
/**
 * Load a PS input into out[0..3]: colors come pre-loaded in VGPRs by the
 * PS prolog; everything else is interpolated with the (i, j) weights
 * selected by the input's interpolation mode and location.
 */
void si_llvm_load_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &ctx->bld_base.base;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;
	enum tgsi_semantic semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	enum tgsi_interpolate_mode interp_mode = info->input_interpolate[input_index];
	enum tgsi_interpolate_loc interp_loc = info->input_interpolate_loc[input_index];

	/* Get colors from input VGPRs (set by the prolog). */
	if (semantic_name == TGSI_SEMANTIC_COLOR) {
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (semantic_index * 4);
		/* COLOR1's VGPRs follow COLOR0's read components. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (semantic_index ? util_bitcount(colors_read & 0xf) : 0);

		/* Unread components stay undef. */
		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(interp_mode, interp_loc);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		/* 0 means constant interpolation: no (i, j) param is needed. */
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, semantic_name,
			semantic_index, 0, /* this param is unused */
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1848
/* TGSI declaration hook for PS inputs; decl is unused because all the
 * interpolation info is taken from the selector's tgsi_shader_info. */
static void declare_input_fs(
	struct si_shader_context *ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	si_llvm_load_input_fs(ctx, input_index, out);
}
1857
/* The sample ID is in bits [11:8] of the ANCILLARY PS input VGPR. */
static LLVMValueRef get_sample_id(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_ANCILLARY, 8, 4);
}
1862
1863
/**
 * Load a dword from a constant buffer.
 *
 * \param resource  buffer resource descriptor
 * \param offset    offset of the dword to load
 */
static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
				      LLVMValueRef resource,
				      LLVMValueRef offset)
{
	/* The two trailing flags enable speculation and (presumably) SMEM
	 * use — the data is constant; confirm against ac_build_buffer_load. */
	return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
				    0, 0, 0, true, true);
}
1874
1875 static LLVMValueRef load_sample_position(struct si_shader_context *ctx, LLVMValueRef sample_id)
1876 {
1877 struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
1878 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
1879 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1880 LLVMValueRef resource = ac_build_load_to_sgpr(&ctx->ac, desc, buf_index);
1881
1882 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1883 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1884 LLVMValueRef offset1 = LLVMBuildAdd(ctx->ac.builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1885
1886 LLVMValueRef pos[4] = {
1887 buffer_load_const(ctx, resource, offset0),
1888 buffer_load_const(ctx, resource, offset1),
1889 LLVMConstReal(ctx->f32, 0),
1890 LLVMConstReal(ctx->f32, 0)
1891 };
1892
1893 return lp_build_gather_values(&ctx->gallivm, pos, 4);
1894 }
1895
/**
 * Load the value of a declared TGSI system value and store it in
 * ctx->system_values[index].
 *
 * \param ctx    shader context
 * \param index  destination slot in ctx->system_values
 * \param decl   full TGSI declaration; decl->Semantic.Name selects which
 *               system value is materialized
 */
void si_load_system_value(struct si_shader_context *ctx,
			  unsigned index,
			  const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *bld = &ctx->bld_base.base;
	LLVMValueRef value = 0;

	assert(index < RADEON_LLVM_MAX_SYSTEM_VALUES);

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = ctx->abi.instance_id;
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* The HW vertex ID is relative to the base vertex; add it back
		 * so GLSL sees the absolute ID. */
		value = LLVMBuildAdd(ctx->ac.builder,
				     ctx->abi.vertex_id,
				     ctx->abi.base_vertex, "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		/* Unused. Clarify the meaning in indexed vs. non-indexed
		 * draws if this is ever used again. */
		assert(false);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
	{
		/* For non-indexed draws, the base vertex set by the driver
		 * (for direct draws) or the CP (for indirect draws) is the
		 * first vertex ID, but GLSL expects 0 to be returned.
		 */
		LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
		LLVMValueRef indexed;

		/* Bit 1 of the VS state bits flags an indexed draw. */
		indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
		indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

		value = LLVMBuildSelect(ctx->ac.builder, indexed,
					ctx->abi.base_vertex, ctx->i32_0, "");
		break;
	}

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = ctx->abi.start_instance;
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = ctx->abi.draw_id;
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			/* For TCS, the invocation ID sits in bits [8:12] of
			 * the packed tcs_rel_ids ABI value. */
			value = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = ctx->abi.gs_invocation_id;
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position: x/y/z are taken directly from the
		 * input VGPRs/SGPRs; w is reciprocated (HW provides 1/w). */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&ctx->bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = ctx->abi.front_face;
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(ctx);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* The sub-pixel sample position is the fractional part of
		 * the fragment position. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMConstReal(ctx->f32, 0),
			LLVMConstReal(ctx->f32, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&ctx->bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(&ctx->gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(ctx->main_fn, ctx->param_tes_u),
			LLVMGetParam(ctx->main_fn, ctx->param_tes_v),
			ctx->ac.f32_0,
			ctx->ac.f32_0
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, ctx->ac.f32_1,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(&ctx->gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			/* Output patch vertex count: bits [26:31] of
			 * tcs_out_lds_layout. */
			value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = get_num_tcs_out_vertices(ctx);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Read the tess factors written by the TCS back from the
		 * offchip TCS->TES buffer. */
		LLVMValueRef buffer, base, addr;
		int param = si_shader_io_get_unique_index_patch(decl->Semantic.Name, 0);

		buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);

		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
		addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
						  LLVMConstInt(ctx->i32, param, 0));

		value = buffer_load(&ctx->bld_base, ctx->f32,
				    ~0, buffer, base, addr, true);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels live in the SI_HS_CONST_DEFAULT_TESS_LEVELS
		 * constant buffer: outer at dwords 0..3, inner at 4..7. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
		buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
		buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
		value = lp_build_gather_values(&ctx->gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(ctx, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(ctx->main_fn, ctx->param_grid_size);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		/* Use the compile-time block size when the CS declares fixed
		 * dimensions; otherwise read it from the dispatch SGPRs. */
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

			value = lp_build_gather_values(&ctx->gallivm, values, 3);
		} else {
			value = LLVMGetParam(ctx->main_fn, ctx->param_block_size);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
	{
		/* Unused block ID components have no SGPR (param < 0) and
		 * read back as 0. */
		LLVMValueRef values[3];

		for (int i = 0; i < 3; i++) {
			values[i] = ctx->i32_0;
			if (ctx->param_block_id[i] >= 0) {
				values[i] = LLVMGetParam(ctx->main_fn,
							 ctx->param_block_id[i]);
			}
		}
		value = lp_build_gather_values(&ctx->gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(ctx->main_fn, ctx->param_thread_id);
		break;

	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* A helper invocation is one that is NOT live according to
		 * llvm.amdgcn.ps.live; sign-extend the bool to an i32 mask. */
		value = lp_build_intrinsic(ctx->ac.builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LP_FUNC_ATTR_READNONE);
		value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildSExt(ctx->ac.builder, value, ctx->i32, "");
		break;

	case TGSI_SEMANTIC_SUBGROUP_SIZE:
		/* Wavefront size is 64 on this hardware. */
		value = LLVMConstInt(ctx->i32, 64, 0);
		break;

	case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
		value = ac_get_thread_id(&ctx->ac);
		break;

	case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
	{
		/* eq_mask = 1 << thread_id, returned as v2i32. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
	case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
	{
		/* ge = ~0 << id;  gt = ~1 << id;  le = ~gt;  lt = ~ge. */
		LLVMValueRef id = ac_get_thread_id(&ctx->ac);
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
			/* All bits set except LSB */
			value = LLVMConstInt(ctx->i64, -2, 0);
		} else {
			/* All bits set */
			value = LLVMConstInt(ctx->i64, -1, 0);
		}
		id = LLVMBuildZExt(ctx->ac.builder, id, ctx->i64, "");
		value = LLVMBuildShl(ctx->ac.builder, value, id, "");
		if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
		    decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
			value = LLVMBuildNot(ctx->ac.builder, value, "");
		value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->v2i32, "");
		break;
	}

	default:
		assert(!"unknown system value");
		return;
	}

	ctx->system_values[index] = value;
}
2170
2171 void si_declare_compute_memory(struct si_shader_context *ctx,
2172 const struct tgsi_full_declaration *decl)
2173 {
2174 struct si_shader_selector *sel = ctx->shader->selector;
2175
2176 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
2177 LLVMValueRef var;
2178
2179 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
2180 assert(decl->Range.First == decl->Range.Last);
2181 assert(!ctx->ac.lds);
2182
2183 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2184 LLVMArrayType(ctx->i8, sel->local_size),
2185 "compute_lds",
2186 LOCAL_ADDR_SPACE);
2187 LLVMSetAlignment(var, 4);
2188
2189 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
2190 }
2191
2192 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
2193 {
2194 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
2195 ctx->param_const_and_shader_buffers);
2196
2197 return ac_build_load_to_sgpr(&ctx->ac, list_ptr,
2198 LLVMConstInt(ctx->i32, si_get_constbuf_slot(i), 0));
2199 }
2200
2201 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
2202 {
2203 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2204 LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
2205
2206 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
2207 index = LLVMBuildAdd(ctx->ac.builder, index,
2208 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
2209
2210 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
2211 }
2212
2213 static LLVMValueRef
2214 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
2215 {
2216 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2217 LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
2218 ctx->param_const_and_shader_buffers);
2219
2220 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
2221 index = LLVMBuildSub(ctx->ac.builder,
2222 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
2223 index, "");
2224
2225 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
2226 }
2227
/**
 * Fetch one channel of a TGSI constant register.
 *
 * Recurses on itself to handle LP_CHAN_ALL (gathers all 4 channels into a
 * vector) and 64-bit types (fetched as two 32-bit halves).
 *
 * \param bld_base  TGSI->LLVM translation context
 * \param reg       source register (1D or 2D constant file access)
 * \param type      TGSI value type; result is bitcast to it
 * \param swizzle   channel to fetch, or LP_CHAN_ALL
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;

	/* Fetch each channel separately and gather them into a vec4. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(&ctx->gallivm, values, 4);
	}

	/* Split 64-bit loads. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef lo, hi;

		lo = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle);
		hi = fetch_constant(bld_base, reg, TGSI_TYPE_UNSIGNED, swizzle + 1);
		return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
						lo, hi);
	}

	/* Byte address of the selected channel within the constant file. */
	idx = reg->Register.Index * 4 + swizzle;
	if (reg->Register.Indirect) {
		addr = si_get_indirect_index(ctx, ireg, 16, idx * 4);
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	/* Fast path when user data SGPRs point to constant buffer 0 directly. */
	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		LLVMValueRef ptr =
			LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);

		/* This enables use of s_load_dword and flat_load_dword for const buffer 0
		 * loads, and up to x4 load opcode merging. However, it leads to horrible
		 * code reducing SIMD wave occupancy from 8 to 2 in many cases.
		 *
		 * Using s_buffer_load_dword (x1) seems to be the best option right now.
		 *
		 * LLVM 5.0 on SI doesn't insert a required s_nop between SALU setting
		 * a descriptor and s_buffer_load_dword using it, so we can't expand
		 * the pointer into a full descriptor like below. We have to use
		 * s_load_dword instead. The only case when LLVM 5.0 would select
		 * s_buffer_load_dword (that we have to prevent) is when we use
		 * a literal offset where we don't need bounds checking.
		 */
		if (ctx->screen->info.chip_class == SI &&
		    HAVE_LLVM < 0x0600 &&
		    !reg->Register.Indirect) {
			/* Convert the byte offset to a dword index for s_load_dword. */
			addr = LLVMBuildLShr(ctx->ac.builder, addr, LLVMConstInt(ctx->i32, 2, 0), "");
			LLVMValueRef result = ac_build_load_invariant(&ctx->ac, ptr, addr);
			return bitcast(bld_base, type, result);
		}

		/* Do the bounds checking with a descriptor, because
		 * doing computation and manual bounds checking of 64-bit
		 * addresses generates horrible VALU code with very high
		 * VGPR usage and very low SIMD occupancy.
		 */
		ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->i64, "");
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->v2i32, "");

		/* Build a buffer descriptor covering the declared constant
		 * range: base address (2 dwords), size, and format word. */
		LLVMValueRef desc_elems[] = {
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_0, ""),
			LLVMBuildExtractElement(ctx->ac.builder, ptr, ctx->i32_1, ""),
			LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
			LLVMConstInt(ctx->i32,
				S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0)
		};
		LLVMValueRef desc = ac_build_gather_values(&ctx->ac, desc_elems, 4);
		LLVMValueRef result = buffer_load_const(ctx, desc, addr);
		return bitcast(bld_base, type, result);
	}

	/* General 2D constant file access: load the descriptor for the
	 * addressed constant buffer, then load from it. */
	assert(reg->Register.Dimension);
	buf = reg->Dimension.Index;

	if (reg->Dimension.Indirect) {
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_and_shader_buffers);
		LLVMValueRef index;
		index = si_get_bounded_indirect_index(ctx, &reg->DimIndirect,
						      reg->Dimension.Index,
						      ctx->num_const_buffers);
		/* Constant buffers come after the shader buffer slots. */
		index = LLVMBuildAdd(ctx->ac.builder, index,
				     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
		bufp = ac_build_load_to_sgpr(&ctx->ac, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	return bitcast(bld_base, type, buffer_load_const(ctx, bufp, addr));
}
2336
2337 /* Upper 16 bits must be zero. */
2338 static LLVMValueRef si_llvm_pack_two_int16(struct si_shader_context *ctx,
2339 LLVMValueRef val[2])
2340 {
2341 return LLVMBuildOr(ctx->ac.builder, val[0],
2342 LLVMBuildShl(ctx->ac.builder, val[1],
2343 LLVMConstInt(ctx->i32, 16, 0),
2344 ""), "");
2345 }
2346
2347 /* Upper 16 bits are ignored and will be dropped. */
2348 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct si_shader_context *ctx,
2349 LLVMValueRef val[2])
2350 {
2351 LLVMValueRef v[2] = {
2352 LLVMBuildAnd(ctx->ac.builder, val[0],
2353 LLVMConstInt(ctx->i32, 0xffff, 0), ""),
2354 val[1],
2355 };
2356 return si_llvm_pack_two_int16(ctx, v);
2357 }
2358
/* Initialize arguments for the shader export intrinsic.
 *
 * For fragment shaders, the output values are converted/packed according
 * to the color buffer's SPI_SHADER_COL_FORMAT taken from the shader key;
 * for all other stages the full 32_ABGR format is used.
 *
 * \param ctx     shader context
 * \param values  the 4 output channels to export
 * \param target  export target (V_008DFC_SQ_EXP_*)
 * \param args    export arguments, filled in by this function
 */
static void si_llvm_init_export_args(struct si_shader_context *ctx,
				     LLVMValueRef *values,
				     unsigned target,
				     struct ac_export_args *args)
{
	LLVMValueRef f32undef = LLVMGetUndef(ctx->ac.f32);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef val[4];
	unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
	unsigned chan;
	bool is_int8, is_int10;

	/* Default is 0xf. Adjusted below depending on the format. */
	args->enabled_channels = 0xf; /* writemask */

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	if (ctx->type == PIPE_SHADER_FRAGMENT) {
		/* Pick the color format for this MRT from the shader key.
		 * NOTE: is_int8/is_int10 are only set on this path; they are
		 * only read by the UINT16/SINT16 cases below, which are only
		 * selected by fragment-shader col_formats. */
		const struct si_shader_key *key = &ctx->shader->key;
		unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		assert(cbuf >= 0 && cbuf < 8);
		spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
		is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
		is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
	}

	args->compr = false;
	args->out[0] = f32undef;
	args->out[1] = f32undef;
	args->out[2] = f32undef;
	args->out[3] = f32undef;

	switch (spi_shader_col_format) {
	case V_028714_SPI_SHADER_ZERO:
		/* Nothing is written; redirect the export to NULL. */
		args->enabled_channels = 0; /* writemask */
		args->target = V_008DFC_SQ_EXP_NULL;
		break;

	case V_028714_SPI_SHADER_32_R:
		args->enabled_channels = 1; /* writemask */
		args->out[0] = values[0];
		break;

	case V_028714_SPI_SHADER_32_GR:
		args->enabled_channels = 0x3; /* writemask */
		args->out[0] = values[0];
		args->out[1] = values[1];
		break;

	case V_028714_SPI_SHADER_32_AR:
		args->enabled_channels = 0x9; /* writemask */
		args->out[0] = values[0];
		args->out[3] = values[3];
		break;

	case V_028714_SPI_SHADER_FP16_ABGR:
		/* Pack pairs of f32 channels to f16x2 (cvt_pkrtz). */
		args->compr = 1; /* COMPR flag */

		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				values[2 * chan],
				values[2 * chan + 1]
			};
			LLVMValueRef packed;

			packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		break;

	case V_028714_SPI_SHADER_UNORM16_ABGR:
		/* Clamp to [0,1], scale to 16-bit range, round to nearest. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  LLVMConstReal(ctx->f32, 65535), "");
			val[chan] = LLVMBuildFAdd(builder, val[chan],
						  LLVMConstReal(ctx->f32, 0.5), "");
			val[chan] = LLVMBuildFPToUI(builder, val[chan],
						    ctx->i32, "");
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val+2));
		break;

	case V_028714_SPI_SHADER_SNORM16_ABGR:
		for (chan = 0; chan < 4; chan++) {
			/* Clamp between [-1, 1]. */
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_MIN,
							      values[chan],
							      LLVMConstReal(ctx->f32, 1));
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_MAX,
							      val[chan],
							      LLVMConstReal(ctx->f32, -1));
			/* Convert to a signed integer in [-32767, 32767]. */
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  LLVMConstReal(ctx->f32, 32767), "");
			/* If positive, add 0.5, else add -0.5. */
			val[chan] = LLVMBuildFAdd(builder, val[chan],
					LLVMBuildSelect(builder,
						LLVMBuildFCmp(builder, LLVMRealOGE,
							      val[chan], ctx->ac.f32_0, ""),
						LLVMConstReal(ctx->f32, 0.5),
						LLVMConstReal(ctx->f32, -0.5), ""), "");
			val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val+2));
		break;

	case V_028714_SPI_SHADER_UINT16_ABGR: {
		/* Max values depend on the real channel width: 8-bit, 10-bit
		 * (with 2-bit alpha), or 16-bit. */
		LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
		LLVMValueRef max_alpha =
			!is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);

		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_to_integer(&ctx->ac, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base, TGSI_OPCODE_UMIN,
					val[chan],
					chan == 3 ? max_alpha : max_rgb);
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int16(ctx, val+2));
		break;
	}

	case V_028714_SPI_SHADER_SINT16_ABGR: {
		/* Signed ranges for 8-bit, 10-bit (2-bit alpha), or 16-bit. */
		LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
		LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
			is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
		LLVMValueRef max_alpha =
			!is_int10 ? max_rgb : ctx->i32_1;
		LLVMValueRef min_alpha =
			!is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);

		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = ac_to_integer(&ctx->ac, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base,
					TGSI_OPCODE_IMIN,
					val[chan], chan == 3 ? max_alpha : max_rgb);
			val[chan] = lp_build_emit_llvm_binary(&ctx->bld_base,
					TGSI_OPCODE_IMAX,
					val[chan], chan == 3 ? min_alpha : min_rgb);
		}

		args->compr = 1; /* COMPR flag */
		args->out[0] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val));
		args->out[1] = ac_to_float(&ctx->ac, si_llvm_pack_two_int32_as_int16(ctx, val+2));
		break;
	}

	case V_028714_SPI_SHADER_32_ABGR:
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
		break;
	}
}
2534
2535 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2536 LLVMValueRef alpha)
2537 {
2538 struct si_shader_context *ctx = si_shader_context(bld_base);
2539
2540 if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2541 static LLVMRealPredicate cond_map[PIPE_FUNC_ALWAYS + 1] = {
2542 [PIPE_FUNC_LESS] = LLVMRealOLT,
2543 [PIPE_FUNC_EQUAL] = LLVMRealOEQ,
2544 [PIPE_FUNC_LEQUAL] = LLVMRealOLE,
2545 [PIPE_FUNC_GREATER] = LLVMRealOGT,
2546 [PIPE_FUNC_NOTEQUAL] = LLVMRealONE,
2547 [PIPE_FUNC_GEQUAL] = LLVMRealOGE,
2548 };
2549 LLVMRealPredicate cond = cond_map[ctx->shader->key.part.ps.epilog.alpha_func];
2550 assert(cond);
2551
2552 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2553 SI_PARAM_ALPHA_REF);
2554 LLVMValueRef alpha_pass =
2555 LLVMBuildFCmp(ctx->ac.builder, cond, alpha, alpha_ref, "");
2556 ac_build_kill_if_false(&ctx->ac, alpha_pass);
2557 } else {
2558 ac_build_kill_if_false(&ctx->ac, LLVMConstInt(ctx->i1, 0, 0));
2559 }
2560 }
2561
2562 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2563 LLVMValueRef alpha,
2564 unsigned samplemask_param)
2565 {
2566 struct si_shader_context *ctx = si_shader_context(bld_base);
2567 LLVMValueRef coverage;
2568
2569 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2570 coverage = LLVMGetParam(ctx->main_fn,
2571 samplemask_param);
2572 coverage = ac_to_integer(&ctx->ac, coverage);
2573
2574 coverage = lp_build_intrinsic(ctx->ac.builder, "llvm.ctpop.i32",
2575 ctx->i32,
2576 &coverage, 1, LP_FUNC_ATTR_READNONE);
2577
2578 coverage = LLVMBuildUIToFP(ctx->ac.builder, coverage,
2579 ctx->f32, "");
2580
2581 coverage = LLVMBuildFMul(ctx->ac.builder, coverage,
2582 LLVMConstReal(ctx->f32,
2583 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2584
2585 return LLVMBuildFMul(ctx->ac.builder, alpha, coverage, "");
2586 }
2587
/* Fill the clip-distance position exports (pos[2] and pos[3]) from a
 * written clip vertex.
 *
 * Each of the 8 clip distances is the dot product of the clip vertex
 * (out_elts, 4 components) with one user clip plane loaded from the
 * SI_VS_CONST_CLIP_PLANES constant buffer.
 *
 * \param ctx       shader context
 * \param pos       position export array; entries [2] and [3] are written
 * \param out_elts  the 4 components of the clip vertex output
 */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	/* Two exports of 4 clip distances each. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start all 4 distances at 0 and accumulate below. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane (reg_index*4 + chan),
				 * component const_chan, in the buffer. */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] =
					lp_build_add(&ctx->bld_base.base, args->out[chan],
						     lp_build_mul(&ctx->bld_base.base, base_elt,
								  out_elts[const_chan]));
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
2630
2631 static void si_dump_streamout(struct pipe_stream_output_info *so)
2632 {
2633 unsigned i;
2634
2635 if (so->num_outputs)
2636 fprintf(stderr, "STREAMOUT\n");
2637
2638 for (i = 0; i < so->num_outputs; i++) {
2639 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2640 so->output[i].start_component;
2641 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2642 i, so->output[i].output_buffer,
2643 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2644 so->output[i].register_index,
2645 mask & 1 ? "x" : "",
2646 mask & 2 ? "y" : "",
2647 mask & 4 ? "z" : "",
2648 mask & 8 ? "w" : "");
2649 }
2650 }
2651
2652 static void emit_streamout_output(struct si_shader_context *ctx,
2653 LLVMValueRef const *so_buffers,
2654 LLVMValueRef const *so_write_offsets,
2655 struct pipe_stream_output *stream_out,
2656 struct si_shader_output_values *shader_out)
2657 {
2658 unsigned buf_idx = stream_out->output_buffer;
2659 unsigned start = stream_out->start_component;
2660 unsigned num_comps = stream_out->num_components;
2661 LLVMValueRef out[4];
2662
2663 assert(num_comps && num_comps <= 4);
2664 if (!num_comps || num_comps > 4)
2665 return;
2666
2667 /* Load the output as int. */
2668 for (int j = 0; j < num_comps; j++) {
2669 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
2670
2671 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
2672 }
2673
2674 /* Pack the output. */
2675 LLVMValueRef vdata = NULL;
2676
2677 switch (num_comps) {
2678 case 1: /* as i32 */
2679 vdata = out[0];
2680 break;
2681 case 2: /* as v2i32 */
2682 case 3: /* as v4i32 (aligned to 4) */
2683 case 4: /* as v4i32 */
2684 vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
2685 for (int j = 0; j < num_comps; j++) {
2686 vdata = LLVMBuildInsertElement(ctx->ac.builder, vdata, out[j],
2687 LLVMConstInt(ctx->i32, j, 0), "");
2688 }
2689 break;
2690 }
2691
2692 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
2693 vdata, num_comps,
2694 so_write_offsets[buf_idx],
2695 ctx->i32_0,
2696 stream_out->dst_offset * 4, 1, 1, true, false);
2697 }
2698
/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 *
 * \param ctx      shader context
 * \param outputs  shader output values, indexed by output register
 * \param noutput  number of entries in \p outputs
 * \param stream   vertex stream to emit; outputs bound to other streams
 *                 are skipped
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, &ctx->gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
						    ctx->param_rw_buffers);

		for (i = 0; i < 4; i++) {
			/* Buffers with stride 0 are unused. */
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			/* streamout_offset is in dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			/* Skip outputs the shader didn't write. */
			if (reg >= noutput)
				continue;

			if (stream != so->output[i].stream)
				continue;

			emit_streamout_output(ctx, so_buffers, so_write_offset,
					      &so->output[i], &outputs[reg]);
		}
	}
	lp_build_endif(&if_ctx);
}
2782
2783 static void si_export_param(struct si_shader_context *ctx, unsigned index,
2784 LLVMValueRef *values)
2785 {
2786 struct ac_export_args args;
2787
2788 si_llvm_init_export_args(ctx, values,
2789 V_008DFC_SQ_EXP_PARAM + index, &args);
2790 ac_build_export(&ctx->ac, &args);
2791 }
2792
/* Emit PARAM exports for all exportable VS/TES outputs, assigning
 * consecutive export slots and recording each output's slot in
 * shader->info.vs_output_param_offset. Sets nr_param_exports.
 *
 * \param ctx      shader context
 * \param outputs  output values to consider for export
 * \param noutput  number of entries in \p outputs
 */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Skip outputs where no component belongs to vertex stream 0. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics are exported as parameters. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs the key says are not consumed by the next
		 * stage (kill_outputs is a bitmask over unique IO indices). */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
2839
/* Generate export instructions for hardware VS shader stage.
 *
 * Collects position-related outputs (position, point size, edge flag,
 * layer, viewport index, clip distances) into up to 4 position exports,
 * emits them with the DONE bit on the last one, then emits all parameter
 * exports via si_build_param_exports.
 */
static void si_llvm_export_vs(struct si_shader_context *ctx,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Build position exports. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_export_args(ctx, outputs[i].values,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (!shader->key.opt.clip_disable) {
				/* Clip distances occupy pos exports 2 and 3. */
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_export_args(ctx, outputs[i].values,
							 V_008DFC_SQ_EXP_POS + index,
							 &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		/* Channel layout: X = psize, Y = edgeflag, Z = layer
		 * (viewport enables W below on pre-GFX9, Z on GFX9). */
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (shader->selector->info.writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* OR the viewport index into bits [19:16] of Z. */
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac,  pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				/* Pre-GFX9: viewport index goes in its own channel (W). */
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the used position exports first... */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* ...then emit them with consecutive targets and the DONE bit
	 * set on the last one. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
2986
/**
 * Forward all outputs from the vertex shader to the TES. This is only used
 * for the fixed function TCS.
 *
 * Reads each input selected by key.mono.u.ff_tcs_inputs_to_copy from LDS
 * (where the LS stage stored it) and writes it to the offchip TCS->TES
 * buffer at the address computed per patch/vertex/attribute.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* Invocation id is unpacked from tcs_rel_ids (5 bits starting at bit 8). */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

	/* LDS address of this invocation's vertex inputs:
	 * patch base + invocation_id * per-vertex dword stride. */
	lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
	lds_vertex_offset = LLVMBuildMul(ctx->ac.builder, invocation_id,
	                                 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(ctx->ac.builder, lds_base, lds_vertex_offset, "");

	/* Copy each input marked in the bitmask. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		/* Each attribute occupies 4 dwords in LDS. */
		LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
		                                    LLVMConstInt(ctx->i32, 4 * i, 0),
		                                     "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					      get_rel_patch_id(ctx),
		                                              invocation_id,
		                                              LLVMConstInt(ctx->i32, i, 0));

		LLVMValueRef value = lds_load(bld_base, ctx->ac.i32, ~0,
		                              lds_ptr);

		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, 1, 0, true, false);
	}
}
3028
/* Write the tessellation factors for the current patch.
 *
 * Runs only in invocation 0 (tess factors are per-patch). Loads the inner/
 * outer factors either from VGPRs (when invoc0_tess_factors_are_def) or
 * from LDS, stores them to the tess-factor ring buffer, and optionally
 * also to the offchip buffer when the TES reads them.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset,
				  LLVMValueRef invoc0_tf_outer[4],
				  LLVMValueRef invoc0_tf_inner[2])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Add a barrier before loading tess factors from LDS. */
	if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
		si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  invocation_id, ctx->i32_0, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Unused components stay undef. */
	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
		/* Tess factors are in VGPRs. */
		for (i = 0; i < outer_comps; i++)
			outer[i] = out[i] = invoc0_tf_outer[i];
		for (i = 0; i < inner_comps; i++)
			inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
	} else {
		/* Load tess_inner and tess_outer from LDS.
		 * Any invocation can write them, so we can't get them from a temporary.
		 */
		tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
		tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

		lds_base = tcs_out_current_patch_data_offset;
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_inner_index * 4, 0), "");
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_outer_index * 4, 0), "");

		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lds_load(bld_base, ctx->ac.i32, i, lds_inner);
		}
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		LLVMValueRef tmp = out[0];
		out[0] = out[1];
		out[1] = tmp;
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(&ctx->gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(&ctx->gallivm, out+4, stride - 4);

	/* Get the buffer. */
	buffer = desc_from_addr_base64k(ctx, ctx->param_tcs_factor_addr_base64k);

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       ctx->param_tcs_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	/* Only the first patch of the threadgroup writes the control word. */
	lp_build_if(&inner_if_ctx, &ctx->gallivm,
		    LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				  rel_patch_id, ctx->i32_0, ""));

	/* Store the dynamic HS control word. */
	offset = 0;
	if (ctx->screen->info.chip_class <= VI) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, 1, 0, true, false);
		offset += 4;
	}

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, 1, 0, true, false);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, 1, 0, true, false);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = desc_from_addr_base64k(ctx, ctx->param_tcs_offchip_addr_base64k);
		base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		/* Pad to a power-of-two component count for the vector store. */
		outer_vec = lp_build_gather_values(&ctx->gallivm, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    lp_build_gather_values(&ctx->gallivm, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, 1, 0, true, false);
		}
	}

	lp_build_endif(&if_ctx);
}
3203
3204 static LLVMValueRef
3205 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
3206 unsigned param, unsigned return_index)
3207 {
3208 return LLVMBuildInsertValue(ctx->ac.builder, ret,
3209 LLVMGetParam(ctx->main_fn, param),
3210 return_index, "");
3211 }
3212
3213 static LLVMValueRef
3214 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
3215 unsigned param, unsigned return_index)
3216 {
3217 LLVMBuilderRef builder = ctx->ac.builder;
3218 LLVMValueRef p = LLVMGetParam(ctx->main_fn, param);
3219
3220 return LLVMBuildInsertValue(builder, ret,
3221 ac_to_float(&ctx->ac, p),
3222 return_index, "");
3223 }
3224
3225 static LLVMValueRef
3226 si_insert_input_ptr_as_2xi32(struct si_shader_context *ctx, LLVMValueRef ret,
3227 unsigned param, unsigned return_index)
3228 {
3229 LLVMBuilderRef builder = ctx->ac.builder;
3230 LLVMValueRef ptr, lo, hi;
3231
3232 ptr = LLVMGetParam(ctx->main_fn, param);
3233 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i64, "");
3234 ptr = LLVMBuildBitCast(builder, ptr, ctx->v2i32, "");
3235 lo = LLVMBuildExtractElement(builder, ptr, ctx->i32_0, "");
3236 hi = LLVMBuildExtractElement(builder, ptr, ctx->i32_1, "");
3237 ret = LLVMBuildInsertValue(builder, ret, lo, return_index, "");
3238 return LLVMBuildInsertValue(builder, ret, hi, return_index + 1, "");
3239 }
3240
/* This only writes the tessellation factor levels.
 *
 * TCS epilogue: copies fixed-function TCS inputs, then packs the SGPRs and
 * VGPRs the separately-compiled TCS epilog part expects into the function's
 * aggregate return value. On GFX9 (merged LS+HS) it first closes the
 * merged-shader wrapper "if" and builds phis so values defined inside it
 * dominate the return.
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(bld_base);

	rel_patch_id = get_rel_patch_id(ctx);
	/* Invocation id: 5 bits starting at bit 8 of tcs_rel_ids. */
	invocation_id = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (ctx->screen->info.chip_class >= GFX9) {
		LLVMBasicBlockRef blocks[2] = {
			LLVMGetInsertBlock(builder),
			ctx->merged_wrap_if_state.entry_block
		};
		LLVMValueRef values[2];

		lp_build_endif(&ctx->merged_wrap_if_state);

		/* Phi the per-thread values across the closed "if"; threads
		 * that skipped the HS body get undef (or, for invocation_id,
		 * a nonzero value so the epilog skips them). */
		values[0] = rel_patch_id;
		values[1] = LLVMGetUndef(ctx->i32);
		rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = tf_lds_offset;
		values[1] = LLVMGetUndef(ctx->i32);
		tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = invocation_id;
		values[1] = ctx->i32_1; /* cause the epilog to skip threads */
		invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
	}

	/* Return epilog parameters from this function. */
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->info.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
					  GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
					  GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
	invocation_id = ac_to_float(&ctx->ac, invocation_id);
	tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);

	/* Leave a hole corresponding to the two input VGPRs. This ensures that
	 * the invocation_id output does not alias the tcs_rel_ids input,
	 * which saves a V_MOV on gfx9.
	 */
	vgpr += 2;

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");

	if (ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
		vgpr++; /* skip the tess factor LDS offset */
		/* Pass the 6 tess factor values directly in VGPRs. */
		for (unsigned i = 0; i < 6; i++) {
			LLVMValueRef value =
				LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
			value = ac_to_float(&ctx->ac, value);
			ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
		}
	} else {
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	}
	ctx->return_value = ret;
}
3333
/* Pass TCS inputs from LS to TCS on GFX9.
 *
 * On GFX9, LS and HS are merged into one shader; the LS part returns all
 * SGPR inputs (offsets, descriptors, layout words) and the two system-value
 * VGPRs (patch id, rel ids) in the aggregate return value so the HS part
 * receives them in the expected registers. The "8 +" offsets skip the
 * first 8 system SGPR slots of the merged-shader layout.
 */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs at fixed positions 2..5. */
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Pointers are returned as two i32 halves each. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
					   ctx->param_bindless_samplers_and_images,
					   8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	ret = si_insert_input_ret(ctx, ret, ctx->param_vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_offchip_addr_base64k,
				  8 + GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K);
	ret = si_insert_input_ret(ctx, ret, ctx->param_tcs_factor_addr_base64k,
				  8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K);

	/* TCS descriptor pointers follow two params after tcs_factor_addr. */
	unsigned desc_param = ctx->param_tcs_factor_addr_base64k + 2;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES);

	/* VGPRs: patch id and rel ids, bitcast to float for the VGPR slots. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_patch_id),
				   vgpr++, "");
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac, ctx->abi.tcs_rel_ids),
				   vgpr++, "");
	ctx->return_value = ret;
}
3378
/* Pass GS inputs from ES to GS on GFX9.
 *
 * With merged ES+GS shaders, the ES part returns the SGPR inputs and the
 * vertex-offset/primitive VGPRs in the aggregate return value so the GS
 * part receives them in the expected registers. The "8 +" offsets skip the
 * first 8 system SGPR slots of the merged-shader layout.
 */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* System SGPRs at fixed positions 2, 3, 5. */
	ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);

	/* Pointers are returned as two i32 halves each. */
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, ctx->param_rw_buffers,
					   8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret,
					   ctx->param_bindless_samplers_and_images,
					   8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	/* GS descriptor pointers follow right after vs_state_bits. */
	unsigned desc_param = ctx->param_vs_state_bits + 1;
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param,
					   8 + GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS);
	ret = si_insert_input_ptr_as_2xi32(ctx, ret, desc_param + 1,
					   8 + GFX9_SGPR_GS_SAMPLERS_AND_IMAGES);

	/* VGPRs: the 5 GS input VGPRs starting at gs_vtx01_offset. */
	unsigned vgpr = 8 + GFX9_GS_NUM_USER_SGPR;
	for (unsigned i = 0; i < 5; i++) {
		unsigned param = ctx->param_gs_vtx01_offset + i;
		ret = si_insert_input_ret_float(ctx, ret, param, vgpr++);
	}
	ctx->return_value = ret;
}
3407
/* VS-as-LS epilogue: write all vertex outputs to LDS for the TCS to read.
 *
 * abi.emit_outputs callback. "addrs" holds one alloca per output channel
 * (4 per output). On GFX9 it also fills the merged-shader return value.
 */
static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
	/* LDS dword address of this vertex's output area. */
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each attribute occupies 4 dwords in LDS. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			/* Only store channels the shader actually writes. */
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			lds_store(ctx, chan, dw_addr,
				  LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
3463
/* VS/TES-as-ES epilogue: write all outputs to the ESGS ring.
 *
 * abi.emit_outputs callback. Pre-GFX9 the ESGS ring is a memory buffer;
 * on GFX9 (merged ES+GS) it lives in LDS, addressed per ES vertex.
 * On GFX9 it also fills the merged-shader return value for the GS part.
 */
static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		/* Global ES vertex index = wave index * 64 + thread id. */
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		int param;

		/* Layer/viewport from a non-last vertex stage are ignored;
		 * see the ARB_shader_viewport_layer_array resolution. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->info.chip_class >= GFX9) {
				lds_store(ctx, param * 4 + chan, lds_base, out_val);
				continue;
			}

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL, soffset,
						    (4 * param + chan) * 4,
						    1, 1, true, true);
		}
	}

	if (ctx->screen->info.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
3519
3520 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
3521 {
3522 if (ctx->screen->info.chip_class >= GFX9)
3523 return unpack_param(ctx, ctx->param_merged_wave_info, 16, 8);
3524 else
3525 return LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id);
3526 }
3527
3528 static void emit_gs_epilogue(struct si_shader_context *ctx)
3529 {
3530 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
3531 si_get_gs_wave_id(ctx));
3532
3533 if (ctx->screen->info.chip_class >= GFX9)
3534 lp_build_endif(&ctx->merged_wrap_if_state);
3535 }
3536
3537 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
3538 unsigned max_outputs,
3539 LLVMValueRef *addrs)
3540 {
3541 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3542 struct tgsi_shader_info UNUSED *info = &ctx->shader->selector->info;
3543
3544 assert(info->num_outputs <= max_outputs);
3545
3546 emit_gs_epilogue(ctx);
3547 }
3548
/* TGSI entry point: forward straight to the common GS epilogue. */
static void si_tgsi_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	emit_gs_epilogue(si_shader_context(bld_base));
}
3554
/* VS/TES-as-hardware-VS epilogue: clamp colors if requested, gather all
 * outputs, emit streamout, optionally append PrimitiveID, and export.
 *
 * abi.emit_outputs callback. "addrs" holds one alloca per output channel
 * (4 per output); the outputs array is sized num_outputs + 1 to leave room
 * for the optional PrimitiveID export.
 */
static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    ctx->param_vs_state_bits);
				cond = LLVMBuildTrunc(ctx->ac.builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, &ctx->gallivm, cond);
			}

			/* Clamp all 4 channels of the color output in place. */
			for (j = 0; j < 4; j++) {
				addr = addrs[4 * i + j];
				val = LLVMBuildLoad(ctx->ac.builder, addr, "");
				val = ac_build_clamp(&ctx->ac, val);
				LLVMBuildStore(ctx->ac.builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Load every output value (and its stream mask) from its alloca. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(ctx->ac.builder,
					      addrs[4 * i + j],
					      "");
			/* 2 bits of stream index per channel. */
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	if (ctx->shader->selector->so.num_outputs)
		si_llvm_emit_streamout(ctx, outputs, i, 0);

	/* Export PrimitiveID. */
	if (ctx->shader->key.mono.u.vs_export_prim_id) {
		outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
		outputs[i].semantic_index = 0;
		outputs[i].values[0] = ac_to_float(&ctx->ac, get_primitive_id(ctx, 0));
		for (j = 1; j < 4; j++)
			outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);

		memset(outputs[i].vertex_stream, 0,
		       sizeof(outputs[i].vertex_stream));
		i++;
	}

	si_llvm_export_vs(ctx, outputs, i);
	FREE(outputs);
}
3640
3641 static void si_tgsi_emit_epilogue(struct lp_build_tgsi_context *bld_base)
3642 {
3643 struct si_shader_context *ctx = si_shader_context(bld_base);
3644
3645 ctx->abi.emit_outputs(&ctx->abi, RADEON_LLVM_MAX_OUTPUTS,
3646 &ctx->outputs[0][0]);
3647 }
3648
/* Queue of pixel shader exports, collected first and emitted in one batch
 * by si_emit_ps_exports (so the last one can carry the DONE bit).
 */
struct si_ps_exports {
	unsigned num;			/* number of valid entries in args */
	struct ac_export_args args[10];	/* room for color MRTs + MRTZ */
};
3653
3654 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
3655 LLVMValueRef depth, LLVMValueRef stencil,
3656 LLVMValueRef samplemask, struct si_ps_exports *exp)
3657 {
3658 struct si_shader_context *ctx = si_shader_context(bld_base);
3659 struct ac_export_args args;
3660
3661 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
3662
3663 memcpy(&exp->args[exp->num++], &args, sizeof(args));
3664 }
3665
/* Apply the PS epilog color transforms (clamp, alpha-to-one, alpha test,
 * smoothing) to one color output and queue its MRT export(s) in "exp".
 *
 * index is the color buffer index; samplemask_param is the SampleMaskIn
 * parameter used for line/polygon smoothing; is_last marks the export
 * that must carry the valid_mask/DONE bits.
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = ctx->ac.f32_1;

	/* Alpha test: only applied to color output 0. */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(ctx, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(ctx, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3731
3732 static void si_emit_ps_exports(struct si_shader_context *ctx,
3733 struct si_ps_exports *exp)
3734 {
3735 for (unsigned i = 0; i < exp->num; i++)
3736 ac_build_export(&ctx->ac, &exp->args[i]);
3737 }
3738
3739 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3740 {
3741 struct si_shader_context *ctx = si_shader_context(bld_base);
3742 struct lp_build_context *base = &bld_base->base;
3743 struct ac_export_args args;
3744
3745 args.enabled_channels = 0x0; /* enabled channels */
3746 args.valid_mask = 1; /* whether the EXEC mask is valid */
3747 args.done = 1; /* DONE bit */
3748 args.target = V_008DFC_SQ_EXP_NULL;
3749 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
3750 args.out[0] = base->undef; /* R */
3751 args.out[1] = base->undef; /* G */
3752 args.out[2] = base->undef; /* B */
3753 args.out[3] = base->undef; /* A */
3754
3755 ac_build_export(&ctx->ac, &args);
3756 }
3757
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 *
 * Note: \p max_outputs is not used here; the outputs are taken from the
 * selector's TGSI info instead.
 */
static void si_llvm_return_fs_outputs(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Execute any kill that was postponed until the end of the shader. */
	if (ctx->postponed_kill)
		ac_build_kill_if_false(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = addrs[4 * i + j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the .z component carries the depth value. */
			depth = LLVMBuildLoad(builder,
					      addrs[4 * i + 2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			/* Stencil is stored in the .y component. */
			stencil = LLVMBuildLoad(builder,
						addrs[4 * i + 1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   addrs[4 * i + 0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   ac_to_integer(&ctx->ac,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: written colors first (4 components each, in semantic
	 * index order), then depth/stencil/samplemask if present. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3855
3856 static void membar_emit(
3857 const struct lp_build_tgsi_action *action,
3858 struct lp_build_tgsi_context *bld_base,
3859 struct lp_build_emit_data *emit_data)
3860 {
3861 struct si_shader_context *ctx = si_shader_context(bld_base);
3862 LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
3863 unsigned flags = LLVMConstIntGetZExtValue(src0);
3864 unsigned waitcnt = NOOP_WAITCNT;
3865
3866 if (flags & TGSI_MEMBAR_THREAD_GROUP)
3867 waitcnt &= VM_CNT & LGKM_CNT;
3868
3869 if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
3870 TGSI_MEMBAR_SHADER_BUFFER |
3871 TGSI_MEMBAR_SHADER_IMAGE))
3872 waitcnt &= VM_CNT;
3873
3874 if (flags & TGSI_MEMBAR_SHARED)
3875 waitcnt &= LGKM_CNT;
3876
3877 if (waitcnt != NOOP_WAITCNT)
3878 ac_build_waitcnt(&ctx->ac, waitcnt);
3879 }
3880
3881 static void clock_emit(
3882 const struct lp_build_tgsi_action *action,
3883 struct lp_build_tgsi_context *bld_base,
3884 struct lp_build_emit_data *emit_data)
3885 {
3886 struct si_shader_context *ctx = si_shader_context(bld_base);
3887 LLVMValueRef tmp;
3888
3889 tmp = lp_build_intrinsic(ctx->ac.builder, "llvm.readcyclecounter",
3890 ctx->i64, NULL, 0, 0);
3891 tmp = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->v2i32, "");
3892
3893 emit_data->output[0] =
3894 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_0, "");
3895 emit_data->output[1] =
3896 LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->i32_1, "");
3897 }
3898
3899 LLVMTypeRef si_const_array(LLVMTypeRef elem_type, int num_elements)
3900 {
3901 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
3902 CONST_ADDR_SPACE);
3903 }
3904
3905 static void si_llvm_emit_ddxy(
3906 const struct lp_build_tgsi_action *action,
3907 struct lp_build_tgsi_context *bld_base,
3908 struct lp_build_emit_data *emit_data)
3909 {
3910 struct si_shader_context *ctx = si_shader_context(bld_base);
3911 unsigned opcode = emit_data->info->opcode;
3912 LLVMValueRef val;
3913 int idx;
3914 unsigned mask;
3915
3916 if (opcode == TGSI_OPCODE_DDX_FINE)
3917 mask = AC_TID_MASK_LEFT;
3918 else if (opcode == TGSI_OPCODE_DDY_FINE)
3919 mask = AC_TID_MASK_TOP;
3920 else
3921 mask = AC_TID_MASK_TOP_LEFT;
3922
3923 /* for DDX we want to next X pixel, DDY next Y pixel. */
3924 idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
3925
3926 val = ac_to_integer(&ctx->ac, emit_data->args[0]);
3927 val = ac_build_ddxy(&ctx->ac, mask, idx, val);
3928 emit_data->output[emit_data->chan] = val;
3929 }
3930
3931 /*
3932 * this takes an I,J coordinate pair,
3933 * and works out the X and Y derivatives.
3934 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3935 */
3936 static LLVMValueRef si_llvm_emit_ddxy_interp(
3937 struct lp_build_tgsi_context *bld_base,
3938 LLVMValueRef interp_ij)
3939 {
3940 struct si_shader_context *ctx = si_shader_context(bld_base);
3941 LLVMValueRef result[4], a;
3942 unsigned i;
3943
3944 for (i = 0; i < 2; i++) {
3945 a = LLVMBuildExtractElement(ctx->ac.builder, interp_ij,
3946 LLVMConstInt(ctx->i32, i, 0), "");
3947 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
3948 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
3949 }
3950
3951 return lp_build_gather_values(&ctx->gallivm, result, 4);
3952 }
3953
/* Fetch the operands of the TGSI INTERP_* opcodes.
 *
 * INTERP_OFFSET: args[0..1] receive the (x, y) offset from the second
 * source operand.
 * INTERP_SAMPLE: args[0..1] receive the selected sample position relative
 * to the pixel center.
 * INTERP_CENTROID needs no extra arguments.
 */
static void interp_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
		/* offset is in second src, first two channels */
		emit_data->args[0] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_X);
		emit_data->args[1] = lp_build_emit_fetch(bld_base,
							 emit_data->inst, 1,
							 TGSI_CHAN_Y);
		emit_data->arg_count = 2;
	} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef sample_position;
		LLVMValueRef sample_id;
		LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);

		/* fetch sample ID, then fetch its sample position,
		 * and place into first two channels.
		 */
		sample_id = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, TGSI_CHAN_X);
		sample_id = ac_to_integer(&ctx->ac, sample_id);

		/* Section 8.13.2 (Interpolation Functions) of the OpenGL Shading
		 * Language 4.50 spec says about interpolateAtSample:
		 *
		 *    "Returns the value of the input interpolant variable at
		 *     the location of sample number sample. If multisample
		 *     buffers are not available, the input variable will be
		 *     evaluated at the center of the pixel. If sample sample
		 *     does not exist, the position used to interpolate the
		 *     input variable is undefined."
		 *
		 * This means that sample_id values outside of the valid range
		 * are in fact valid input, and the usual mechanism for loading
		 * the sample position doesn't work.
		 */
		if (ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center) {
			/* Force the pixel center (0.5, 0.5). */
			LLVMValueRef center[4] = {
				LLVMConstReal(ctx->f32, 0.5),
				LLVMConstReal(ctx->f32, 0.5),
				ctx->ac.f32_0,
				ctx->ac.f32_0,
			};

			sample_position = lp_build_gather_values(&ctx->gallivm, center, 4);
		} else {
			sample_position = load_sample_position(ctx, sample_id);
		}

		/* Subtract 0.5 so the result is an offset from the pixel
		 * center rather than an absolute position. */
		emit_data->args[0] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_0, "");

		emit_data->args[0] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[0], halfval, "");
		emit_data->args[1] = LLVMBuildExtractElement(ctx->ac.builder,
							     sample_position,
							     ctx->i32_1, "");
		emit_data->args[1] = LLVMBuildFSub(ctx->ac.builder, emit_data->args[1], halfval, "");
		emit_data->arg_count = 2;
	}
}
4021
/* Emit the TGSI INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET opcodes.
 *
 * Interpolates the fragment-shader input addressed by src0 (which may be
 * an indirectly-indexed input array) at the location chosen by the
 * opcode. For INTERP_SAMPLE/INTERP_OFFSET, the (x, y) location fetched by
 * interp_fetch_args() is in emit_data->args[0..1].
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	const struct tgsi_shader_info *info = &shader->selector->info;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *input = &inst->Src[0];
	int input_base, input_array_size;
	int chan;
	int i;
	LLVMValueRef prim_mask = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	LLVMValueRef array_idx;
	int interp_param_idx;
	unsigned interp;
	unsigned location;

	assert(input->Register.File == TGSI_FILE_INPUT);

	/* Determine the addressable input range [input_base,
	 * input_base + input_array_size) and the dynamic index into it. */
	if (input->Register.Indirect) {
		unsigned array_id = input->Indirect.ArrayID;

		if (array_id) {
			/* A declared input array: use its bounds. */
			input_base = info->input_array_first[array_id];
			input_array_size = info->input_array_last[array_id] - input_base + 1;
		} else {
			/* No array info: any input up to the last one may be hit. */
			input_base = inst->Src[0].Register.Index;
			input_array_size = info->num_inputs - input_base;
		}

		array_idx = si_get_indirect_index(ctx, &input->Indirect,
						  1, input->Register.Index - input_base);
	} else {
		input_base = inst->Src[0].Register.Index;
		input_array_size = 1;
		array_idx = ctx->i32_0;
	}

	interp = shader->selector->info.input_interpolate[input_base];

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* -1: error; 0: constant interpolation (no I/J parameter needed). */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->main_fn, interp_param_idx);
	else
		interp_param = NULL;

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, 0);
			LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, 0);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = ac_to_float(&ctx->ac, interp_el);

			temp1 = LLVMBuildFMul(ctx->ac.builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(ctx->ac.builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(ctx->ac.builder, ddy_el, emit_data->args[1], "");

			ij_out[i] = LLVMBuildFAdd(ctx->ac.builder, temp2, temp1, "");
		}
		interp_param = lp_build_gather_values(&ctx->gallivm, ij_out, 2);
	}

	if (interp_param)
		interp_param = ac_to_float(&ctx->ac, interp_param);

	/* Interpolate every element of the addressable range for each
	 * channel, then pick the element selected by array_idx. */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef gather = LLVMGetUndef(LLVMVectorType(ctx->f32, input_array_size));
		unsigned schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);

		for (unsigned idx = 0; idx < input_array_size; ++idx) {
			LLVMValueRef v, i = NULL, j = NULL;

			if (interp_param) {
				i = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_0, "");
				j = LLVMBuildExtractElement(
					ctx->ac.builder, interp_param, ctx->i32_1, "");
			}
			v = si_build_fs_interp(ctx, input_base + idx, schan,
					       prim_mask, i, j);

			gather = LLVMBuildInsertElement(ctx->ac.builder,
				gather, v, LLVMConstInt(ctx->i32, idx, false), "");
		}

		emit_data->output[chan] = LLVMBuildExtractElement(
			ctx->ac.builder, gather, array_idx, "");
	}
}
4142
4143 static void vote_all_emit(
4144 const struct lp_build_tgsi_action *action,
4145 struct lp_build_tgsi_context *bld_base,
4146 struct lp_build_emit_data *emit_data)
4147 {
4148 struct si_shader_context *ctx = si_shader_context(bld_base);
4149
4150 LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, emit_data->args[0]);
4151 emit_data->output[emit_data->chan] =
4152 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4153 }
4154
4155 static void vote_any_emit(
4156 const struct lp_build_tgsi_action *action,
4157 struct lp_build_tgsi_context *bld_base,
4158 struct lp_build_emit_data *emit_data)
4159 {
4160 struct si_shader_context *ctx = si_shader_context(bld_base);
4161
4162 LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, emit_data->args[0]);
4163 emit_data->output[emit_data->chan] =
4164 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4165 }
4166
4167 static void vote_eq_emit(
4168 const struct lp_build_tgsi_action *action,
4169 struct lp_build_tgsi_context *bld_base,
4170 struct lp_build_emit_data *emit_data)
4171 {
4172 struct si_shader_context *ctx = si_shader_context(bld_base);
4173
4174 LLVMValueRef tmp = ac_build_vote_eq(&ctx->ac, emit_data->args[0]);
4175 emit_data->output[emit_data->chan] =
4176 LLVMBuildSExt(ctx->ac.builder, tmp, ctx->i32, "");
4177 }
4178
4179 static void ballot_emit(
4180 const struct lp_build_tgsi_action *action,
4181 struct lp_build_tgsi_context *bld_base,
4182 struct lp_build_emit_data *emit_data)
4183 {
4184 struct si_shader_context *ctx = si_shader_context(bld_base);
4185 LLVMBuilderRef builder = ctx->ac.builder;
4186 LLVMValueRef tmp;
4187
4188 tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4189 tmp = ac_build_ballot(&ctx->ac, tmp);
4190 tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
4191
4192 emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
4193 emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
4194 }
4195
4196 static void read_invoc_fetch_args(
4197 struct lp_build_tgsi_context *bld_base,
4198 struct lp_build_emit_data *emit_data)
4199 {
4200 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
4201 0, emit_data->src_chan);
4202
4203 /* Always read the source invocation (= lane) from the X channel. */
4204 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
4205 1, TGSI_CHAN_X);
4206 emit_data->arg_count = 2;
4207 }
4208
4209 static void read_lane_emit(
4210 const struct lp_build_tgsi_action *action,
4211 struct lp_build_tgsi_context *bld_base,
4212 struct lp_build_emit_data *emit_data)
4213 {
4214 struct si_shader_context *ctx = si_shader_context(bld_base);
4215
4216 /* We currently have no other way to prevent LLVM from lifting the icmp
4217 * calls to a dominating basic block.
4218 */
4219 ac_build_optimization_barrier(&ctx->ac, &emit_data->args[0]);
4220
4221 for (unsigned i = 0; i < emit_data->arg_count; ++i)
4222 emit_data->args[i] = ac_to_integer(&ctx->ac, emit_data->args[i]);
4223
4224 emit_data->output[emit_data->chan] =
4225 ac_build_intrinsic(&ctx->ac, action->intr_name,
4226 ctx->i32, emit_data->args, emit_data->arg_count,
4227 AC_FUNC_ATTR_READNONE |
4228 AC_FUNC_ATTR_CONVERGENT);
4229 }
4230
4231 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
4232 struct lp_build_emit_data *emit_data)
4233 {
4234 struct si_shader_context *ctx = si_shader_context(bld_base);
4235 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
4236 LLVMValueRef imm;
4237 unsigned stream;
4238
4239 assert(src0.File == TGSI_FILE_IMMEDIATE);
4240
4241 imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
4242 stream = LLVMConstIntGetZExtValue(imm) & 0x3;
4243 return stream;
4244 }
4245
/* Emit one vertex from the geometry shader.
 *
 * Writes the current values of all outputs that belong to \p stream into
 * the GSVS ring buffer and signals the emission via a sendmsg. Vertices
 * emitted beyond gs_max_out_vertices are dropped (or, if the shader has
 * no memory writes, the thread is killed instead).
 */
static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
				unsigned stream,
				LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct lp_build_context *uint = &ctx->bld_base.uint_bld;
	struct si_shader *shader = ctx->shader;
	struct lp_build_if_state if_state;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned chan, offset;
	int i;

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		ac_build_kill_if_false(&ctx->ac, can_emit);
	} else {
		lp_build_if(&if_state, &ctx->gallivm, can_emit);
	}

	/* "offset" counts the output components of this stream written so
	 * far; each component occupies gs_max_out_vertices dwords in the
	 * ring. */
	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		for (chan = 0; chan < 4; chan++) {
			/* Skip unused channels and channels bound to other streams. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = ac_to_integer(&ctx->ac, out_val);

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    1, 1, true, true);
		}
	}

	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      ctx->i32_1);

	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 si_get_gs_wave_id(ctx));
	if (!use_kill)
		lp_build_endif(&if_state);
}
4324
4325 /* Emit one vertex from the geometry shader */
4326 static void si_tgsi_emit_vertex(
4327 const struct lp_build_tgsi_action *action,
4328 struct lp_build_tgsi_context *bld_base,
4329 struct lp_build_emit_data *emit_data)
4330 {
4331 struct si_shader_context *ctx = si_shader_context(bld_base);
4332 unsigned stream = si_llvm_get_stream(bld_base, emit_data);
4333
4334 si_llvm_emit_vertex(&ctx->abi, stream, ctx->outputs[0]);
4335 }
4336
4337 /* Cut one primitive from the geometry shader */
4338 static void si_llvm_emit_primitive(
4339 const struct lp_build_tgsi_action *action,
4340 struct lp_build_tgsi_context *bld_base,
4341 struct lp_build_emit_data *emit_data)
4342 {
4343 struct si_shader_context *ctx = si_shader_context(bld_base);
4344 unsigned stream;
4345
4346 /* Signal primitive cut */
4347 stream = si_llvm_get_stream(bld_base, emit_data);
4348 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
4349 si_get_gs_wave_id(ctx));
4350 }
4351
4352 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
4353 struct lp_build_tgsi_context *bld_base,
4354 struct lp_build_emit_data *emit_data)
4355 {
4356 struct si_shader_context *ctx = si_shader_context(bld_base);
4357
4358 /* SI only (thanks to a hw bug workaround):
4359 * The real barrier instruction isn’t needed, because an entire patch
4360 * always fits into a single wave.
4361 */
4362 if (ctx->screen->info.chip_class == SI &&
4363 ctx->type == PIPE_SHADER_TESS_CTRL) {
4364 ac_build_waitcnt(&ctx->ac, LGKM_CNT & VM_CNT);
4365 return;
4366 }
4367
4368 lp_build_intrinsic(ctx->ac.builder,
4369 "llvm.amdgcn.s.barrier",
4370 ctx->voidt, NULL, 0, LP_FUNC_ATTR_CONVERGENT);
4371 }
4372
/* TGSI action callbacks shared by the INTERP_CENTROID/SAMPLE/OFFSET opcodes. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
4377
/* Create the main LLVM function for the current shader.
 *
 * \param name               function name
 * \param returns            member types of the return value (for shader
 *                           parts that return values to an epilog);
 *                           ctx->return_value starts out undef
 * \param fninfo             parameter list built by the declare_* helpers
 * \param max_workgroup_size if non-zero, passed to LLVM via the
 *                           "amdgpu-max-work-group-size" attribute
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       struct si_function_info *fninfo,
			       unsigned max_workgroup_size)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    fninfo->types, fninfo->num_params);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i < fninfo->num_sgpr_params; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 *
		 * Note: attribute index 0 is the return value, so parameter
		 * attributes use i + 1.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_BYVAL);
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			lp_add_function_attr(ctx->main_fn, i + 1, LP_FUNC_ATTR_INREG);
	}

	/* Store the parameter values requested via add_arg_assign(). */
	for (i = 0; i < fninfo->num_params; ++i) {
		if (fninfo->assign[i])
			*fninfo->assign[i] = LLVMGetParam(ctx->main_fn, i);
	}

	if (max_workgroup_size) {
		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
				      max_workgroup_size);
	}
	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	if (ctx->screen->debug_flags & DBG(UNSAFE_MATH)) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
4437
4438 static void declare_streamout_params(struct si_shader_context *ctx,
4439 struct pipe_stream_output_info *so,
4440 struct si_function_info *fninfo)
4441 {
4442 int i;
4443
4444 /* Streamout SGPRs. */
4445 if (so->num_outputs) {
4446 if (ctx->type != PIPE_SHADER_TESS_EVAL)
4447 ctx->param_streamout_config = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4448 else
4449 ctx->param_streamout_config = fninfo->num_params - 1;
4450
4451 ctx->param_streamout_write_index = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4452 }
4453 /* A streamout buffer offset is loaded if the stride is non-zero. */
4454 for (i = 0; i < 4; i++) {
4455 if (!so->stride[i])
4456 continue;
4457
4458 ctx->param_streamout_offset[i] = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
4459 }
4460 }
4461
4462 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
4463 {
4464 switch (shader->selector->type) {
4465 case PIPE_SHADER_TESS_CTRL:
4466 /* Return this so that LLVM doesn't remove s_barrier
4467 * instructions on chips where we use s_barrier. */
4468 return shader->selector->screen->info.chip_class >= CIK ? 128 : 64;
4469
4470 case PIPE_SHADER_GEOMETRY:
4471 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 64;
4472
4473 case PIPE_SHADER_COMPUTE:
4474 break; /* see below */
4475
4476 default:
4477 return 0;
4478 }
4479
4480 const unsigned *properties = shader->selector->info.properties;
4481 unsigned max_work_group_size =
4482 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
4483 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
4484 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
4485
4486 if (!max_work_group_size) {
4487 /* This is a variable group size compute shader,
4488 * compile it for the maximum possible group size.
4489 */
4490 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
4491 }
4492 return max_work_group_size;
4493 }
4494
4495 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
4496 struct si_function_info *fninfo,
4497 bool assign_params)
4498 {
4499 LLVMTypeRef const_shader_buf_type;
4500
4501 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
4502 ctx->shader->selector->info.shader_buffers_declared == 0)
4503 const_shader_buf_type = ctx->f32;
4504 else
4505 const_shader_buf_type = ctx->v4i32;
4506
4507 unsigned const_and_shader_buffers =
4508 add_arg(fninfo, ARG_SGPR,
4509 si_const_array(const_shader_buf_type, 0));
4510
4511 unsigned samplers_and_images =
4512 add_arg(fninfo, ARG_SGPR,
4513 si_const_array(ctx->v8i32,
4514 SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2));
4515
4516 if (assign_params) {
4517 ctx->param_const_and_shader_buffers = const_and_shader_buffers;
4518 ctx->param_samplers_and_images = samplers_and_images;
4519 }
4520 }
4521
4522 static void declare_global_desc_pointers(struct si_shader_context *ctx,
4523 struct si_function_info *fninfo)
4524 {
4525 ctx->param_rw_buffers = add_arg(fninfo, ARG_SGPR,
4526 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS));
4527 ctx->param_bindless_samplers_and_images = add_arg(fninfo, ARG_SGPR,
4528 si_const_array(ctx->v8i32, 0));
4529 }
4530
4531 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx,
4532 struct si_function_info *fninfo)
4533 {
4534 ctx->param_vertex_buffers = add_arg(fninfo, ARG_SGPR,
4535 si_const_array(ctx->v4i32, SI_NUM_VERTEX_BUFFERS));
4536 add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.base_vertex);
4537 add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.start_instance);
4538 add_arg_assign(fninfo, ARG_SGPR, ctx->i32, &ctx->abi.draw_id);
4539 ctx->param_vs_state_bits = add_arg(fninfo, ARG_SGPR, ctx->i32);
4540 }
4541
4542 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
4543 struct si_function_info *fninfo,
4544 unsigned *num_prolog_vgprs)
4545 {
4546 struct si_shader *shader = ctx->shader;
4547
4548 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.vertex_id);
4549 if (shader->key.as_ls) {
4550 ctx->param_rel_auto_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
4551 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
4552 } else {
4553 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
4554 ctx->param_vs_prim_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
4555 }
4556 add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */
4557
4558 if (!shader->is_gs_copy_shader) {
4559 /* Vertex load indices. */
4560 ctx->param_vertex_index0 = fninfo->num_params;
4561 for (unsigned i = 0; i < shader->selector->info.num_inputs; i++)
4562 add_arg(fninfo, ARG_VGPR, ctx->i32);
4563 *num_prolog_vgprs += shader->selector->info.num_inputs;
4564 }
4565 }
4566
4567 static void declare_tes_input_vgprs(struct si_shader_context *ctx,
4568 struct si_function_info *fninfo)
4569 {
4570 ctx->param_tes_u = add_arg(fninfo, ARG_VGPR, ctx->f32);
4571 ctx->param_tes_v = add_arg(fninfo, ARG_VGPR, ctx->f32);
4572 ctx->param_tes_rel_patch_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
4573 add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tes_patch_id);
4574 }
4575
enum {
	/* Convenient merged shader definitions, used as additional values of
	 * the shader-type switch in create_function(). They start at
	 * PIPE_SHADER_TYPES so they cannot collide with the regular
	 * PIPE_SHADER_* values (GFX9 merges LS+HS and ES+GS). */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
4581
4582 static void create_function(struct si_shader_context *ctx)
4583 {
4584 struct si_shader *shader = ctx->shader;
4585 struct si_function_info fninfo;
4586 LLVMTypeRef returns[16+32*4];
4587 unsigned i, num_return_sgprs;
4588 unsigned num_returns = 0;
4589 unsigned num_prolog_vgprs = 0;
4590 unsigned type = ctx->type;
4591 unsigned vs_blit_property =
4592 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
4593
4594 si_init_function_info(&fninfo);
4595
4596 /* Set MERGED shaders. */
4597 if (ctx->screen->info.chip_class >= GFX9) {
4598 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
4599 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
4600 else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
4601 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
4602 }
4603
4604 LLVMTypeRef v3i32 = LLVMVectorType(ctx->i32, 3);
4605
4606 switch (type) {
4607 case PIPE_SHADER_VERTEX:
4608 declare_global_desc_pointers(ctx, &fninfo);
4609
4610 if (vs_blit_property) {
4611 ctx->param_vs_blit_inputs = fninfo.num_params;
4612 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x1, y1 */
4613 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* i16 x2, y2 */
4614 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* depth */
4615
4616 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
4617 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color0 */
4618 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color1 */
4619 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color2 */
4620 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* color3 */
4621 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
4622 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x1 */
4623 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y1 */
4624 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.x2 */
4625 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.y2 */
4626 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.z */
4627 add_arg(&fninfo, ARG_SGPR, ctx->f32); /* texcoord.w */
4628 }
4629
4630 /* VGPRs */
4631 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4632 break;
4633 }
4634
4635 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4636 declare_vs_specific_input_sgprs(ctx, &fninfo);
4637
4638 if (shader->key.as_es) {
4639 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4640 } else if (shader->key.as_ls) {
4641 /* no extra parameters */
4642 } else {
4643 if (shader->is_gs_copy_shader) {
4644 fninfo.num_params = ctx->param_rw_buffers + 1;
4645 fninfo.num_sgpr_params = fninfo.num_params;
4646 }
4647
4648 /* The locations of the other parameters are assigned dynamically. */
4649 declare_streamout_params(ctx, &shader->selector->so,
4650 &fninfo);
4651 }
4652
4653 /* VGPRs */
4654 declare_vs_input_vgprs(ctx, &fninfo, &num_prolog_vgprs);
4655 break;
4656
4657 case PIPE_SHADER_TESS_CTRL: /* SI-CI-VI */
4658 declare_global_desc_pointers(ctx, &fninfo);
4659 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4660 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4661 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4662 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4663 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4664 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4665 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4666 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4667 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4668
4669 /* VGPRs */
4670 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4671 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4672
4673 /* param_tcs_offchip_offset and param_tcs_factor_offset are
4674 * placed after the user SGPRs.
4675 */
4676 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
4677 returns[num_returns++] = ctx->i32; /* SGPRs */
4678 for (i = 0; i < 11; i++)
4679 returns[num_returns++] = ctx->f32; /* VGPRs */
4680 break;
4681
4682 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
4683 /* Merged stages have 8 system SGPRs at the beginning. */
4684 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_LO_HS */
4685 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* SPI_SHADER_USER_DATA_ADDR_HI_HS */
4686 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4687 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4688 ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4689 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4690 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4691 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4692
4693 declare_global_desc_pointers(ctx, &fninfo);
4694 declare_per_stage_desc_pointers(ctx, &fninfo,
4695 ctx->type == PIPE_SHADER_VERTEX);
4696 declare_vs_specific_input_sgprs(ctx, &fninfo);
4697
4698 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4699 ctx->param_tcs_out_lds_offsets = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4700 ctx->param_tcs_out_lds_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4701 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4702 ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4703 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4704
4705 declare_per_stage_desc_pointers(ctx, &fninfo,
4706 ctx->type == PIPE_SHADER_TESS_CTRL);
4707
4708 /* VGPRs (first TCS, then VS) */
4709 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_patch_id);
4710 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.tcs_rel_ids);
4711
4712 if (ctx->type == PIPE_SHADER_VERTEX) {
4713 declare_vs_input_vgprs(ctx, &fninfo,
4714 &num_prolog_vgprs);
4715
4716 /* LS return values are inputs to the TCS main shader part. */
4717 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
4718 returns[num_returns++] = ctx->i32; /* SGPRs */
4719 for (i = 0; i < 2; i++)
4720 returns[num_returns++] = ctx->f32; /* VGPRs */
4721 } else {
4722 /* TCS return values are inputs to the TCS epilog.
4723 *
4724 * param_tcs_offchip_offset, param_tcs_factor_offset,
4725 * param_tcs_offchip_layout, and param_rw_buffers
4726 * should be passed to the epilog.
4727 */
4728 for (i = 0; i <= 8 + GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K; i++)
4729 returns[num_returns++] = ctx->i32; /* SGPRs */
4730 for (i = 0; i < 11; i++)
4731 returns[num_returns++] = ctx->f32; /* VGPRs */
4732 }
4733 break;
4734
4735 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
4736 /* Merged stages have 8 system SGPRs at the beginning. */
4737 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_LO_GS) */
4738 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_USER_DATA_ADDR_HI_GS) */
4739 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4740 ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4741 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4742 ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4743 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
4744 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
4745
4746 declare_global_desc_pointers(ctx, &fninfo);
4747 declare_per_stage_desc_pointers(ctx, &fninfo,
4748 (ctx->type == PIPE_SHADER_VERTEX ||
4749 ctx->type == PIPE_SHADER_TESS_EVAL));
4750 if (ctx->type == PIPE_SHADER_VERTEX) {
4751 declare_vs_specific_input_sgprs(ctx, &fninfo);
4752 } else {
4753 /* TESS_EVAL (and also GEOMETRY):
4754 * Declare as many input SGPRs as the VS has. */
4755 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4756 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4757 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4758 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4759 add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4760 ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused */
4761 }
4762
4763 declare_per_stage_desc_pointers(ctx, &fninfo,
4764 ctx->type == PIPE_SHADER_GEOMETRY);
4765
4766 /* VGPRs (first GS, then VS/TES) */
4767 ctx->param_gs_vtx01_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4768 ctx->param_gs_vtx23_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4769 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4770 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4771 ctx->param_gs_vtx45_offset = add_arg(&fninfo, ARG_VGPR, ctx->i32);
4772
4773 if (ctx->type == PIPE_SHADER_VERTEX) {
4774 declare_vs_input_vgprs(ctx, &fninfo,
4775 &num_prolog_vgprs);
4776 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
4777 declare_tes_input_vgprs(ctx, &fninfo);
4778 }
4779
4780 if (ctx->type == PIPE_SHADER_VERTEX ||
4781 ctx->type == PIPE_SHADER_TESS_EVAL) {
4782 /* ES return values are inputs to GS. */
4783 for (i = 0; i < 8 + GFX9_GS_NUM_USER_SGPR; i++)
4784 returns[num_returns++] = ctx->i32; /* SGPRs */
4785 for (i = 0; i < 5; i++)
4786 returns[num_returns++] = ctx->f32; /* VGPRs */
4787 }
4788 break;
4789
4790 case PIPE_SHADER_TESS_EVAL:
4791 declare_global_desc_pointers(ctx, &fninfo);
4792 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4793 ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4794 ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4795
4796 if (shader->key.as_es) {
4797 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4798 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4799 ctx->param_es2gs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4800 } else {
4801 add_arg(&fninfo, ARG_SGPR, ctx->i32);
4802 declare_streamout_params(ctx, &shader->selector->so,
4803 &fninfo);
4804 ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4805 }
4806
4807 /* VGPRs */
4808 declare_tes_input_vgprs(ctx, &fninfo);
4809 break;
4810
4811 case PIPE_SHADER_GEOMETRY:
4812 declare_global_desc_pointers(ctx, &fninfo);
4813 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4814 ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4815 ctx->param_gs_wave_id = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4816
4817 /* VGPRs */
4818 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[0]);
4819 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[1]);
4820 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_prim_id);
4821 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[2]);
4822 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[3]);
4823 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[4]);
4824 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->gs_vtx_offset[5]);
4825 add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &ctx->abi.gs_invocation_id);
4826 break;
4827
4828 case PIPE_SHADER_FRAGMENT:
4829 declare_global_desc_pointers(ctx, &fninfo);
4830 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4831 add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);
4832 add_arg_checked(&fninfo, ARG_SGPR, ctx->i32, SI_PARAM_PRIM_MASK);
4833
4834 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_SAMPLE);
4835 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTER);
4836 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_PERSP_CENTROID);
4837 add_arg_checked(&fninfo, ARG_VGPR, v3i32, SI_PARAM_PERSP_PULL_MODEL);
4838 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_SAMPLE);
4839 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTER);
4840 add_arg_checked(&fninfo, ARG_VGPR, ctx->v2i32, SI_PARAM_LINEAR_CENTROID);
4841 add_arg_checked(&fninfo, ARG_VGPR, ctx->f32, SI_PARAM_LINE_STIPPLE_TEX);
4842 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4843 &ctx->abi.frag_pos[0], SI_PARAM_POS_X_FLOAT);
4844 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4845 &ctx->abi.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
4846 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4847 &ctx->abi.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
4848 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4849 &ctx->abi.frag_pos[3], SI_PARAM_POS_W_FLOAT);
4850 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4851 &ctx->abi.front_face, SI_PARAM_FRONT_FACE);
4852 shader->info.face_vgpr_index = 20;
4853 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->i32,
4854 &ctx->abi.ancillary, SI_PARAM_ANCILLARY);
4855 shader->info.ancillary_vgpr_index = 21;
4856 add_arg_assign_checked(&fninfo, ARG_VGPR, ctx->f32,
4857 &ctx->abi.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
4858 add_arg_checked(&fninfo, ARG_VGPR, ctx->i32, SI_PARAM_POS_FIXED_PT);
4859
4860 /* Color inputs from the prolog. */
4861 if (shader->selector->info.colors_read) {
4862 unsigned num_color_elements =
4863 util_bitcount(shader->selector->info.colors_read);
4864
4865 assert(fninfo.num_params + num_color_elements <= ARRAY_SIZE(fninfo.types));
4866 for (i = 0; i < num_color_elements; i++)
4867 add_arg(&fninfo, ARG_VGPR, ctx->f32);
4868
4869 num_prolog_vgprs += num_color_elements;
4870 }
4871
4872 /* Outputs for the epilog. */
4873 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
4874 num_returns =
4875 num_return_sgprs +
4876 util_bitcount(shader->selector->info.colors_written) * 4 +
4877 shader->selector->info.writes_z +
4878 shader->selector->info.writes_stencil +
4879 shader->selector->info.writes_samplemask +
4880 1 /* SampleMaskIn */;
4881
4882 num_returns = MAX2(num_returns,
4883 num_return_sgprs +
4884 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
4885
4886 for (i = 0; i < num_return_sgprs; i++)
4887 returns[i] = ctx->i32;
4888 for (; i < num_returns; i++)
4889 returns[i] = ctx->f32;
4890 break;
4891
4892 case PIPE_SHADER_COMPUTE:
4893 declare_global_desc_pointers(ctx, &fninfo);
4894 declare_per_stage_desc_pointers(ctx, &fninfo, true);
4895 if (shader->selector->info.uses_grid_size)
4896 ctx->param_grid_size = add_arg(&fninfo, ARG_SGPR, v3i32);
4897 if (shader->selector->info.uses_block_size)
4898 ctx->param_block_size = add_arg(&fninfo, ARG_SGPR, v3i32);
4899
4900 for (i = 0; i < 3; i++) {
4901 ctx->param_block_id[i] = -1;
4902 if (shader->selector->info.uses_block_id[i])
4903 ctx->param_block_id[i] = add_arg(&fninfo, ARG_SGPR, ctx->i32);
4904 }
4905
4906 ctx->param_thread_id = add_arg(&fninfo, ARG_VGPR, v3i32);
4907 break;
4908 default:
4909 assert(0 && "unimplemented shader");
4910 return;
4911 }
4912
4913 si_create_function(ctx, "main", returns, num_returns, &fninfo,
4914 si_get_max_workgroup_size(shader));
4915
4916 /* Reserve register locations for VGPR inputs the PS prolog may need. */
4917 if (ctx->type == PIPE_SHADER_FRAGMENT &&
4918 ctx->separate_prolog) {
4919 si_llvm_add_attribute(ctx->main_fn,
4920 "InitialPSInputAddr",
4921 S_0286D0_PERSP_SAMPLE_ENA(1) |
4922 S_0286D0_PERSP_CENTER_ENA(1) |
4923 S_0286D0_PERSP_CENTROID_ENA(1) |
4924 S_0286D0_LINEAR_SAMPLE_ENA(1) |
4925 S_0286D0_LINEAR_CENTER_ENA(1) |
4926 S_0286D0_LINEAR_CENTROID_ENA(1) |
4927 S_0286D0_FRONT_FACE_ENA(1) |
4928 S_0286D0_ANCILLARY_ENA(1) |
4929 S_0286D0_POS_FIXED_PT_ENA(1));
4930 }
4931
4932 shader->info.num_input_sgprs = 0;
4933 shader->info.num_input_vgprs = 0;
4934
4935 for (i = 0; i < fninfo.num_sgpr_params; ++i)
4936 shader->info.num_input_sgprs += ac_get_type_size(fninfo.types[i]) / 4;
4937
4938 for (; i < fninfo.num_params; ++i)
4939 shader->info.num_input_vgprs += ac_get_type_size(fninfo.types[i]) / 4;
4940
4941 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
4942 shader->info.num_input_vgprs -= num_prolog_vgprs;
4943
4944 if (shader->key.as_ls ||
4945 ctx->type == PIPE_SHADER_TESS_CTRL ||
4946 /* GFX9 has the ESGS ring buffer in LDS. */
4947 type == SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY)
4948 ac_declare_lds_as_pointer(&ctx->ac);
4949 }
4950
4951 /**
4952 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
4953 * for later use.
4954 */
4955 static void preload_ring_buffers(struct si_shader_context *ctx)
4956 {
4957 LLVMBuilderRef builder = ctx->ac.builder;
4958
4959 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
4960 ctx->param_rw_buffers);
4961
4962 if (ctx->screen->info.chip_class <= VI &&
4963 (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)) {
4964 unsigned ring =
4965 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
4966 : SI_ES_RING_ESGS;
4967 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
4968
4969 ctx->esgs_ring =
4970 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
4971 }
4972
4973 if (ctx->shader->is_gs_copy_shader) {
4974 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
4975
4976 ctx->gsvs_ring[0] =
4977 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
4978 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
4979 const struct si_shader_selector *sel = ctx->shader->selector;
4980 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
4981 LLVMValueRef base_ring;
4982
4983 base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
4984
4985 /* The conceptual layout of the GSVS ring is
4986 * v0c0 .. vLv0 v0c1 .. vLc1 ..
4987 * but the real memory layout is swizzled across
4988 * threads:
4989 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
4990 * t16v0c0 ..
4991 * Override the buffer descriptor accordingly.
4992 */
4993 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
4994 uint64_t stream_offset = 0;
4995
4996 for (unsigned stream = 0; stream < 4; ++stream) {
4997 unsigned num_components;
4998 unsigned stride;
4999 unsigned num_records;
5000 LLVMValueRef ring, tmp;
5001
5002 num_components = sel->info.num_stream_output_components[stream];
5003 if (!num_components)
5004 continue;
5005
5006 stride = 4 * num_components * sel->gs_max_out_vertices;
5007
5008 /* Limit on the stride field for <= CIK. */
5009 assert(stride < (1 << 14));
5010
5011 num_records = 64;
5012
5013 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
5014 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
5015 tmp = LLVMBuildAdd(builder, tmp,
5016 LLVMConstInt(ctx->i64,
5017 stream_offset, 0), "");
5018 stream_offset += stride * 64;
5019
5020 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
5021 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
5022 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
5023 tmp = LLVMBuildOr(builder, tmp,
5024 LLVMConstInt(ctx->i32,
5025 S_008F04_STRIDE(stride) |
5026 S_008F04_SWIZZLE_ENABLE(1), 0), "");
5027 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
5028 ring = LLVMBuildInsertElement(builder, ring,
5029 LLVMConstInt(ctx->i32, num_records, 0),
5030 LLVMConstInt(ctx->i32, 2, 0), "");
5031 ring = LLVMBuildInsertElement(builder, ring,
5032 LLVMConstInt(ctx->i32,
5033 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
5034 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
5035 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
5036 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
5037 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5038 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
5039 S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
5040 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
5041 S_008F0C_ADD_TID_ENABLE(1),
5042 0),
5043 LLVMConstInt(ctx->i32, 3, 0), "");
5044
5045 ctx->gsvs_ring[stream] = ring;
5046 }
5047 }
5048 }
5049
5050 static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
5051 LLVMValueRef param_rw_buffers,
5052 unsigned param_pos_fixed_pt)
5053 {
5054 LLVMBuilderRef builder = ctx->ac.builder;
5055 LLVMValueRef slot, desc, offset, row, bit, address[2];
5056
5057 /* Use the fixed-point gl_FragCoord input.
5058 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
5059 * per coordinate to get the repeating effect.
5060 */
5061 address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
5062 address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);
5063
5064 /* Load the buffer descriptor. */
5065 slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
5066 desc = ac_build_load_to_sgpr(&ctx->ac, param_rw_buffers, slot);
5067
5068 /* The stipple pattern is 32x32, each row has 32 bits. */
5069 offset = LLVMBuildMul(builder, address[1],
5070 LLVMConstInt(ctx->i32, 4, 0), "");
5071 row = buffer_load_const(ctx, desc, offset);
5072 row = ac_to_integer(&ctx->ac, row);
5073 bit = LLVMBuildLShr(builder, row, address[0], "");
5074 bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
5075 ac_build_kill_if_false(&ctx->ac, bit);
5076 }
5077
/* Parse the (register, value) config pairs emitted by the LLVM backend and
 * fill in the shader config: register counts, LDS size, scratch size, SPI
 * PS input masks and spill statistics.
 */
void si_shader_binary_read_config(struct ac_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		ac_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct ac_shader_reloc *reloc = &binary->relocs[i];

		/* Scratch is only truly used if the code references the
		 * scratch resource descriptor symbols. */
		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* The config data is a list of little-endian (register, value)
	 * pairs, 8 bytes per entry. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		/* All PGM_RSRC1 registers share the SGPRS/VGPRS/FLOAT_MODE
		 * field layout, so one decode handles every stage. */
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B428_SPI_SHADER_PGM_RSRC1_HS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* Register fields are in units of 8 SGPRs / 4 VGPRs. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode =  G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			static bool printed;

			/* Warn only once per process to avoid log spam. */
			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
		break;
		}
	}

	/* Fall back to INPUT_ENA when the backend did not emit INPUT_ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5161
5162 void si_shader_apply_scratch_relocs(struct si_shader *shader,
5163 uint64_t scratch_va)
5164 {
5165 unsigned i;
5166 uint32_t scratch_rsrc_dword0 = scratch_va;
5167 uint32_t scratch_rsrc_dword1 =
5168 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5169
5170 /* Enable scratch coalescing. */
5171 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5172
5173 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5174 const struct ac_shader_reloc *reloc =
5175 &shader->binary.relocs[i];
5176 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5177 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5178 &scratch_rsrc_dword0, 4);
5179 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5180 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5181 &scratch_rsrc_dword1, 4);
5182 }
5183 }
5184 }
5185
5186 static unsigned si_get_shader_binary_size(const struct si_shader *shader)
5187 {
5188 unsigned size = shader->binary.code_size;
5189
5190 if (shader->prolog)
5191 size += shader->prolog->binary.code_size;
5192 if (shader->previous_stage)
5193 size += shader->previous_stage->binary.code_size;
5194 if (shader->prolog2)
5195 size += shader->prolog2->binary.code_size;
5196 if (shader->epilog)
5197 size += shader->epilog->binary.code_size;
5198 return size;
5199 }
5200
5201 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5202 {
5203 const struct ac_shader_binary *prolog =
5204 shader->prolog ? &shader->prolog->binary : NULL;
5205 const struct ac_shader_binary *previous_stage =
5206 shader->previous_stage ? &shader->previous_stage->binary : NULL;
5207 const struct ac_shader_binary *prolog2 =
5208 shader->prolog2 ? &shader->prolog2->binary : NULL;
5209 const struct ac_shader_binary *epilog =
5210 shader->epilog ? &shader->epilog->binary : NULL;
5211 const struct ac_shader_binary *mainb = &shader->binary;
5212 unsigned bo_size = si_get_shader_binary_size(shader) +
5213 (!epilog ? mainb->rodata_size : 0);
5214 unsigned char *ptr;
5215
5216 assert(!prolog || !prolog->rodata_size);
5217 assert(!previous_stage || !previous_stage->rodata_size);
5218 assert(!prolog2 || !prolog2->rodata_size);
5219 assert((!prolog && !previous_stage && !prolog2 && !epilog) ||
5220 !mainb->rodata_size);
5221 assert(!epilog || !epilog->rodata_size);
5222
5223 r600_resource_reference(&shader->bo, NULL);
5224 shader->bo = (struct r600_resource*)
5225 si_aligned_buffer_create(&sscreen->b,
5226 sscreen->cpdma_prefetch_writes_memory ?
5227 0 : R600_RESOURCE_FLAG_READ_ONLY,
5228 PIPE_USAGE_IMMUTABLE,
5229 align(bo_size, SI_CPDMA_ALIGNMENT),
5230 256);
5231 if (!shader->bo)
5232 return -ENOMEM;
5233
5234 /* Upload. */
5235 ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
5236 PIPE_TRANSFER_READ_WRITE |
5237 PIPE_TRANSFER_UNSYNCHRONIZED);
5238
5239 /* Don't use util_memcpy_cpu_to_le32. LLVM binaries are
5240 * endian-independent. */
5241 if (prolog) {
5242 memcpy(ptr, prolog->code, prolog->code_size);
5243 ptr += prolog->code_size;
5244 }
5245 if (previous_stage) {
5246 memcpy(ptr, previous_stage->code, previous_stage->code_size);
5247 ptr += previous_stage->code_size;
5248 }
5249 if (prolog2) {
5250 memcpy(ptr, prolog2->code, prolog2->code_size);
5251 ptr += prolog2->code_size;
5252 }
5253
5254 memcpy(ptr, mainb->code, mainb->code_size);
5255 ptr += mainb->code_size;
5256
5257 if (epilog)
5258 memcpy(ptr, epilog->code, epilog->code_size);
5259 else if (mainb->rodata_size > 0)
5260 memcpy(ptr, mainb->rodata, mainb->rodata_size);
5261
5262 sscreen->ws->buffer_unmap(shader->bo->buf);
5263 return 0;
5264 }
5265
5266 static void si_shader_dump_disassembly(const struct ac_shader_binary *binary,
5267 struct pipe_debug_callback *debug,
5268 const char *name, FILE *file)
5269 {
5270 char *line, *p;
5271 unsigned i, count;
5272
5273 if (binary->disasm_string) {
5274 fprintf(file, "Shader %s disassembly:\n", name);
5275 fprintf(file, "%s", binary->disasm_string);
5276
5277 if (debug && debug->debug_message) {
5278 /* Very long debug messages are cut off, so send the
5279 * disassembly one line at a time. This causes more
5280 * overhead, but on the plus side it simplifies
5281 * parsing of resulting logs.
5282 */
5283 pipe_debug_message(debug, SHADER_INFO,
5284 "Shader Disassembly Begin");
5285
5286 line = binary->disasm_string;
5287 while (*line) {
5288 p = util_strchrnul(line, '\n');
5289 count = p - line;
5290
5291 if (count) {
5292 pipe_debug_message(debug, SHADER_INFO,
5293 "%.*s", count, line);
5294 }
5295
5296 if (!*p)
5297 break;
5298 line = p + 1;
5299 }
5300
5301 pipe_debug_message(debug, SHADER_INFO,
5302 "Shader Disassembly End");
5303 }
5304 } else {
5305 fprintf(file, "Shader %s binary:\n", name);
5306 for (i = 0; i < binary->code_size; i += 4) {
5307 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
5308 binary->code[i + 3], binary->code[i + 2],
5309 binary->code[i + 1], binary->code[i]);
5310 }
5311 }
5312 }
5313
/* Print shader statistics (register usage, code size, LDS/scratch usage and
 * the estimated max waves per SIMD) to "file", and also forward a one-line
 * summary through the debug callback.
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 const struct si_shader *shader,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file,
				 bool check_debug_option)
{
	const struct si_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
	unsigned code_size = si_get_shader_binary_size(shader);
	/* LDS allocation granularity: 512 bytes on CIK+, 256 on SI. */
	unsigned lds_increment = sscreen->info.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves;

	switch (sscreen->info.family) {
	/* These always have 8 waves: */
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		max_simd_waves = 8;
		break;
	default:
		max_simd_waves = 10;
	}

	/* Compute LDS usage for PS. */
	switch (processor) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			/* Compute allocates LDS per thread group, so divide
			 * by the number of waves in the group. */
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size, 64);
		}
		break;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* Per-SIMD SGPR budget: 800 on VI+, 512 on older chips. */
		if (sscreen->info.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* Per-SIMD VGPR budget is 256. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied). */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Private memory VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs,
			conf->private_mem_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d PrivMem VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs, conf->private_mem_vgprs);
}
5417
5418 const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
5419 {
5420 switch (processor) {
5421 case PIPE_SHADER_VERTEX:
5422 if (shader->key.as_es)
5423 return "Vertex Shader as ES";
5424 else if (shader->key.as_ls)
5425 return "Vertex Shader as LS";
5426 else
5427 return "Vertex Shader as VS";
5428 case PIPE_SHADER_TESS_CTRL:
5429 return "Tessellation Control Shader";
5430 case PIPE_SHADER_TESS_EVAL:
5431 if (shader->key.as_es)
5432 return "Tessellation Evaluation Shader as ES";
5433 else
5434 return "Tessellation Evaluation Shader as VS";
5435 case PIPE_SHADER_GEOMETRY:
5436 if (shader->is_gs_copy_shader)
5437 return "GS Copy Shader as VS";
5438 else
5439 return "Geometry Shader";
5440 case PIPE_SHADER_FRAGMENT:
5441 return "Pixel Shader";
5442 case PIPE_SHADER_COMPUTE:
5443 return "Compute Shader";
5444 default:
5445 return "Unknown Shader";
5446 }
5447 }
5448
/* Dump everything known about the shader: the shader key, recorded LLVM IR
 * (if any), and the disassembly of every part, followed by statistics.
 * With check_debug_option=true, output is gated on the per-stage dump
 * debug flags.
 */
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file, bool check_debug_option)
{
	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, processor))
		si_dump_shader_key(processor, shader, file);

	/* llvm_ir_string is only recorded when sscreen->record_llvm_ir is
	 * set (see si_compile_llvm). */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		if (shader->previous_stage &&
		    shader->previous_stage->binary.llvm_ir_string) {
			fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
				si_get_shader_name(shader, processor));
			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
		}

		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (si_can_dump_shader(sscreen, processor) &&
	     !(sscreen->debug_flags & DBG(NO_ASM)))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		/* Dump the parts in upload order: prologs first, epilog last. */
		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(&shader->previous_stage->binary,
						   debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(&shader->prolog2->binary,
						   debug, "prolog2", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, debug, processor, file,
			     check_debug_option);
}
5496
5497 static int si_compile_llvm(struct si_screen *sscreen,
5498 struct ac_shader_binary *binary,
5499 struct si_shader_config *conf,
5500 LLVMTargetMachineRef tm,
5501 LLVMModuleRef mod,
5502 struct pipe_debug_callback *debug,
5503 unsigned processor,
5504 const char *name)
5505 {
5506 int r = 0;
5507 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
5508
5509 if (si_can_dump_shader(sscreen, processor)) {
5510 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
5511
5512 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
5513 fprintf(stderr, "%s LLVM IR:\n\n", name);
5514 ac_dump_module(mod);
5515 fprintf(stderr, "\n");
5516 }
5517 }
5518
5519 if (sscreen->record_llvm_ir) {
5520 char *ir = LLVMPrintModuleToString(mod);
5521 binary->llvm_ir_string = strdup(ir);
5522 LLVMDisposeMessage(ir);
5523 }
5524
5525 if (!si_replace_shader(count, binary)) {
5526 r = si_llvm_compile(mod, binary, tm, debug);
5527 if (r)
5528 return r;
5529 }
5530
5531 si_shader_binary_read_config(binary, conf, 0);
5532
5533 /* Enable 64-bit and 16-bit denormals, because there is no performance
5534 * cost.
5535 *
5536 * If denormals are enabled, all floating-point output modifiers are
5537 * ignored.
5538 *
5539 * Don't enable denormals for 32-bit floats, because:
5540 * - Floating-point output modifiers would be ignored by the hw.
5541 * - Some opcodes don't support denormals, such as v_mad_f32. We would
5542 * have to stop using those.
5543 * - SI & CI would be very slow.
5544 */
5545 conf->float_mode |= V_00B028_FP_64_DENORMS;
5546
5547 FREE(binary->config);
5548 FREE(binary->global_symbol_offsets);
5549 binary->config = NULL;
5550 binary->global_symbol_offsets = NULL;
5551
5552 /* Some shaders can't have rodata because their binaries can be
5553 * concatenated.
5554 */
5555 if (binary->rodata_size &&
5556 (processor == PIPE_SHADER_VERTEX ||
5557 processor == PIPE_SHADER_TESS_CTRL ||
5558 processor == PIPE_SHADER_TESS_EVAL ||
5559 processor == PIPE_SHADER_FRAGMENT)) {
5560 fprintf(stderr, "radeonsi: The shader can't have rodata.");
5561 return -EINVAL;
5562 }
5563
5564 return r;
5565 }
5566
5567 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
5568 {
5569 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
5570 LLVMBuildRetVoid(ctx->ac.builder);
5571 else
5572 LLVMBuildRet(ctx->ac.builder, ret);
5573 }
5574
/* Generate code for the hardware VS shader stage to go with a geometry shader.
 *
 * The GS copy shader reads the GS outputs back from the GSVS ring buffer and
 * performs the position/parameter exports (and streamout) that a hardware VS
 * would normally do.  Returns a newly allocated si_shader, or NULL on
 * allocation or compilation failure. */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	LLVMBuilderRef builder;
	struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	if (!outputs)
		return NULL;

	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		FREE(outputs);
		return NULL;
	}

	/* We can leave the fence as permanently signaled because the GS copy
	 * shader only becomes visible globally after it has been compiled. */
	util_queue_fence_init(&shader->ready);

	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	/* The copy shader is compiled as a hardware VS; the type is switched
	 * to GEOMETRY further down only for dump/debug purposes. */
	si_init_shader_ctx(&ctx, sscreen, tm);
	ctx.shader = shader;
	ctx.type = PIPE_SHADER_VERTEX;

	builder = ctx.ac.builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Byte offset of this vertex within the GSVS ring (one dword per
	 * component, hence *4). */
	LLVMValueRef voffset =
		lp_build_mul_imm(uint, ctx.abi.vertex_id, 4);

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (gs_selector->so.num_outputs)
		stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* Two bits of stream index per component. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	/* Branch on the stream ID: one switch case per vertex stream that has
	 * any outputs; all cases fall through to a common "end" block. */
	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams != 0 are only useful for transform feedback. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Components not written, or belonging to a
				 * different stream, stay undefined. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = ctx.bld_base.base.undef;
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, 1, 1,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 is rasterized, so only it does exports. */
		if (stream == 0)
			si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(ctx.ac.builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	r = si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.tm,
			    ctx.gallivm.module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr, true);
		r = si_shader_binary_upload(sscreen, ctx.shader);
	}

	si_llvm_dispose(&ctx);

	FREE(outputs);

	if (r != 0) {
		FREE(shader);
		shader = NULL;
	}
	return shader;
}
5726
5727 static void si_dump_shader_key_vs(const struct si_shader_key *key,
5728 const struct si_vs_prolog_bits *prolog,
5729 const char *prefix, FILE *f)
5730 {
5731 fprintf(f, " %s.instance_divisor_is_one = %u\n",
5732 prefix, prolog->instance_divisor_is_one);
5733 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
5734 prefix, prolog->instance_divisor_is_fetched);
5735 fprintf(f, " %s.ls_vgpr_fix = %u\n",
5736 prefix, prolog->ls_vgpr_fix);
5737
5738 fprintf(f, " mono.vs.fix_fetch = {");
5739 for (int i = 0; i < SI_MAX_ATTRIBS; i++)
5740 fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
5741 fprintf(f, "}\n");
5742 }
5743
/* Dump the shader key of "shader" to "f" in a human-readable form, for
 * debug output.  Only the fields relevant to the given stage are printed. */
static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
			       FILE *f)
{
	const struct si_shader_key *key = &shader->key;

	fprintf(f, "SHADER KEY\n");

	switch (processor) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9 the VS is merged into the TCS, so its prolog key is
		 * part of the TCS key. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no key of its own. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9 the ES is merged into the GS; print its VS prolog
		 * key when the ES stage is a vertex shader. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}

	/* Optimization flags only apply to the hardware VS stage, i.e. not to
	 * shaders compiled as LS or ES. */
	if ((processor == PIPE_SHADER_GEOMETRY ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
5823
/* Initialize the shader context: set up the common LLVM/TGSI translation
 * state and register the radeonsi-specific handlers for TGSI opcodes that
 * need custom lowering.  Handlers registered here are shared by all shader
 * stages; stage-specific callbacks are set later in si_compile_tgsi_main. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;

	si_llvm_context_init(ctx, sscreen, tm);

	bld_base = &ctx->bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Pixel-shader interpolation opcodes. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	bld_base->op_actions[TGSI_OPCODE_CLOCK].emit = clock_emit;

	/* Derivatives (regular and fine-grained). */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Subgroup (wave) operations. */
	bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
	bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
	bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
	bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;

	/* Geometry-shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_tgsi_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
5862
5863 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
5864 {
5865 struct si_shader *shader = ctx->shader;
5866 struct tgsi_shader_info *info = &shader->selector->info;
5867
5868 if ((ctx->type != PIPE_SHADER_VERTEX &&
5869 ctx->type != PIPE_SHADER_TESS_EVAL) ||
5870 shader->key.as_ls ||
5871 shader->key.as_es)
5872 return;
5873
5874 ac_optimize_vs_outputs(&ctx->ac,
5875 ctx->main_fn,
5876 shader->info.vs_output_param_offset,
5877 info->num_outputs,
5878 &shader->info.nr_param_exports);
5879 }
5880
5881 static void si_count_scratch_private_memory(struct si_shader_context *ctx)
5882 {
5883 ctx->shader->config.private_mem_vgprs = 0;
5884
5885 /* Process all LLVM instructions. */
5886 LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
5887 while (bb) {
5888 LLVMValueRef next = LLVMGetFirstInstruction(bb);
5889
5890 while (next) {
5891 LLVMValueRef inst = next;
5892 next = LLVMGetNextInstruction(next);
5893
5894 if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
5895 continue;
5896
5897 LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
5898 /* No idea why LLVM aligns allocas to 4 elements. */
5899 unsigned alignment = LLVMGetAlignment(inst);
5900 unsigned dw_size = align(ac_get_type_size(type) / 4, alignment);
5901 ctx->shader->config.private_mem_vgprs += dw_size;
5902 }
5903 bb = LLVMGetNextBasicBlock(bb);
5904 }
5905 }
5906
5907 static void si_init_exec_from_input(struct si_shader_context *ctx,
5908 unsigned param, unsigned bitoffset)
5909 {
5910 LLVMValueRef args[] = {
5911 LLVMGetParam(ctx->main_fn, param),
5912 LLVMConstInt(ctx->i32, bitoffset, 0),
5913 };
5914 lp_build_intrinsic(ctx->ac.builder,
5915 "llvm.amdgcn.init.exec.from.input",
5916 ctx->voidt, args, 2, LP_FUNC_ATTR_CONVERGENT);
5917 }
5918
5919 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
5920 const struct si_vs_prolog_bits *key)
5921 {
5922 /* VGPR initialization fixup for Vega10 and Raven is always done in the
5923 * VS prolog. */
5924 return sel->vs_needs_prolog || key->ls_vgpr_fix;
5925 }
5926
/* Build the LLVM IR for the main part of the shader in ctx: set up the
 * per-stage callbacks, create the function, handle the GFX9 merged-shader
 * EXEC/barrier sequencing, and translate the TGSI or NIR body.
 * Returns false if the TGSI/NIR translation fails. */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 bool is_monolithic)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;

	// TODO clean all this up!
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		/* The epilogue depends on which hardware stage the VS runs
		 * as: LS (before TCS), ES (before GS), or hardware VS. */
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		ctx->abi.load_tess_inputs = si_nir_load_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		ctx->abi.load_tess_inputs = si_nir_load_input_tes;
		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		ctx->abi.load_inputs = si_nir_load_input_gs;
		ctx->abi.emit_vertex = si_llvm_emit_vertex;
		ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
		bld_base->emit_epilogue = si_tgsi_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		ctx->abi.emit_outputs = si_llvm_return_fs_outputs;
		bld_base->emit_epilogue = si_tgsi_emit_epilogue;
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	ctx->abi.load_ubo = load_ubo;
	ctx->abi.load_ssbo = load_ssbo;

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!is_monolithic &&
		    sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			si_init_exec_from_input(ctx,
						ctx->param_merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY) {
			if (!is_monolithic)
				ac_init_exec_full_mask(&ctx->ac);

			/* The barrier must execute for all shaders in a
			 * threadgroup.
			 */
			si_llvm_emit_barrier(NULL, bld_base, NULL);

			/* Wrap the main part in an if-statement so that only
			 * the threads belonging to this stage execute it;
			 * merged_wrap_if_state is closed after translation. */
			LLVMValueRef num_threads = unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
			LLVMValueRef ena =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      ac_get_thread_id(&ctx->ac), num_threads, "");
			lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);
		}
	}

	/* Allocas for the invocation-0 tess factors, used when tess factors
	 * are known to be defined in all invocations. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->tcs_info.tessfactors_are_def_in_all_invocs) {
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				lp_build_alloca_undef(&ctx->gallivm, ctx->i32, "");
		}
	}

	/* Per-stream counters of emitted GS vertices. */
	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(&ctx->gallivm,
						ctx->i32, "");
		}
	}

	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = lp_build_alloca_undef(&ctx->gallivm, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, LLVMConstInt(ctx->i1, 1, 0),
			       ctx->postponed_kill);
	}

	/* Translate the shader body: TGSI tokens if present, NIR otherwise. */
	if (sel->tokens) {
		if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
			fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
			return false;
		}
	} else {
		if (!si_nir_build_llvm(ctx, sel->nir)) {
			fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
			return false;
		}
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6066
6067 /**
6068 * Compute the VS prolog key, which contains all the information needed to
6069 * build the VS prolog function, and set shader->info bits where needed.
6070 *
6071 * \param info Shader info of the vertex shader.
6072 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
6073 * \param prolog_key Key of the VS prolog
6074 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
6075 * \param key Output shader part key.
6076 */
6077 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
6078 unsigned num_input_sgprs,
6079 const struct si_vs_prolog_bits *prolog_key,
6080 struct si_shader *shader_out,
6081 union si_shader_part_key *key)
6082 {
6083 memset(key, 0, sizeof(*key));
6084 key->vs_prolog.states = *prolog_key;
6085 key->vs_prolog.num_input_sgprs = num_input_sgprs;
6086 key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
6087 key->vs_prolog.as_ls = shader_out->key.as_ls;
6088 key->vs_prolog.as_es = shader_out->key.as_es;
6089
6090 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
6091 key->vs_prolog.as_ls = 1;
6092 key->vs_prolog.num_merged_next_stage_vgprs = 2;
6093 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
6094 key->vs_prolog.as_es = 1;
6095 key->vs_prolog.num_merged_next_stage_vgprs = 5;
6096 }
6097
6098 /* Enable loading the InstanceID VGPR. */
6099 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
6100
6101 if ((key->vs_prolog.states.instance_divisor_is_one |
6102 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
6103 shader_out->info.uses_instanceid = true;
6104 }
6105
/**
 * Compute the PS prolog key, which contains all the information needed to
 * build the PS prolog function, and set related bits in shader->config.
 *
 * \param shader          the pixel shader; spi_ps_input_ena in its config is
 *                        updated according to the interpolants the prolog uses
 * \param key             output shader part key
 * \param separate_prolog whether the prolog is compiled as a separate part
 *                        (affects linear-interp VGPR indices, see below)
 */
static void si_get_ps_prolog_key(struct si_shader *shader,
				 union si_shader_part_key *key,
				 bool separate_prolog)
{
	struct tgsi_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.part.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* The prolog needs WQM when it computes interpolants that derivative
	 * instructions may later consume. */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);
	key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.part.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* Up to two color inputs; pick interpolation mode/location
		 * and record which barycentric VGPR pair each one uses. */
		for (unsigned i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.part.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 means flat shading: no barycentrics. */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR assignment for non-monolithic shaders
				 * works because InitialPSInputAddr is set on the
				 * main shader and PERSP_PULL_MODEL is never used.
				 */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 6 : 9;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 8 : 11;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 10 : 13;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
6226
6227 /**
6228 * Check whether a PS prolog is required based on the key.
6229 */
6230 static bool si_need_ps_prolog(const union si_shader_part_key *key)
6231 {
6232 return key->ps_prolog.colors_read ||
6233 key->ps_prolog.states.force_persp_sample_interp ||
6234 key->ps_prolog.states.force_linear_sample_interp ||
6235 key->ps_prolog.states.force_persp_center_interp ||
6236 key->ps_prolog.states.force_linear_center_interp ||
6237 key->ps_prolog.states.bc_optimize_for_persp ||
6238 key->ps_prolog.states.bc_optimize_for_linear ||
6239 key->ps_prolog.states.poly_stipple ||
6240 key->ps_prolog.states.samplemask_log_ps_iter;
6241 }
6242
6243 /**
6244 * Compute the PS epilog key, which contains all the information needed to
6245 * build the PS epilog function.
6246 */
6247 static void si_get_ps_epilog_key(struct si_shader *shader,
6248 union si_shader_part_key *key)
6249 {
6250 struct tgsi_shader_info *info = &shader->selector->info;
6251 memset(key, 0, sizeof(*key));
6252 key->ps_epilog.colors_written = info->colors_written;
6253 key->ps_epilog.writes_z = info->writes_z;
6254 key->ps_epilog.writes_stencil = info->writes_stencil;
6255 key->ps_epilog.writes_samplemask = info->writes_samplemask;
6256 key->ps_epilog.states = shader->key.part.ps.epilog;
6257 }
6258
/**
 * Build the GS prolog function. Rotate the input vertices for triangle strips
 * with adjacency.
 *
 * The prolog forwards all SGPR and VGPR inputs unchanged, except that when
 * tri_strip_adj_fix is set, the six vertex indices are rotated by 4 (mod 6)
 * for every other primitive (selected by bit 0 of the primitive ID).
 */
static void si_build_gs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	unsigned num_sgprs, num_vgprs;
	struct si_function_info fninfo;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef returns[48];
	LLVMValueRef func, ret;

	si_init_function_info(&fninfo);

	if (ctx->screen->info.chip_class >= GFX9) {
		num_sgprs = 8 + GFX9_GS_NUM_USER_SGPR;
		num_vgprs = 5; /* ES inputs are not needed by GS */
	} else {
		num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
		num_vgprs = 8;
	}

	/* All inputs are i32; SGPRs are returned as i32, VGPRs as f32. */
	for (unsigned i = 0; i < num_sgprs; ++i) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[i] = ctx->i32;
	}

	for (unsigned i = 0; i < num_vgprs; ++i) {
		add_arg(&fninfo, ARG_VGPR, ctx->i32);
		returns[num_sgprs + i] = ctx->f32;
	}

	/* Create the function. */
	si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Set the full EXEC mask for the prolog, because we are only fiddling
	 * with registers here. The main shader part will set the correct EXEC
	 * mask.
	 */
	if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
		ac_init_exec_full_mask(&ctx->ac);

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (unsigned i = 0; i < num_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(builder, ret, p, i, "");
	}
	for (unsigned i = 0; i < num_vgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
	}

	if (key->gs_prolog.states.tri_strip_adj_fix) {
		/* Remap the input vertices for every other primitive. */
		const unsigned gfx6_vtx_params[6] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 3,
			num_sgprs + 4,
			num_sgprs + 5,
			num_sgprs + 6
		};
		/* On GFX9, two 16-bit vertex indices are packed per VGPR. */
		const unsigned gfx9_vtx_params[3] = {
			num_sgprs,
			num_sgprs + 1,
			num_sgprs + 4,
		};
		LLVMValueRef vtx_in[6], vtx_out[6];
		LLVMValueRef prim_id, rotate;

		if (ctx->screen->info.chip_class >= GFX9) {
			for (unsigned i = 0; i < 3; i++) {
				vtx_in[i*2] = unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
				vtx_in[i*2+1] = unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
			}
		} else {
			for (unsigned i = 0; i < 6; i++)
				vtx_in[i] = LLVMGetParam(func, gfx6_vtx_params[i]);
		}

		/* Bit 0 of the primitive ID selects whether this primitive
		 * gets the rotated vertex order. */
		prim_id = LLVMGetParam(func, num_sgprs + 2);
		rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");

		for (unsigned i = 0; i < 6; ++i) {
			LLVMValueRef base, rotated;
			base = vtx_in[i];
			rotated = vtx_in[(i + 4) % 6];
			vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* Re-pack the 16-bit index pairs into their VGPRs. */
			for (unsigned i = 0; i < 3; i++) {
				LLVMValueRef hi, out;

				hi = LLVMBuildShl(builder, vtx_out[i*2+1],
						  LLVMConstInt(ctx->i32, 16, 0), "");
				out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
				out = ac_to_float(&ctx->ac, out);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx9_vtx_params[i], "");
			}
		} else {
			for (unsigned i = 0; i < 6; i++) {
				LLVMValueRef out;

				out = ac_to_float(&ctx->ac, vtx_out[i]);
				ret = LLVMBuildInsertValue(builder, ret, out,
							   gfx6_vtx_params[i], "");
			}
		}
	}

	LLVMBuildRet(builder, ret);
}
6380
/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 *
 * \param parts                   shader part functions in execution order
 * \param num_parts               number of entries in \p parts
 * \param main_part               index of the main part; its parameter types
 *                                define the wrapper signature (relevant for
 *                                the dereferenceable attribute on descriptor
 *                                table pointers)
 * \param next_shader_first_part  for GFX9 merged shaders: index of the first
 *                                part of the second shader stage; 0 otherwise
 */
static void si_build_wrapper_function(struct si_shader_context *ctx,
				      LLVMValueRef *parts,
				      unsigned num_parts,
				      unsigned main_part,
				      unsigned next_shader_first_part)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	/* PS epilog has one arg per color component; gfx9 merged shader
	 * prologs need to forward 32 user SGPRs.
	 */
	struct si_function_info fninfo;
	LLVMValueRef initial[64], out[64];
	LLVMTypeRef function_type;
	unsigned num_first_params;
	unsigned num_out, initial_num_out;
	MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
	MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
	unsigned num_sgprs, num_vgprs;
	unsigned gprs;
	struct lp_build_if_state if_state;

	si_init_function_info(&fninfo);

	/* Force all parts to be inlined into the wrapper and keep them out
	 * of the final symbol table. */
	for (unsigned i = 0; i < num_parts; ++i) {
		lp_add_function_attr(parts[i], -1, LP_FUNC_ATTR_ALWAYSINLINE);
		LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
	}

	/* The parameters of the wrapper function correspond to those of the
	 * first part in terms of SGPRs and VGPRs, but we use the types of the
	 * main part to get the right types. This is relevant for the
	 * dereferenceable attribute on descriptor table pointers.
	 */
	num_sgprs = 0;
	num_vgprs = 0;

	function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
	num_first_params = LLVMCountParamTypes(function_type);

	/* Count the first part's SGPR and VGPR inputs in dword units. */
	for (unsigned i = 0; i < num_first_params; ++i) {
		LLVMValueRef param = LLVMGetParam(parts[0], i);

		if (ac_is_sgpr_param(param)) {
			assert(num_vgprs == 0); /* all SGPR args precede VGPR args */
			num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		} else {
			num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		}
	}

	/* Declare wrapper arguments using the main part's parameter types
	 * until the first part's GPR budget is covered. */
	gprs = 0;
	while (gprs < num_sgprs + num_vgprs) {
		LLVMValueRef param = LLVMGetParam(parts[main_part], fninfo.num_params);
		LLVMTypeRef type = LLVMTypeOf(param);
		unsigned size = ac_get_type_size(type) / 4;

		add_arg(&fninfo, gprs < num_sgprs ? ARG_SGPR : ARG_VGPR, type);

		assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
		/* No argument may straddle the SGPR/VGPR boundary. */
		assert(gprs + size <= num_sgprs + num_vgprs &&
		       (gprs >= num_sgprs || gprs + size <= num_sgprs));

		gprs += size;
	}

	si_create_function(ctx, "wrapper", NULL, 0, &fninfo,
			   si_get_max_workgroup_size(ctx->shader));

	if (is_merged_shader(ctx->shader))
		ac_init_exec_full_mask(&ctx->ac);

	/* Record the arguments of the function as if they were an output of
	 * a previous part.
	 */
	num_out = 0;
	num_out_sgpr = 0;

	for (unsigned i = 0; i < fninfo.num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
		LLVMTypeRef param_type = LLVMTypeOf(param);
		/* SGPR outputs are tracked as i32, VGPR outputs as f32. */
		LLVMTypeRef out_type = i < fninfo.num_sgpr_params ? ctx->i32 : ctx->f32;
		unsigned size = ac_get_type_size(param_type) / 4;

		if (size == 1) {
			if (param_type != out_type)
				param = LLVMBuildBitCast(builder, param, out_type, "");
			out[num_out++] = param;
		} else {
			/* Split multi-dword arguments into single dwords;
			 * pointers are converted through i64 first. */
			LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
				param_type = ctx->i64;
			}

			if (param_type != vector_type)
				param = LLVMBuildBitCast(builder, param, vector_type, "");

			for (unsigned j = 0; j < size; ++j)
				out[num_out++] = LLVMBuildExtractElement(
					builder, param, LLVMConstInt(ctx->i32, j, 0), "");
		}

		if (i < fninfo.num_sgpr_params)
			num_out_sgpr = num_out;
	}

	/* Keep a snapshot of the wrapper inputs for the second merged half. */
	memcpy(initial, out, sizeof(out));
	initial_num_out = num_out;
	initial_num_out_sgpr = num_out_sgpr;

	/* Now chain the parts. */
	for (unsigned part = 0; part < num_parts; ++part) {
		LLVMValueRef in[48];
		LLVMValueRef ret;
		LLVMTypeRef ret_type;
		unsigned out_idx = 0;
		unsigned num_params = LLVMCountParams(parts[part]);

		/* Merged shaders are executed conditionally depending
		 * on the number of enabled threads passed in the input SGPRs. */
		if (is_merged_shader(ctx->shader) && part == 0) {
			/* Thread count lives in the low 7 bits of SGPR 3. */
			LLVMValueRef ena, count = initial[3];

			count = LLVMBuildAnd(builder, count,
					     LLVMConstInt(ctx->i32, 0x7f, 0), "");
			ena = LLVMBuildICmp(builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), count, "");
			lp_build_if(&if_state, &ctx->gallivm, ena);
		}

		/* Derive arguments for the next part from outputs of the
		 * previous one.
		 */
		for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
			LLVMValueRef param;
			LLVMTypeRef param_type;
			bool is_sgpr;
			unsigned param_size;
			LLVMValueRef arg = NULL;

			param = LLVMGetParam(parts[part], param_idx);
			param_type = LLVMTypeOf(param);
			param_size = ac_get_type_size(param_type) / 4;
			is_sgpr = ac_is_sgpr_param(param);

			if (is_sgpr) {
				/* byval makes no sense for an inlined call;
				 * mark the SGPR arg inreg instead. */
#if HAVE_LLVM < 0x0400
				LLVMRemoveAttribute(param, LLVMByValAttribute);
#else
				unsigned kind_id = LLVMGetEnumAttributeKindForName("byval", 5);
				LLVMRemoveEnumAttributeAtIndex(parts[part], param_idx + 1, kind_id);
#endif
				lp_add_function_attr(parts[part], param_idx + 1, LP_FUNC_ATTR_INREG);
			}

			assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
			assert(is_sgpr || out_idx >= num_out_sgpr);

			if (param_size == 1)
				arg = out[out_idx];
			else
				arg = lp_build_gather_values(&ctx->gallivm, &out[out_idx], param_size);

			if (LLVMTypeOf(arg) != param_type) {
				if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
					arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
					arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
				} else {
					arg = LLVMBuildBitCast(builder, arg, param_type, "");
				}
			}

			in[param_idx] = arg;
			out_idx += param_size;
		}

		ret = LLVMBuildCall(builder, parts[part], in, num_params, "");

		if (is_merged_shader(ctx->shader) &&
		    part + 1 == next_shader_first_part) {
			lp_build_endif(&if_state);

			/* The second half of the merged shader should use
			 * the inputs from the toplevel (wrapper) function,
			 * not the return value from the last call.
			 *
			 * That's because the last call was executed condi-
			 * tionally, so we can't consume it in the main
			 * block.
			 */
			memcpy(out, initial, sizeof(initial));
			num_out = initial_num_out;
			num_out_sgpr = initial_num_out_sgpr;
			continue;
		}

		/* Extract the returned GPRs. */
		ret_type = LLVMTypeOf(ret);
		num_out = 0;
		num_out_sgpr = 0;

		if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
			assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

			unsigned ret_size = LLVMCountStructElementTypes(ret_type);

			for (unsigned i = 0; i < ret_size; ++i) {
				LLVMValueRef val =
					LLVMBuildExtractValue(builder, ret, i, "");

				assert(num_out < ARRAY_SIZE(out));
				out[num_out++] = val;

				/* i32 return members are SGPRs and must all
				 * come before the f32 (VGPR) members. */
				if (LLVMTypeOf(val) == ctx->i32) {
					assert(num_out_sgpr + 1 == num_out);
					num_out_sgpr = num_out;
				}
			}
		}
	}

	LLVMBuildRetVoid(builder);
}
6609
/**
 * Compile a TGSI (or NIR) shader into machine code.
 *
 * For monolithic shaders this also builds the required prologs/epilogs
 * (and, on GFX9, the main part of the merged previous stage) and chains
 * all parts together with a wrapper function.
 *
 * \return 0 on success, a negative value on failure
 */
int si_compile_tgsi_shader(struct si_screen *sscreen,
			   LLVMTargetMachineRef tm,
			   struct si_shader *shader,
			   bool is_monolithic,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (si_can_dump_shader(sscreen, sel->info.processor) &&
	    !(sscreen->debug_flags & DBG(NO_TGSI))) {
		if (sel->tokens)
			tgsi_dump(sel->tokens, 0);
		else
			nir_print_shader(sel->nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_init_shader_ctx(&ctx, sscreen, tm);
	si_llvm_context_set_tgsi(&ctx, shader);
	ctx.separate_prolog = !is_monolithic;

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_compile_tgsi_main(&ctx, is_monolithic)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS = optional prolog + main part. */
		LLVMValueRef parts[2];
		bool need_prolog = sel->vs_needs_prolog;

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);
	} else if (is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			/* GFX9 merged LS-HS:
			 * [VS prolog] + VS-as-LS main + TCS main + TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* VS as LS main part */
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, 0,
						  vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9: TCS main + TCS epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			/* GFX9 merged ES-GS:
			 * [ES prolog] + ES main + GS prolog + GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES prolog */
			if (es->vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader->info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* ES main part */
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			si_llvm_context_set_tgsi(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, true)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9: GS prolog + GS main. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS = [prolog] + main + epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type))
		si_count_scratch_private_memory(&ctx);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
			    ctx.gallivm.module, debug, ctx.type, "TGSI shader");
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = 64;
		unsigned max_vgprs = 256;
		unsigned max_sgprs = sscreen->info.chip_class >= VI ? 800 : 512;
		unsigned max_sgprs_per_wave = 128;
		unsigned max_block_threads = si_get_max_workgroup_size(shader);
		unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
		unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);

		/* Derive the per-wave limits from how many waves must fit
		 * on a SIMD to run the whole workgroup. */
		max_vgprs = max_vgprs / min_waves_per_simd;
		max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(shader))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs by walking the
	 * SPI_PS_INPUT_ADDR enable bits in hardware order. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = 0;
		shader->info.face_vgpr_index = -1;
		shader->info.ancillary_vgpr_index = -1;

		if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.face_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr)) {
			shader->info.ancillary_vgpr_index = shader->info.num_input_vgprs;
			shader->info.num_input_vgprs += 1;
		}
		if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
			shader->info.num_input_vgprs += 1;
	}

	return 0;
}
6924
6925 /**
6926 * Create, compile and return a shader part (prolog or epilog).
6927 *
6928 * \param sscreen screen
6929 * \param list list of shader parts of the same category
6930 * \param type shader type
6931 * \param key shader part key
6932 * \param prolog whether the part being requested is a prolog
6933 * \param tm LLVM target machine
6934 * \param debug debug callback
6935 * \param build the callback responsible for building the main function
6936 * \return non-NULL on success
6937 */
6938 static struct si_shader_part *
6939 si_get_shader_part(struct si_screen *sscreen,
6940 struct si_shader_part **list,
6941 enum pipe_shader_type type,
6942 bool prolog,
6943 union si_shader_part_key *key,
6944 LLVMTargetMachineRef tm,
6945 struct pipe_debug_callback *debug,
6946 void (*build)(struct si_shader_context *,
6947 union si_shader_part_key *),
6948 const char *name)
6949 {
6950 struct si_shader_part *result;
6951
6952 mtx_lock(&sscreen->shader_parts_mutex);
6953
6954 /* Find existing. */
6955 for (result = *list; result; result = result->next) {
6956 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6957 mtx_unlock(&sscreen->shader_parts_mutex);
6958 return result;
6959 }
6960 }
6961
6962 /* Compile a new one. */
6963 result = CALLOC_STRUCT(si_shader_part);
6964 result->key = *key;
6965
6966 struct si_shader shader = {};
6967 struct si_shader_context ctx;
6968
6969 si_init_shader_ctx(&ctx, sscreen, tm);
6970 ctx.shader = &shader;
6971 ctx.type = type;
6972
6973 switch (type) {
6974 case PIPE_SHADER_VERTEX:
6975 shader.key.as_ls = key->vs_prolog.as_ls;
6976 shader.key.as_es = key->vs_prolog.as_es;
6977 break;
6978 case PIPE_SHADER_TESS_CTRL:
6979 assert(!prolog);
6980 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6981 break;
6982 case PIPE_SHADER_GEOMETRY:
6983 assert(prolog);
6984 break;
6985 case PIPE_SHADER_FRAGMENT:
6986 if (prolog)
6987 shader.key.part.ps.prolog = key->ps_prolog.states;
6988 else
6989 shader.key.part.ps.epilog = key->ps_epilog.states;
6990 break;
6991 default:
6992 unreachable("bad shader part");
6993 }
6994
6995 build(&ctx, key);
6996
6997 /* Compile. */
6998 si_llvm_optimize_module(&ctx);
6999
7000 if (si_compile_llvm(sscreen, &result->binary, &result->config, tm,
7001 ctx.ac.module, debug, ctx.type, name)) {
7002 FREE(result);
7003 result = NULL;
7004 goto out;
7005 }
7006
7007 result->next = *list;
7008 *list = result;
7009
7010 out:
7011 si_llvm_dispose(&ctx);
7012 mtx_unlock(&sscreen->shader_parts_mutex);
7013 return result;
7014 }
7015
7016 static LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
7017 {
7018 LLVMValueRef ptr[2], list;
7019 bool is_merged_shader =
7020 ctx->screen->info.chip_class >= GFX9 &&
7021 (ctx->type == PIPE_SHADER_TESS_CTRL ||
7022 ctx->type == PIPE_SHADER_GEOMETRY ||
7023 ctx->shader->key.as_ls || ctx->shader->key.as_es);
7024
7025 /* Get the pointer to rw buffers. */
7026 ptr[0] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
7027 ptr[1] = LLVMGetParam(ctx->main_fn, (is_merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS_HI);
7028 list = lp_build_gather_values(&ctx->gallivm, ptr, 2);
7029 list = LLVMBuildBitCast(ctx->ac.builder, list, ctx->i64, "");
7030 list = LLVMBuildIntToPtr(ctx->ac.builder, list,
7031 si_const_array(ctx->v4i32, SI_NUM_RW_BUFFERS), "");
7032 return list;
7033 }
7034
/**
 * Build the vertex shader prolog function.
 *
 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
 * All inputs are returned unmodified. The vertex load indices are
 * stored after them, which will be used by the API VS for fetching inputs.
 *
 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
 *   input_v0,
 *   input_v1,
 *   input_v2,
 *   input_v3,
 *   (VertexID + BaseVertex),
 *   (InstanceID + StartInstance),
 *   (InstanceID / 2 + StartInstance)
 */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMTypeRef *returns;
	LLVMValueRef ret, func;
	int num_returns, i;
	/* When merged with the next stage (GFX9), the next stage's system
	 * VGPRs come first; the VS system VGPRs start after them. */
	unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
	unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
	LLVMValueRef input_vgprs[9];
	unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
				      num_input_vgprs;
	/* Merged shaders have 8 system SGPRs before the user SGPRs. */
	unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;

	si_init_function_info(&fninfo);

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	returns = alloca((num_all_input_regs + key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	num_returns = 0;

	/* Declare input and output SGPRs. */
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		returns[num_returns++] = ctx->i32;
	}

	/* Preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < num_input_vgprs; i++) {
		add_arg_assign(&fninfo, ARG_VGPR, ctx->i32, &input_vgprs[i]);
		returns[num_returns++] = ctx->f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "vs_prolog", returns, num_returns, &fninfo, 0);
	func = ctx->main_fn;

	if (key->vs_prolog.num_merged_next_stage_vgprs) {
		/* For separate parts, the exec mask is set up from the
		 * merged-wave info SGPR; monolithic wrappers do it themselves. */
		if (!key->vs_prolog.is_monolithic)
			si_init_exec_from_input(ctx, 3, 0);

		if (key->vs_prolog.as_ls &&
		    ctx->screen->has_ls_vgpr_init_bug) {
			/* If there are no HS threads, SPI loads the LS VGPRs
			 * starting at VGPR 0. Shift them back to where they
			 * belong.
			 */
			LLVMValueRef has_hs_threads =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
					      unpack_param(ctx, 3, 8, 8),
					      ctx->i32_0, "");

			for (i = 4; i > 0; --i) {
				input_vgprs[i + 1] =
					LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
							input_vgprs[i + 1],
							input_vgprs[i - 1], "");
			}
		}
	}

	/* As LS, InstanceID is VGPR 2 after RelAutoIndex; otherwise VGPR 1. */
	ctx->abi.vertex_id = input_vgprs[first_vs_vgpr];
	ctx->abi.instance_id = input_vgprs[first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1)];

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}
	for (i = 0; i < num_input_vgprs; i++) {
		LLVMValueRef p = input_vgprs[i];
		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
					   key->vs_prolog.num_input_sgprs + i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	LLVMValueRef instance_divisor_constbuf = NULL;

	if (key->vs_prolog.states.instance_divisor_is_fetched) {
		/* Non-trivial divisors are stored in a constant buffer. */
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
		LLVMValueRef buf_index =
			LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
		instance_divisor_constbuf =
			ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
	}

	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		bool divisor_is_one =
			key->vs_prolog.states.instance_divisor_is_one & (1u << i);
		bool divisor_is_fetched =
			key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
		LLVMValueRef index;

		if (divisor_is_one || divisor_is_fetched) {
			LLVMValueRef divisor = ctx->i32_1;

			if (divisor_is_fetched) {
				divisor = buffer_load_const(ctx, instance_divisor_constbuf,
							    LLVMConstInt(ctx->i32, i * 4, 0));
				divisor = ac_to_integer(&ctx->ac, divisor);
			}

			/* InstanceID / Divisor + StartInstance */
			index = get_instance_index_for_fetch(ctx,
							     user_sgpr_base +
							     SI_SGPR_START_INSTANCE,
							     divisor);
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(ctx->ac.builder,
					     ctx->abi.vertex_id,
					     LLVMGetParam(func, user_sgpr_base +
							  SI_SGPR_BASE_VERTEX), "");
		}

		index = ac_to_float(&ctx->ac, index);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
					   fninfo.num_params + i, "");
	}

	si_llvm_build_ret(ctx, ret);
}
7181
7182 static bool si_get_vs_prolog(struct si_screen *sscreen,
7183 LLVMTargetMachineRef tm,
7184 struct si_shader *shader,
7185 struct pipe_debug_callback *debug,
7186 struct si_shader *main_part,
7187 const struct si_vs_prolog_bits *key)
7188 {
7189 struct si_shader_selector *vs = main_part->selector;
7190
7191 if (!si_vs_needs_prolog(vs, key))
7192 return true;
7193
7194 /* Get the prolog. */
7195 union si_shader_part_key prolog_key;
7196 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
7197 key, shader, &prolog_key);
7198
7199 shader->prolog =
7200 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7201 PIPE_SHADER_VERTEX, true, &prolog_key, tm,
7202 debug, si_build_vs_prolog_function,
7203 "Vertex Shader Prolog");
7204 return shader->prolog != NULL;
7205 }
7206
7207 /**
7208 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7209 */
7210 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7211 LLVMTargetMachineRef tm,
7212 struct si_shader *shader,
7213 struct pipe_debug_callback *debug)
7214 {
7215 return si_get_vs_prolog(sscreen, tm, shader, debug, shader,
7216 &shader->key.part.vs.prolog);
7217 }
7218
/**
 * Compile the TCS epilog function. This writes tesselation factors to memory
 * based on the output primitive type of the tesselator (determined by TES).
 */
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef func;

	si_init_function_info(&fninfo);

	/* Declare the SGPR inputs. Unnamed add_arg() calls are placeholder
	 * registers; the declaration order must mirror the TCS main part's
	 * register layout so the epilog can be chained after it. */
	if (ctx->screen->info.chip_class >= GFX9) {
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32); /* wave info */
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	} else {
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		add_arg(&fninfo, ARG_SGPR, ctx->i64);
		ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_addr_base64k = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
		ctx->param_tcs_factor_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
	}

	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* VGPR gap */
	unsigned tess_factors_idx =
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* patch index within the wave (REL_PATCH_ID) */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* invocation ID within the patch */
	add_arg(&fninfo, ARG_VGPR, ctx->i32); /* LDS offset where tess factors should be loaded from */

	for (unsigned i = 0; i < 6; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->i32); /* tess factors */

	/* Create the function. */
	si_create_function(ctx, "tcs_epilog", NULL, 0, &fninfo,
			   ctx->screen->info.chip_class >= CIK ? 128 : 64);
	ac_declare_lds_as_pointer(&ctx->ac);
	func = ctx->main_fn;

	/* Tess factors written by invocation 0 follow the three control VGPRs. */
	LLVMValueRef invoc0_tess_factors[6];
	for (unsigned i = 0; i < 6; i++)
		invoc0_tess_factors[i] = LLVMGetParam(func, tess_factors_idx + 3 + i);

	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, tess_factors_idx),
			      LLVMGetParam(func, tess_factors_idx + 1),
			      LLVMGetParam(func, tess_factors_idx + 2),
			      invoc0_tess_factors, invoc0_tess_factors + 4);

	LLVMBuildRetVoid(ctx->ac.builder);
}
7297
7298 /**
7299 * Select and compile (or reuse) TCS parts (epilog).
7300 */
7301 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7302 LLVMTargetMachineRef tm,
7303 struct si_shader *shader,
7304 struct pipe_debug_callback *debug)
7305 {
7306 if (sscreen->info.chip_class >= GFX9) {
7307 struct si_shader *ls_main_part =
7308 shader->key.part.tcs.ls->main_shader_part_ls;
7309
7310 if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
7311 &shader->key.part.tcs.ls_prolog))
7312 return false;
7313
7314 shader->previous_stage = ls_main_part;
7315 }
7316
7317 /* Get the epilog. */
7318 union si_shader_part_key epilog_key;
7319 memset(&epilog_key, 0, sizeof(epilog_key));
7320 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
7321
7322 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7323 PIPE_SHADER_TESS_CTRL, false,
7324 &epilog_key, tm, debug,
7325 si_build_tcs_epilog_function,
7326 "Tessellation Control Shader Epilog");
7327 return shader->epilog != NULL;
7328 }
7329
7330 /**
7331 * Select and compile (or reuse) GS parts (prolog).
7332 */
7333 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
7334 LLVMTargetMachineRef tm,
7335 struct si_shader *shader,
7336 struct pipe_debug_callback *debug)
7337 {
7338 if (sscreen->info.chip_class >= GFX9) {
7339 struct si_shader *es_main_part =
7340 shader->key.part.gs.es->main_shader_part_es;
7341
7342 if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
7343 !si_get_vs_prolog(sscreen, tm, shader, debug, es_main_part,
7344 &shader->key.part.gs.vs_prolog))
7345 return false;
7346
7347 shader->previous_stage = es_main_part;
7348 }
7349
7350 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
7351 return true;
7352
7353 union si_shader_part_key prolog_key;
7354 memset(&prolog_key, 0, sizeof(prolog_key));
7355 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
7356
7357 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
7358 PIPE_SHADER_GEOMETRY, true,
7359 &prolog_key, tm, debug,
7360 si_build_gs_prolog_function,
7361 "Geometry Shader Prolog");
7362 return shader->prolog2 != NULL;
7363 }
7364
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct si_function_info fninfo;
	LLVMValueRef ret, func;
	int num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	si_init_function_info(&fninfo);

	/* Declare inputs. */
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		add_arg(&fninfo, ARG_SGPR, ctx->i32);

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Declare outputs (same as inputs + add colors if needed).
	 * Interpolated color channels are appended after the pass-through
	 * parameters. */
	num_returns = fninfo.num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		fninfo.types[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", fninfo.types, num_returns,
			   &fninfo, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < fninfo.num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(ctx->ac.builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(ctx->ac.builder, bc_optimize,
					     ctx->i1, "");

		/* VGPR layout assumed below (relative to "base"):
		 *   +0..1  PERSP_SAMPLE,  +2..3 PERSP_CENTER,  +4..5 PERSP_CENTROID,
		 *   +6..7  LINEAR_SAMPLE, +8..9 LINEAR_CENTER, +10..11 LINEAR_CENTROID
		 */
		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors, appending each enabled channel after the
	 * pass-through parameters (two color inputs max, 4 channels each). */
	unsigned color_out_idx = 0;
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(&ctx->gallivm, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = ac_to_integer(&ctx->ac, face);
		}

		/* interp_fs_input fills color[0..3]; NULL interp_ij means
		 * flat/constant interpolation. */
		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret, color[chan],
						   fninfo.num_params + color_out_idx++, "");
		}
	}

	/* Section 15.2.2 (Shader Inputs) of the OpenGL 4.5 (Core Profile) spec
	 * says:
	 *
	 *    "When per-sample shading is active due to the use of a fragment
	 *     input qualified by sample or due to the use of the gl_SampleID
	 *     or gl_SamplePosition variables, only the bit for the current
	 *     sample is set in gl_SampleMaskIn. When state specifies multiple
	 *     fragment shader invocations for a given fragment, the sample
	 *     mask for any single fragment shader invocation may specify a
	 *     subset of the covered samples for the fragment. In this case,
	 *     the bit corresponding to each covered sample will be set in
	 *     exactly one fragment shader invocation."
	 *
	 * The samplemask loaded by hardware is always the coverage of the
	 * entire pixel/fragment, so mask bits out based on the sample ID.
	 */
	if (key->ps_prolog.states.samplemask_log_ps_iter) {
		/* The bit pattern matches that used by fixed function fragment
		 * processing. */
		static const uint16_t ps_iter_masks[] = {
			0xffff, /* not used */
			0x5555,
			0x1111,
			0x0101,
			0x0001,
		};
		assert(key->ps_prolog.states.samplemask_log_ps_iter < ARRAY_SIZE(ps_iter_masks));

		uint32_t ps_iter_mask = ps_iter_masks[key->ps_prolog.states.samplemask_log_ps_iter];
		unsigned ancillary_vgpr = key->ps_prolog.num_input_sgprs +
					  key->ps_prolog.ancillary_vgpr_index;
		/* Sample ID lives in bits [11:8] of the ancillary VGPR;
		 * the sample mask is the VGPR right after it. */
		LLVMValueRef sampleid = unpack_param(ctx, ancillary_vgpr, 8, 4);
		LLVMValueRef samplemask = LLVMGetParam(func, ancillary_vgpr + 1);

		/* samplemask &= ps_iter_mask << sampleid; */
		samplemask = ac_to_integer(&ctx->ac, samplemask);
		samplemask = LLVMBuildAnd(
			ctx->ac.builder,
			samplemask,
			LLVMBuildShl(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, ps_iter_mask, false),
				     sampleid, ""),
			"");
		samplemask = ac_to_float(&ctx->ac, samplemask);

		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, samplemask,
					   ancillary_vgpr + 1, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7643
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
	struct si_function_info fninfo;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int i;
	struct si_ps_exports exp = {};

	si_init_function_info(&fninfo);

	/* Declare input SGPRs. */
	ctx->param_rw_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_bindless_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_const_and_shader_buffers = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	ctx->param_samplers_and_images = add_arg(&fninfo, ARG_SGPR, ctx->i64);
	add_arg_checked(&fninfo, ARG_SGPR, ctx->f32, SI_PARAM_ALPHA_REF);

	/* Declare input VGPRs: 4 channels per written color, then optional
	 * Z, stencil and samplemask values. */
	unsigned required_num_params =
		     fninfo.num_sgpr_params +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Ensure the samplemask VGPR location exists even when few colors
	 * are written. */
	required_num_params = MAX2(required_num_params,
				   fninfo.num_sgpr_params + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	while (fninfo.num_params < required_num_params)
		add_arg(&fninfo, ARG_VGPR, ctx->f32);

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, &fninfo, 0);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = fninfo.num_sgpr_params;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1ull << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written color (4 consecutive VGPRs per MRT). */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    fninfo.num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	/* A PS must export at least one value; emit a null export if
	 * nothing else was exported. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(ctx->ac.builder);
}
7742
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also fixes up SPI_PS_INPUT_ENA to satisfy hardware requirements
 * implied by the selected parts.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, tm, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, tm, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced mode replaces the center/centroid (or sample/centroid)
	 * enables with the single mode the prolog actually uses. */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID. */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7845
7846 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
7847 unsigned *lds_size)
7848 {
7849 /* SPI barrier management bug:
7850 * Make sure we have at least 4k of LDS in use to avoid the bug.
7851 * It applies to workgroup sizes of more than one wavefront.
7852 */
7853 if (sscreen->info.family == CHIP_BONAIRE ||
7854 sscreen->info.family == CHIP_KABINI ||
7855 sscreen->info.family == CHIP_MULLINS)
7856 *lds_size = MAX2(*lds_size, 8);
7857 }
7858
7859 static void si_fix_resource_usage(struct si_screen *sscreen,
7860 struct si_shader *shader)
7861 {
7862 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7863
7864 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7865
7866 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7867 si_get_max_workgroup_size(shader) > 64) {
7868 si_multiwave_lds_size_workaround(sscreen,
7869 &shader->config.lds_size);
7870 }
7871 }
7872
/**
 * Create a shader variant: either compile a monolithic shader, or combine
 * the precompiled main part with selected prolog/epilog parts. Finalizes
 * register usage, dumps the shader, and uploads the binary.
 *
 * \return 0 on success, negative on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return -1;

		/* Copy the compiled TGSI shader data over. The binary is
		 * shared (not duplicated), see si_shader_destroy. */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			/* TES has no prolog/epilog. */
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader must be
		 * sized for the largest of its parts. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->config.private_mem_vgprs =
				MAX2(shader->config.private_mem_vgprs,
				     shader->previous_stage->config.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr, true);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8010
/**
 * Release all resources owned by a shader variant.
 */
void si_shader_destroy(struct si_shader *shader)
{
	/* NOTE(review): the NULL check looks redundant if
	 * r600_resource_reference handles NULL like pipe_resource_reference —
	 * confirm before removing. */
	if (shader->scratch_bo)
		r600_resource_reference(&shader->scratch_bo, NULL);

	r600_resource_reference(&shader->bo, NULL);

	/* Non-monolithic shaders share the binary with the main shader part
	 * (see si_shader_create), so only free it if this variant owns it. */
	if (!shader->is_binary_shared)
		ac_shader_binary_clean(&shader->binary);

	free(shader->shader_log);
}