radeonsi: extract the LLVM type name construction into its own function
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_bitarit.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "radeon/r600_cs.h"
37 #include "radeon/radeon_llvm.h"
38 #include "radeon/radeon_elf_util.h"
39 #include "radeon/radeon_llvm_emit.h"
40 #include "util/u_memory.h"
41 #include "util/u_pstipple.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi/tgsi_dump.h"
45
46 #include "si_pipe.h"
47 #include "si_shader.h"
48 #include "sid.h"
49
50 #include <errno.h>
51
/* ELF symbol names for the scratch buffer resource descriptor dwords.
 * NOTE(review): presumably patched/relocated when the shader binary is
 * uploaded -- confirm against the binary upload code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
57
/* One shader output register: its four channel values plus the TGSI
 * semantic identifying it. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* one LLVM value per component (x,y,z,w) */
	unsigned name;		/* TGSI_SEMANTIC_* */
	unsigned sid;		/* semantic index */
};
64
/* Per-compilation state for translating one TGSI shader to LLVM IR. */
struct si_shader_context
{
	/* Must be the first member: si_shader_context() casts a
	 * lp_build_tgsi_context pointer to this type. */
	struct radeon_llvm_context radeon_bld;
	struct si_shader *shader;
	struct si_screen *screen;

	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	bool is_gs_copy_shader;

	/* Whether to generate the optimized shader variant compiled as a whole
	 * (without a prolog and epilog)
	 */
	bool is_monolithic;

	/* Function-parameter indices of shader inputs. Which ones are valid
	 * depends on the shader type (set up at function declaration time). */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_rel_auto_id;
	int param_vs_prim_id;
	int param_instance_id;
	int param_vertex_index0;
	int param_tes_u;
	int param_tes_v;
	int param_tes_rel_patch_id;
	int param_tes_patch_id;
	int param_es2gs_offset;

	LLVMTargetMachineRef tm;

	/* Metadata node attached to loads from constant memory
	 * (see build_indexed_load_const). */
	LLVMValueRef const_md;
	LLVMValueRef const_buffers[SI_NUM_CONST_BUFFERS];
	LLVMValueRef lds;	/* LDS base pointer (tessellation data) */
	LLVMValueRef *constants[SI_NUM_CONST_BUFFERS];
	LLVMValueRef sampler_views[SI_NUM_SAMPLERS];
	LLVMValueRef sampler_states[SI_NUM_SAMPLERS];
	LLVMValueRef fmasks[SI_NUM_USER_SAMPLERS];
	LLVMValueRef images[SI_NUM_IMAGES];
	LLVMValueRef so_buffers[4];	/* streamout buffer descriptors */
	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef return_value;

	/* Cached LLVM types, built once per context. */
	LLVMTypeRef voidt;
	LLVMTypeRef i1;
	LLVMTypeRef i8;
	LLVMTypeRef i32;
	LLVMTypeRef i64;
	LLVMTypeRef i128;
	LLVMTypeRef f32;
	LLVMTypeRef v16i8;
	LLVMTypeRef v2i32;
	LLVMTypeRef v4i32;
	LLVMTypeRef v4f32;
	LLVMTypeRef v8i32;
};
122
/* Downcast the generic TGSI build context to our derived context.
 * Valid because radeon_bld is the first member of si_shader_context and
 * bld_base presumably lives at offset 0 of radeon_llvm_context -- confirm. */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	return (struct si_shader_context *)bld_base;
}
128
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm);

/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* The VS location of the PrimitiveID input is the same in the epilog,
 * so that the main shader part doesn't have to move it.
 */
#define VS_EPILOG_PRIMID_LOC 2

/* Base input indices of the perspective/linear (i,j) interpolants.
 * NOTE(review): presumably match the SI_PARAM_PERSP_*/SI_PARAM_LINEAR_*
 * parameter layout -- confirm. */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

/* Offsets added to a *_BASE value to select the interpolation location. */
#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
/* NOTE(review): "OFSET" is a typo, but the name may be referenced
 * elsewhere in this file, so it is kept as-is. */
#define CENTROID_OFSET 4

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address spaces used for GPU memory. */
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8


/* s_sendmsg message types used for GS communication. */
#define SENDMSG_GS 2
#define SENDMSG_GS_DONE 3

/* GS operation encoded in bits [5:4] of the s_sendmsg immediate. */
#define SENDMSG_GS_OP_NOP (0 << 4)
#define SENDMSG_GS_OP_CUT (1 << 4)
#define SENDMSG_GS_OP_EMIT (2 << 4)
#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
164
165 /**
166 * Returns a unique index for a semantic name and index. The index must be
167 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
168 * calculated.
169 */
170 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
171 {
172 switch (semantic_name) {
173 case TGSI_SEMANTIC_POSITION:
174 return 0;
175 case TGSI_SEMANTIC_PSIZE:
176 return 1;
177 case TGSI_SEMANTIC_CLIPDIST:
178 assert(index <= 1);
179 return 2 + index;
180 case TGSI_SEMANTIC_GENERIC:
181 if (index <= 63-4)
182 return 4 + index;
183 else
184 /* same explanation as in the default statement,
185 * the only user hitting this is st/nine.
186 */
187 return 0;
188
189 /* patch indices are completely separate and thus start from 0 */
190 case TGSI_SEMANTIC_TESSOUTER:
191 return 0;
192 case TGSI_SEMANTIC_TESSINNER:
193 return 1;
194 case TGSI_SEMANTIC_PATCH:
195 return 2 + index;
196
197 default:
198 /* Don't fail here. The result of this function is only used
199 * for LS, TCS, TES, and GS, where legacy GL semantics can't
200 * occur, but this function is called for all vertex shaders
201 * before it's known whether LS will be compiled or not.
202 */
203 return 0;
204 }
205 }
206
207 /**
208 * Get the value of a shader input parameter and extract a bitfield.
209 */
210 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
211 unsigned param, unsigned rshift,
212 unsigned bitwidth)
213 {
214 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
215 LLVMValueRef value = LLVMGetParam(ctx->radeon_bld.main_fn,
216 param);
217
218 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
219 value = bitcast(&ctx->radeon_bld.soa.bld_base,
220 TGSI_TYPE_UNSIGNED, value);
221
222 if (rshift)
223 value = LLVMBuildLShr(gallivm->builder, value,
224 lp_build_const_int32(gallivm, rshift), "");
225
226 if (rshift + bitwidth < 32) {
227 unsigned mask = (1 << bitwidth) - 1;
228 value = LLVMBuildAnd(gallivm->builder, value,
229 lp_build_const_int32(gallivm, mask), "");
230 }
231
232 return value;
233 }
234
235 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
236 {
237 switch (ctx->type) {
238 case TGSI_PROCESSOR_TESS_CTRL:
239 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
240
241 case TGSI_PROCESSOR_TESS_EVAL:
242 return LLVMGetParam(ctx->radeon_bld.main_fn,
243 ctx->param_tes_rel_patch_id);
244
245 default:
246 assert(0);
247 return NULL;
248 }
249 }
250
251 /* Tessellation shaders pass outputs to the next shader using LDS.
252 *
253 * LS outputs = TCS inputs
254 * TCS outputs = TES inputs
255 *
256 * The LDS layout is:
257 * - TCS inputs for patch 0
258 * - TCS inputs for patch 1
259 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
260 * - ...
261 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
262 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
263 * - TCS outputs for patch 1
264 * - Per-patch TCS outputs for patch 1
265 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
266 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
267 * - ...
268 *
269 * All three shaders VS(LS), TCS, TES share the same LDS space.
270 */
271
272 static LLVMValueRef
273 get_tcs_in_patch_stride(struct si_shader_context *ctx)
274 {
275 if (ctx->type == TGSI_PROCESSOR_VERTEX)
276 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
277 else if (ctx->type == TGSI_PROCESSOR_TESS_CTRL)
278 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
279 else {
280 assert(0);
281 return NULL;
282 }
283 }
284
/* Dword stride of one patch's TCS outputs in LDS
 * (bits [12:0] of SI_PARAM_TCS_OUT_LAYOUT). */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
}
290
291 static LLVMValueRef
292 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
293 {
294 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
295 unpack_param(ctx,
296 SI_PARAM_TCS_OUT_OFFSETS,
297 0, 16),
298 4);
299 }
300
301 static LLVMValueRef
302 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
303 {
304 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
305 unpack_param(ctx,
306 SI_PARAM_TCS_OUT_OFFSETS,
307 16, 16),
308 4);
309 }
310
311 static LLVMValueRef
312 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
313 {
314 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
315 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
316 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
317
318 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
319 }
320
321 static LLVMValueRef
322 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
323 {
324 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
325 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
326 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
327 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
328
329 return LLVMBuildAdd(gallivm->builder, patch0_offset,
330 LLVMBuildMul(gallivm->builder, patch_stride,
331 rel_patch_id, ""),
332 "");
333 }
334
335 static LLVMValueRef
336 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
337 {
338 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
339 LLVMValueRef patch0_patch_data_offset =
340 get_tcs_out_patch0_patch_data_offset(ctx);
341 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
342 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
343
344 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
345 LLVMBuildMul(gallivm->builder, patch_stride,
346 rel_patch_id, ""),
347 "");
348 }
349
350 static void build_indexed_store(struct si_shader_context *ctx,
351 LLVMValueRef base_ptr, LLVMValueRef index,
352 LLVMValueRef value)
353 {
354 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
355 struct gallivm_state *gallivm = bld_base->base.gallivm;
356 LLVMValueRef indices[2], pointer;
357
358 indices[0] = bld_base->uint_bld.zero;
359 indices[1] = index;
360
361 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
362 LLVMBuildStore(gallivm->builder, value, pointer);
363 }
364
365 /**
366 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
367 * It's equivalent to doing a load from &base_ptr[index].
368 *
369 * \param base_ptr Where the array starts.
370 * \param index The element index into the array.
371 */
372 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
373 LLVMValueRef base_ptr, LLVMValueRef index)
374 {
375 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
376 struct gallivm_state *gallivm = bld_base->base.gallivm;
377 LLVMValueRef indices[2], pointer;
378
379 indices[0] = bld_base->uint_bld.zero;
380 indices[1] = index;
381
382 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
383 return LLVMBuildLoad(gallivm->builder, pointer, "");
384 }
385
/**
 * Do a load from &base_ptr[index], but also add a flag that it's loading
 * a constant.
 *
 * NOTE(review): metadata kind ID 1 is passed literally; presumably this is
 * the metadata kind (e.g. invariant/constant-load marker) set up by the
 * radeon LLVM context in const_md -- confirm where const_md is created.
 */
static LLVMValueRef build_indexed_load_const(
	struct si_shader_context *ctx,
	LLVMValueRef base_ptr, LLVMValueRef index)
{
	LLVMValueRef result = build_indexed_load(ctx, base_ptr, index);
	LLVMSetMetadata(result, 1, ctx->const_md);
	return result;
}
398
399 static LLVMValueRef get_instance_index_for_fetch(
400 struct radeon_llvm_context *radeon_bld,
401 unsigned param_start_instance, unsigned divisor)
402 {
403 struct si_shader_context *ctx =
404 si_shader_context(&radeon_bld->soa.bld_base);
405 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
406
407 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
408 ctx->param_instance_id);
409
410 /* The division must be done before START_INSTANCE is added. */
411 if (divisor > 1)
412 result = LLVMBuildUDiv(gallivm->builder, result,
413 lp_build_const_int32(gallivm, divisor), "");
414
415 return LLVMBuildAdd(gallivm->builder, result,
416 LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
417 }
418
/**
 * Declare one vertex shader input: fetch it from its vertex buffer and
 * expose the four components through radeon_bld.inputs[].
 */
static void declare_input_vs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	unsigned divisor =
		ctx->shader->key.vs.prolog.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list */
	t_list_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	/* Load this attribute's vertex buffer descriptor. */
	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	/* Three ways to compute the per-fetch buffer index: */
	if (!ctx->is_monolithic) {
		/* Non-monolithic: the prolog already computed the index
		 * and passes it in as a parameter. */
		buffer_index = LLVMGetParam(radeon_bld->main_fn,
					    ctx->param_vertex_index0 +
					    input_index);
	} else if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->info.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&ctx->radeon_bld,
							    SI_PARAM_START_INSTANCE,
							    divisor);
	} else {
		/* Load the buffer index for vertices. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	/* Fetch the attribute as a vec4 via the load-input intrinsic. */
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
				   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(gallivm->builder,
						input, llvm_chan, "");
	}
}
487
488 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
489 unsigned swizzle)
490 {
491 struct si_shader_context *ctx = si_shader_context(bld_base);
492
493 if (swizzle > 0)
494 return bld_base->uint_bld.zero;
495
496 switch (ctx->type) {
497 case TGSI_PROCESSOR_VERTEX:
498 return LLVMGetParam(ctx->radeon_bld.main_fn,
499 ctx->param_vs_prim_id);
500 case TGSI_PROCESSOR_TESS_CTRL:
501 return LLVMGetParam(ctx->radeon_bld.main_fn,
502 SI_PARAM_PATCH_ID);
503 case TGSI_PROCESSOR_TESS_EVAL:
504 return LLVMGetParam(ctx->radeon_bld.main_fn,
505 ctx->param_tes_patch_id);
506 case TGSI_PROCESSOR_GEOMETRY:
507 return LLVMGetParam(ctx->radeon_bld.main_fn,
508 SI_PARAM_PRIMITIVE_ID);
509 default:
510 assert(0);
511 return bld_base->uint_bld.zero;
512 }
513 }
514
515 /**
516 * Return the value of tgsi_ind_register for indexing.
517 * This is the indirect index with the constant offset added to it.
518 */
519 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
520 const struct tgsi_ind_register *ind,
521 int rel_index)
522 {
523 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
524 LLVMValueRef result;
525
526 result = ctx->radeon_bld.soa.addr[ind->Index][ind->Swizzle];
527 result = LLVMBuildLoad(gallivm->builder, result, "");
528 result = LLVMBuildAdd(gallivm->builder, result,
529 lp_build_const_int32(gallivm, rel_index), "");
530 return result;
531 }
532
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * Exactly one of \p dst / \p src is non-NULL; the address computation is
 * identical for both.  \p base_addr is the starting LDS dword offset
 * (patch base); \p vertex_dw_stride is the per-vertex stride for
 * 2-dimensional registers (may be NULL for per-patch data).
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = lp_build_const_int32(gallivm, reg.Dimension.Index);

		/* base_addr += vertex_index * vertex_dw_stride */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* For declared arrays, index relative to the array start. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* base_addr += ind_index * 4 (dwords per register) */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      lp_build_const_int32(gallivm, 4), ""), "");

		param = si_shader_io_get_unique_index(name[first], index[first]);
	} else {
		param = si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    lp_build_const_int32(gallivm, param * 4), "");
}
617
/**
 * Load from LDS.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef value;

	/* Whole-vector load: recurse once per channel and gather. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       lp_build_const_int32(gallivm, swizzle));

	value = build_indexed_load(ctx, ctx->lds, dw_addr);
	/* Doubles occupy two consecutive dwords; load the second half and
	 * combine. */
	if (type == TGSI_TYPE_DOUBLE) {
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       lp_build_const_int32(gallivm, swizzle + 1));
		value2 = build_indexed_load(ctx, ctx->lds, dw_addr);
		return radeon_llvm_emit_fetch_double(bld_base, value, value2);
	}

	/* LDS holds raw i32 dwords; cast to the requested type. */
	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
658
659 /**
660 * Store to LDS.
661 *
662 * \param swizzle offset (typically 0..3)
663 * \param dw_addr address in dwords
664 * \param value value to store
665 */
666 static void lds_store(struct lp_build_tgsi_context *bld_base,
667 unsigned swizzle, LLVMValueRef dw_addr,
668 LLVMValueRef value)
669 {
670 struct si_shader_context *ctx = si_shader_context(bld_base);
671 struct gallivm_state *gallivm = bld_base->base.gallivm;
672
673 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
674 lp_build_const_int32(gallivm, swizzle));
675
676 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
677 build_indexed_store(ctx, ctx->lds,
678 dw_addr, value);
679 }
680
681 static LLVMValueRef fetch_input_tcs(
682 struct lp_build_tgsi_context *bld_base,
683 const struct tgsi_full_src_register *reg,
684 enum tgsi_opcode_type type, unsigned swizzle)
685 {
686 struct si_shader_context *ctx = si_shader_context(bld_base);
687 LLVMValueRef dw_addr, stride;
688
689 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
690 dw_addr = get_tcs_in_current_patch_offset(ctx);
691 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
692
693 return lds_load(bld_base, type, swizzle, dw_addr);
694 }
695
696 static LLVMValueRef fetch_output_tcs(
697 struct lp_build_tgsi_context *bld_base,
698 const struct tgsi_full_src_register *reg,
699 enum tgsi_opcode_type type, unsigned swizzle)
700 {
701 struct si_shader_context *ctx = si_shader_context(bld_base);
702 LLVMValueRef dw_addr, stride;
703
704 if (reg->Register.Dimension) {
705 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
706 dw_addr = get_tcs_out_current_patch_offset(ctx);
707 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
708 } else {
709 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
710 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
711 }
712
713 return lds_load(bld_base, type, swizzle, dw_addr);
714 }
715
716 static LLVMValueRef fetch_input_tes(
717 struct lp_build_tgsi_context *bld_base,
718 const struct tgsi_full_src_register *reg,
719 enum tgsi_opcode_type type, unsigned swizzle)
720 {
721 struct si_shader_context *ctx = si_shader_context(bld_base);
722 LLVMValueRef dw_addr, stride;
723
724 if (reg->Register.Dimension) {
725 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
726 dw_addr = get_tcs_out_current_patch_offset(ctx);
727 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
728 } else {
729 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
730 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
731 }
732
733 return lds_load(bld_base, type, swizzle, dw_addr);
734 }
735
/**
 * TGSI store callback for the TCS: write scalar per-vertex / per-patch
 * outputs to LDS; anything else falls through to the generic store.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		radeon_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	/* 2-dimensional registers are per-vertex; the rest are per-patch. */
	if (reg->Register.Dimension) {
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
	}

	/* Store each enabled channel, applying saturate if requested. */
	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = radeon_llvm_saturate(bld_base, value);

		lds_store(bld_base, chan_index, dw_addr, value);
	}
}
773
/**
 * Fetch a GS input (an ES output) from the ESGS ring buffer.
 *
 * PrimitiveID is handled specially (it's a system value, not in the ring).
 * A swizzle of ~0 fetches a whole vec4 by recursing per channel.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	/* GS inputs are always per-vertex (2-dimensional). */
	if (!reg->Register.Dimension)
		return NULL;

	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		/* Vertices 0-1 come right after SI_PARAM_VTX0_OFFSET. */
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		/* Vertices 2-5 are in a second parameter group. */
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	/* The parameter is in dwords; the buffer offset is in bytes. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->radeon_bld.main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	/* Argument layout of llvm.SI.buffer.load.dword:
	 * rsrc, vaddr, soffset, inst_offset, offen, idxen, glc, slc, tfe */
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one; /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one; /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
	/* Doubles span two consecutive dword slots; load the second half. */
	if (type == TGSI_TYPE_DOUBLE) {
		LLVMValueRef value2;
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
		return radeon_llvm_emit_fetch_double(bld_base,
						     value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
852
853 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
854 {
855 switch (interpolate) {
856 case TGSI_INTERPOLATE_CONSTANT:
857 return 0;
858
859 case TGSI_INTERPOLATE_LINEAR:
860 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
861 return SI_PARAM_LINEAR_SAMPLE;
862 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
863 return SI_PARAM_LINEAR_CENTROID;
864 else
865 return SI_PARAM_LINEAR_CENTER;
866 break;
867 case TGSI_INTERPOLATE_COLOR:
868 case TGSI_INTERPOLATE_PERSPECTIVE:
869 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
870 return SI_PARAM_PERSP_SAMPLE;
871 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
872 return SI_PARAM_PERSP_CENTROID;
873 else
874 return SI_PARAM_PERSP_CENTER;
875 break;
876 default:
877 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
878 return -1;
879 }
880 }
881
882 /* This shouldn't be used by explicit INTERP opcodes. */
883 static unsigned select_interp_param(struct si_shader_context *ctx,
884 unsigned param)
885 {
886 if (!ctx->shader->key.ps.prolog.force_persample_interp ||
887 !ctx->is_monolithic)
888 return param;
889
890 /* If the shader doesn't use center/centroid, just return the parameter.
891 *
892 * If the shader only uses one set of (i,j), "si_emit_spi_ps_input" can
893 * switch between center/centroid and sample without shader changes.
894 */
895 switch (param) {
896 case SI_PARAM_PERSP_CENTROID:
897 case SI_PARAM_PERSP_CENTER:
898 return SI_PARAM_PERSP_SAMPLE;
899
900 case SI_PARAM_LINEAR_CENTROID:
901 case SI_PARAM_LINEAR_CENTER:
902 return SI_PARAM_LINEAR_SAMPLE;
903
904 default:
905 return param;
906 }
907 }
908
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and back
		 * color and select per-fragment based on the FRONT_FACE
		 * input. */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		/* Intrinsic args: channel, attribute, prim_mask[, (i,j)].
		 * The 4th arg is only passed for fs.interp. */
		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						   ctx->f32, args, args[3] ? 4 : 3,
						   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
						  ctx->f32, args, args[3] ? 4 : 3,
						  LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						       is_face_positive,
						       front,
						       back,
						       "");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only the X component is interpolated; Y/Z are 0 and
		 * W is 1, per the GL fog coordinate convention. */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Common case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
							  ctx->f32, args, args[3] ? 4 : 3,
							  LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
1025
/* Declare a fragment shader input: either pick up color values that the
 * PS prolog already loaded into input VGPRs, or emit interpolation code
 * via interp_fs_input(). Results are written to radeon_bld->inputs.
 */
static void declare_input_fs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = radeon_bld->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (!ctx->is_monolithic &&
	    decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		/* colors_read holds 4 mask bits per color; COLOR1's VGPRs
		 * come after however many COLOR0 components are read. */
		unsigned mask = colors_read >> (i * 4);
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		/* Components that are not read become undef; offset only
		 * advances for components that are actually read. */
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
			mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	/* NOTE(review): -1 appears to mean "no interpolation for this
	 * mode" and 0 "no interp param needed" — confirm against
	 * lookup_interp_param_index(). */
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		interp_param_idx = select_interp_param(ctx,
						       interp_param_idx);
		interp_param = LLVMGetParam(main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)]);
}
1076
1077 static LLVMValueRef get_sample_id(struct radeon_llvm_context *radeon_bld)
1078 {
1079 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1080 SI_PARAM_ANCILLARY, 8, 4);
1081 }
1082
1083 /**
1084 * Load a dword from a constant buffer.
1085 */
1086 static LLVMValueRef buffer_load_const(LLVMBuilderRef builder, LLVMValueRef resource,
1087 LLVMValueRef offset, LLVMTypeRef return_type)
1088 {
1089 LLVMValueRef args[2] = {resource, offset};
1090
1091 return lp_build_intrinsic(builder, "llvm.SI.load.const", return_type, args, 2,
1092 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1093 }
1094
1095 static LLVMValueRef load_sample_position(struct radeon_llvm_context *radeon_bld, LLVMValueRef sample_id)
1096 {
1097 struct si_shader_context *ctx =
1098 si_shader_context(&radeon_bld->soa.bld_base);
1099 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1100 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1101 LLVMBuilderRef builder = gallivm->builder;
1102 LLVMValueRef desc = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
1103 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_DRIVER_STATE_CONST_BUF);
1104 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1105
1106 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1107 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1108 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1109
1110 LLVMValueRef pos[4] = {
1111 buffer_load_const(builder, resource, offset0, ctx->f32),
1112 buffer_load_const(builder, resource, offset1, ctx->f32),
1113 lp_build_const_float(gallivm, 0),
1114 lp_build_const_float(gallivm, 0)
1115 };
1116
1117 return lp_build_gather_values(gallivm, pos, 4);
1118 }
1119
/* Declare a TGSI system value: compute or load it and store the
 * resulting LLVM value in radeon_bld->system_values[index].
 */
static void declare_system_value(
	struct radeon_llvm_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID includes the base vertex. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS: bits [12:8] of REL_IDS; GS: a dedicated input. */
		if (ctx->type == TGSI_PROCESSOR_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == TGSI_PROCESSOR_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* .w is returned as 1/w (reciprocal of the input). */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Take the fractional parts of the absolute sample
		 * positions; .zw are set to 0. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Bits [31:26] of TCS_OUT_LAYOUT. */
		value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess factors are read from LDS at the current patch's
		 * data offset. */
		LLVMValueRef dw_addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = LLVMBuildAdd(gallivm->builder, dw_addr,
				       lp_build_const_int32(gallivm, param * 4), "");

		value = lds_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
				 ~0, dw_addr);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1258
1259 static LLVMValueRef fetch_constant(
1260 struct lp_build_tgsi_context *bld_base,
1261 const struct tgsi_full_src_register *reg,
1262 enum tgsi_opcode_type type,
1263 unsigned swizzle)
1264 {
1265 struct si_shader_context *ctx = si_shader_context(bld_base);
1266 struct lp_build_context *base = &bld_base->base;
1267 const struct tgsi_ind_register *ireg = &reg->Indirect;
1268 unsigned buf, idx;
1269
1270 LLVMValueRef addr, bufp;
1271 LLVMValueRef result;
1272
1273 if (swizzle == LP_CHAN_ALL) {
1274 unsigned chan;
1275 LLVMValueRef values[4];
1276 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
1277 values[chan] = fetch_constant(bld_base, reg, type, chan);
1278
1279 return lp_build_gather_values(bld_base->base.gallivm, values, 4);
1280 }
1281
1282 buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
1283 idx = reg->Register.Index * 4 + swizzle;
1284
1285 if (!reg->Register.Indirect && !reg->Dimension.Indirect) {
1286 if (type != TGSI_TYPE_DOUBLE)
1287 return bitcast(bld_base, type, ctx->constants[buf][idx]);
1288 else {
1289 return radeon_llvm_emit_fetch_double(bld_base,
1290 ctx->constants[buf][idx],
1291 ctx->constants[buf][idx + 1]);
1292 }
1293 }
1294
1295 if (reg->Register.Dimension && reg->Dimension.Indirect) {
1296 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
1297 LLVMValueRef index;
1298 index = get_indirect_index(ctx, &reg->DimIndirect,
1299 reg->Dimension.Index);
1300 bufp = build_indexed_load_const(ctx, ptr, index);
1301 } else
1302 bufp = ctx->const_buffers[buf];
1303
1304 addr = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
1305 addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
1306 addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
1307 addr = lp_build_add(&bld_base->uint_bld, addr,
1308 lp_build_const_int32(base->gallivm, idx * 4));
1309
1310 result = buffer_load_const(base->gallivm->builder, bufp,
1311 addr, ctx->f32);
1312
1313 if (type != TGSI_TYPE_DOUBLE)
1314 result = bitcast(bld_base, type, result);
1315 else {
1316 LLVMValueRef addr2, result2;
1317 addr2 = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle + 1];
1318 addr2 = LLVMBuildLoad(base->gallivm->builder, addr2, "load addr reg2");
1319 addr2 = lp_build_mul_imm(&bld_base->uint_bld, addr2, 16);
1320 addr2 = lp_build_add(&bld_base->uint_bld, addr2,
1321 lp_build_const_int32(base->gallivm, idx * 4));
1322
1323 result2 = buffer_load_const(base->gallivm->builder, ctx->const_buffers[buf],
1324 addr2, ctx->f32);
1325
1326 result = radeon_llvm_emit_fetch_double(bld_base,
1327 result, result2);
1328 }
1329 return result;
1330 }
1331
1332 /* Upper 16 bits must be zero. */
1333 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1334 LLVMValueRef val[2])
1335 {
1336 return LLVMBuildOr(gallivm->builder, val[0],
1337 LLVMBuildShl(gallivm->builder, val[1],
1338 lp_build_const_int32(gallivm, 16),
1339 ""), "");
1340 }
1341
1342 /* Upper 16 bits are ignored and will be dropped. */
1343 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1344 LLVMValueRef val[2])
1345 {
1346 LLVMValueRef v[2] = {
1347 LLVMBuildAnd(gallivm->builder, val[0],
1348 lp_build_const_int32(gallivm, 0xffff), ""),
1349 val[1],
1350 };
1351 return si_llvm_pack_two_int16(gallivm, v);
1352 }
1353
/* Initialize arguments for the shader export intrinsic.
 *
 * The 9-element args array layout is:
 *   args[0] writemask, args[1] valid-mask flag, args[2] done flag,
 *   args[3] export target, args[4] COMPR flag, args[5..8] the values.
 * For fragment shaders the values are converted/packed according to the
 * color buffer's SPI_SHADER_COL_FORMAT.
 */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
				     LLVMValueRef *values,
				     unsigned target,
				     LLVMValueRef *args)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint =
		&ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context *base = &bld_base->base;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef val[4];
	unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
	unsigned chan;
	bool is_int8; /* only set (and only read) for fragment MRT exports */

	/* Default is 0xf. Adjusted below depending on the format. */
	args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */

	/* Specify whether the EXEC mask represents the valid mask */
	args[1] = uint->zero;

	/* Specify whether this is the last export */
	args[2] = uint->zero;

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, target);

	if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		/* Look up the export format for this color buffer. */
		const union si_shader_key *key = &ctx->shader->key;
		unsigned col_formats = key->ps.epilog.spi_shader_col_format;
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		assert(cbuf >= 0 && cbuf < 8);
		spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
		is_int8 = (key->ps.epilog.color_is_int8 >> cbuf) & 0x1;
	}

	args[4] = uint->zero; /* COMPR flag */
	args[5] = base->undef;
	args[6] = base->undef;
	args[7] = base->undef;
	args[8] = base->undef;

	switch (spi_shader_col_format) {
	case V_028714_SPI_SHADER_ZERO:
		/* Nothing is exported; redirect to the null target. */
		args[0] = uint->zero; /* writemask */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
		break;

	case V_028714_SPI_SHADER_32_R:
		args[0] = uint->one; /* writemask */
		args[5] = values[0];
		break;

	case V_028714_SPI_SHADER_32_GR:
		args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
		args[5] = values[0];
		args[6] = values[1];
		break;

	case V_028714_SPI_SHADER_32_AR:
		args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
		args[5] = values[0];
		args[8] = values[3];
		break;

	case V_028714_SPI_SHADER_FP16_ABGR:
		/* Pack pairs of channels into f16x2 via llvm.SI.packf16. */
		args[4] = uint->one; /* COMPR flag */

		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				values[2 * chan],
				values[2 * chan + 1]
			};
			LLVMValueRef packed;

			packed = lp_build_intrinsic(base->gallivm->builder,
						    "llvm.SI.packf16",
						    ctx->i32, pack_args, 2,
						    LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
			args[chan + 5] =
				LLVMBuildBitCast(base->gallivm->builder,
						 packed, ctx->f32, "");
		}
		break;

	case V_028714_SPI_SHADER_UNORM16_ABGR:
		/* Clamp to [0,1], scale by 65535, round to nearest by
		 * adding 0.5 before the float->uint conversion. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  lp_build_const_float(gallivm, 65535), "");
			val[chan] = LLVMBuildFAdd(builder, val[chan],
						  lp_build_const_float(gallivm, 0.5), "");
			val[chan] = LLVMBuildFPToUI(builder, val[chan],
						    ctx->i32, "");
		}

		args[4] = uint->one; /* COMPR flag */
		args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int16(gallivm, val));
		args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int16(gallivm, val+2));
		break;

	case V_028714_SPI_SHADER_SNORM16_ABGR:
		for (chan = 0; chan < 4; chan++) {
			/* Clamp between [-1, 1]. */
			val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
							      values[chan],
							      lp_build_const_float(gallivm, 1));
			val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
							      val[chan],
							      lp_build_const_float(gallivm, -1));
			/* Convert to a signed integer in [-32767, 32767]. */
			val[chan] = LLVMBuildFMul(builder, val[chan],
						  lp_build_const_float(gallivm, 32767), "");
			/* If positive, add 0.5, else add -0.5. */
			val[chan] = LLVMBuildFAdd(builder, val[chan],
					LLVMBuildSelect(builder,
						LLVMBuildFCmp(builder, LLVMRealOGE,
							      val[chan], base->zero, ""),
						lp_build_const_float(gallivm, 0.5),
						lp_build_const_float(gallivm, -0.5), ""), "");
			val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
		}

		args[4] = uint->one; /* COMPR flag */
		args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int32_as_int16(gallivm, val));
		args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int32_as_int16(gallivm, val+2));
		break;

	case V_028714_SPI_SHADER_UINT16_ABGR: {
		/* Clamp to 8-bit or 16-bit unsigned max, then pack. */
		LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
							255 : 65535);
		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
							      val[chan], max);
		}

		args[4] = uint->one; /* COMPR flag */
		args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int16(gallivm, val));
		args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int16(gallivm, val+2));
		break;
	}

	case V_028714_SPI_SHADER_SINT16_ABGR: {
		/* Clamp to the 8-bit or 16-bit signed range, then pack. */
		LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
							127 : 32767);
		LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
							-128 : -32768);
		/* Clamp. */
		for (chan = 0; chan < 4; chan++) {
			val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
			val[chan] = lp_build_emit_llvm_binary(bld_base,
							      TGSI_OPCODE_IMIN,
							      val[chan], max);
			val[chan] = lp_build_emit_llvm_binary(bld_base,
							      TGSI_OPCODE_IMAX,
							      val[chan], min);
		}

		args[4] = uint->one; /* COMPR flag */
		args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int32_as_int16(gallivm, val));
		args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
				  si_llvm_pack_two_int32_as_int16(gallivm, val+2));
		break;
	}

	case V_028714_SPI_SHADER_32_ABGR:
		/* Export all four channels unmodified. */
		memcpy(&args[5], values, sizeof(values[0]) * 4);
		break;
	}
}
1536
1537 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
1538 LLVMValueRef alpha)
1539 {
1540 struct si_shader_context *ctx = si_shader_context(bld_base);
1541 struct gallivm_state *gallivm = bld_base->base.gallivm;
1542
1543 if (ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
1544 LLVMValueRef alpha_ref = LLVMGetParam(ctx->radeon_bld.main_fn,
1545 SI_PARAM_ALPHA_REF);
1546
1547 LLVMValueRef alpha_pass =
1548 lp_build_cmp(&bld_base->base,
1549 ctx->shader->key.ps.epilog.alpha_func,
1550 alpha, alpha_ref);
1551 LLVMValueRef arg =
1552 lp_build_select(&bld_base->base,
1553 alpha_pass,
1554 lp_build_const_float(gallivm, 1.0f),
1555 lp_build_const_float(gallivm, -1.0f));
1556
1557 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
1558 ctx->voidt, &arg, 1, 0);
1559 } else {
1560 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
1561 ctx->voidt, NULL, 0, 0);
1562 }
1563 }
1564
1565 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
1566 LLVMValueRef alpha,
1567 unsigned samplemask_param)
1568 {
1569 struct si_shader_context *ctx = si_shader_context(bld_base);
1570 struct gallivm_state *gallivm = bld_base->base.gallivm;
1571 LLVMValueRef coverage;
1572
1573 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
1574 coverage = LLVMGetParam(ctx->radeon_bld.main_fn,
1575 samplemask_param);
1576 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
1577
1578 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
1579 ctx->i32,
1580 &coverage, 1, LLVMReadNoneAttribute);
1581
1582 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
1583 ctx->f32, "");
1584
1585 coverage = LLVMBuildFMul(gallivm->builder, coverage,
1586 lp_build_const_float(gallivm,
1587 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
1588
1589 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
1590 }
1591
/* Compute clip distances for CLIPVERTEX: for each of the 8 distances,
 * take the dot product of the clip vertex (out_elts) with the matching
 * user clip plane read from the driver-state constant buffer, and fill
 * in the export arguments for the two clip-distance position exports
 * (POS+2 and POS+3) in pos[2] and pos[3].
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, SI_DRIVER_STATE_CONST_BUF);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start with all four exported distances at 0. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane (reg_index*4 + chan),
				 * component const_chan (4 floats per plane). */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(base->gallivm->builder, const_resource,
						      args[1], ctx->f32);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Fill in the remaining export arguments (see
		 * si_llvm_init_export_args for the layout). */
		args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		args[1] = uint->zero; /* valid-mask flag */
		args[2] = uint->zero; /* last export? */
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero; /* COMPR flag */
	}
}
1637
1638 static void si_dump_streamout(struct pipe_stream_output_info *so)
1639 {
1640 unsigned i;
1641
1642 if (so->num_outputs)
1643 fprintf(stderr, "STREAMOUT\n");
1644
1645 for (i = 0; i < so->num_outputs; i++) {
1646 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
1647 so->output[i].start_component;
1648 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
1649 i, so->output[i].output_buffer,
1650 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
1651 so->output[i].register_index,
1652 mask & 1 ? "x" : "",
1653 mask & 2 ? "y" : "",
1654 mask & 4 ? "z" : "",
1655 mask & 8 ? "w" : "");
1656 }
1657 }
1658
/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4).
 *
 * The remaining parameters map 1:1 onto the llvm.SI.tbuffer.store
 * intrinsic operands in the order listed below (dfmt/nfmt are the buffer
 * data/number formats, offen/idxen/glc/slc/tfe the MTBUF flags).
 */
static void build_tbuffer_store(struct si_shader_context *ctx,
				LLVMValueRef rsrc,
				LLVMValueRef vdata,
				unsigned num_channels,
				LLVMValueRef vaddr,
				LLVMValueRef soffset,
				unsigned inst_offset,
				unsigned dfmt,
				unsigned nfmt,
				unsigned offen,
				unsigned idxen,
				unsigned glc,
				unsigned slc,
				unsigned tfe)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(ctx->i32, num_channels, 0),
		vaddr,
		soffset,
		LLVMConstInt(ctx->i32, inst_offset, 0),
		LLVMConstInt(ctx->i32, dfmt, 0),
		LLVMConstInt(ctx->i32, nfmt, 0),
		LLVMConstInt(ctx->i32, offen, 0),
		LLVMConstInt(ctx->i32, idxen, 0),
		LLVMConstInt(ctx->i32, glc, 0),
		LLVMConstInt(ctx->i32, slc, 0),
		LLVMConstInt(ctx->i32, tfe, 0)
	};

	/* The instruction offset field has 12 bits */
	assert(offen || inst_offset < (1 << 12));

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	/* num_channels 3 and 4 both map to the v4i32 variant. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
			   args, Elements(args), 0);
}
1706
1707 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
1708 LLVMValueRef rsrc,
1709 LLVMValueRef vdata,
1710 unsigned num_channels,
1711 LLVMValueRef vaddr,
1712 LLVMValueRef soffset,
1713 unsigned inst_offset)
1714 {
1715 static unsigned dfmt[] = {
1716 V_008F0C_BUF_DATA_FORMAT_32,
1717 V_008F0C_BUF_DATA_FORMAT_32_32,
1718 V_008F0C_BUF_DATA_FORMAT_32_32_32,
1719 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
1720 };
1721 assert(num_channels >= 1 && num_channels <= 4);
1722
1723 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
1724 inst_offset, dfmt[num_channels-1],
1725 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
1726 }
1727
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Emits the conditional per-thread stores of the enabled stream outputs
 * to the streamout buffers via tbuffer stores. Only threads whose index
 * is below the SGPR-provided vertex count write anything, and each
 * output is additionally guarded by a stream-ID check.
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = lp_build_intrinsic(builder, "llvm.SI.tid", ctx->i32,
					      NULL, 0, LLVMReadNoneAttribute);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Bits [25:24] of the streamout config. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
                 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->radeon_bld.main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			/* so_offset is in dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->radeon_bld.main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Outputs past the end of the shader's output
			 * array are skipped. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Only store when the currently selected stream
			 * matches this output's stream. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, ctx->so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
1845
1846
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits position exports (POS0..POS3) and parameter exports for all vertex
 * outputs. Position-like exports are buffered in pos_args[] so that the
 * "last export" DONE bit can be set correctly; parameter exports are emitted
 * immediately. Also triggers streamout emission when enabled.
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
				&ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	/* Buffered export args for POS0..POS3; [x][0] == NULL means unused. */
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	if (outputs && ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(ctx, outputs, noutput);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].sid;

handle_semantic:
		/* Select the correct target.
		 *
		 * Some semantics are handled twice: the first pass saves the
		 * value for the misc vector (PSIZE/EDGEFLAG/LAYER/VIEWPORT)
		 * or exports clip distances, then jumps back with the name
		 * rewritten to GENERIC so the value is also exported as an
		 * ordinary parameter. */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			/* Clip distances go to POS2/POS3. */
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			/* Buffer position exports; they are emitted at the
			 * end so the last one can carry the DONE bit. */
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export", ctx->voidt,
					   args, 9, 0);
		}

		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			/* Also export clip distances as a parameter. */
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->selector->info.writes_psize |
						      (shader->selector->info.writes_edgeflag << 1) |
						      (shader->selector->info.writes_layer << 2) |
						      (shader->selector->info.writes_viewport_index << 3));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1][5] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  ctx->f32, "");
		}

		if (shader->selector->info.writes_layer)
			pos_args[1][7] = layer_value;

		if (shader->selector->info.writes_viewport_index)
			pos_args[1][8] = viewport_index_value;
	}

	/* Count the used position exports so the last one can be marked. */
	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, pos_args[i], 9, 0);
	}
}
2021
/* Read the tessellation factor levels from LDS and store them to the
 * tess factor ring buffer, once per patch (guarded to invocation 0).
 *
 * \param rel_patch_id                       patch index relative to the
 *                                           current threadgroup
 * \param invocation_id                      TCS invocation within the patch
 * \param tcs_out_current_patch_data_offset  LDS dword offset of this patch's
 *                                           per-patch output data
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx;

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	/* Each I/O slot occupies 4 dwords in LDS. */
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* Outer factors first, then inner factors. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	/* A 6-dword stride needs a second (vec2) store. */
	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->radeon_bld.main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	/* Store the outputs. */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 0);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 16);
	lp_build_endif(&if_ctx);
}
2114
/* This only writes the tessellation factor levels.
 *
 * In the monolithic case the factors are written directly; otherwise the
 * values needed by the separately-compiled TCS epilog part are placed into
 * this function's return value (SGPRs first, then VGPRs).
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	rel_patch_id = get_rel_patch_id(ctx);
	/* Invocation ID: 5 bits starting at bit 8 of SI_PARAM_REL_IDS. */
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (!ctx->is_monolithic) {
		/* Return epilog parameters from this function. */
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef ret = ctx->return_value;
		LLVMValueRef rw_buffers, rw0, rw1, tf_soffset;
		unsigned vgpr;

		/* RW_BUFFERS pointer: split the 64-bit pointer into two
		 * 32-bit halves and return them in slots 0 and 1. */
		rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
					SI_PARAM_RW_BUFFERS);
		rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
		rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
		rw0 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.zero, "");
		rw1 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.one, "");
		ret = LLVMBuildInsertValue(builder, ret, rw0, 0, "");
		ret = LLVMBuildInsertValue(builder, ret, rw1, 1, "");

		/* Tess factor buffer soffset is after user SGPRs. */
		tf_soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					  SI_PARAM_TESS_FACTOR_OFFSET);
		ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
					   SI_TCS_NUM_USER_SGPR, "");

		/* VGPRs must be returned as floats, hence the bitcasts. */
		rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
		invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
		tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

		vgpr = SI_TCS_NUM_USER_SGPR + 1;
		ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
		ctx->return_value = ret;
		return;
	}

	si_write_tess_factors(bld_base, rel_patch_id, invocation_id, tf_lds_offset);
}
2165
2166 static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
2167 {
2168 struct si_shader_context *ctx = si_shader_context(bld_base);
2169 struct si_shader *shader = ctx->shader;
2170 struct tgsi_shader_info *info = &shader->selector->info;
2171 struct gallivm_state *gallivm = bld_base->base.gallivm;
2172 unsigned i, chan;
2173 LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
2174 ctx->param_rel_auto_id);
2175 LLVMValueRef vertex_dw_stride =
2176 unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
2177 LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
2178 vertex_dw_stride, "");
2179
2180 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2181 * its inputs from it. */
2182 for (i = 0; i < info->num_outputs; i++) {
2183 LLVMValueRef *out_ptr = ctx->radeon_bld.soa.outputs[i];
2184 unsigned name = info->output_semantic_name[i];
2185 unsigned index = info->output_semantic_index[i];
2186 int param = si_shader_io_get_unique_index(name, index);
2187 LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
2188 lp_build_const_int32(gallivm, param * 4), "");
2189
2190 for (chan = 0; chan < 4; chan++) {
2191 lds_store(bld_base, chan, dw_addr,
2192 LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
2193 }
2194 }
2195 }
2196
2197 static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
2198 {
2199 struct si_shader_context *ctx = si_shader_context(bld_base);
2200 struct gallivm_state *gallivm = bld_base->base.gallivm;
2201 struct si_shader *es = ctx->shader;
2202 struct tgsi_shader_info *info = &es->selector->info;
2203 LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
2204 ctx->param_es2gs_offset);
2205 unsigned chan;
2206 int i;
2207
2208 for (i = 0; i < info->num_outputs; i++) {
2209 LLVMValueRef *out_ptr =
2210 ctx->radeon_bld.soa.outputs[i];
2211 int param_index;
2212
2213 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2214 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2215 continue;
2216
2217 param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
2218 info->output_semantic_index[i]);
2219
2220 for (chan = 0; chan < 4; chan++) {
2221 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
2222 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
2223
2224 build_tbuffer_store(ctx,
2225 ctx->esgs_ring,
2226 out_val, 1,
2227 LLVMGetUndef(ctx->i32), soffset,
2228 (4 * param_index + chan) * 4,
2229 V_008F0C_BUF_DATA_FORMAT_32,
2230 V_008F0C_BUF_NUM_FORMAT_UINT,
2231 0, 0, 1, 1, 0);
2232 }
2233 }
2234 }
2235
2236 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2237 {
2238 struct si_shader_context *ctx = si_shader_context(bld_base);
2239 struct gallivm_state *gallivm = bld_base->base.gallivm;
2240 LLVMValueRef args[2];
2241
2242 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2243 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
2244 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2245 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
2246 }
2247
/* VS epilogue: gather all outputs from their SOA temporaries and export them
 * via si_llvm_export_vs. Also handles optional vertex color clamping and
 * PrimitiveID export (monolithic) or return (non-monolithic).
 */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->is_gs_copy_shader);

	/* +1 leaves room for the possible PrimitiveID output below. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == TGSI_PROCESSOR_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->radeon_bld.main_fn,
						    SI_PARAM_VS_STATE_BITS);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, gallivm, cond);
			}

			/* Clamp all 4 components in place. */
			for (j = 0; j < 4; j++) {
				addr = ctx->radeon_bld.soa.outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = radeon_llvm_saturate(bld_base, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Load all output values into the outputs array. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].name = info->output_semantic_name[i];
		outputs[i].sid = info->output_semantic_index[i];

		for (j = 0; j < 4; j++)
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->radeon_bld.soa.outputs[i][j],
					      "");
	}

	if (ctx->is_monolithic) {
		/* Export PrimitiveID when PS needs it. */
		if (si_vs_exports_prim_id(ctx->shader)) {
			outputs[i].name = TGSI_SEMANTIC_PRIMID;
			outputs[i].sid = 0;
			outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
						       get_primitive_id(bld_base, 0));
			outputs[i].values[1] = bld_base->base.undef;
			outputs[i].values[2] = bld_base->base.undef;
			outputs[i].values[3] = bld_base->base.undef;
			i++;
		}
	} else {
		/* Return the primitive ID from the LLVM function. */
		ctx->return_value =
			LLVMBuildInsertValue(gallivm->builder,
					     ctx->return_value,
					     bitcast(bld_base, TGSI_TYPE_FLOAT,
						     get_primitive_id(bld_base, 0)),
					     VS_EPILOG_PRIMID_LOC, "");
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
2334
2335 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
2336 LLVMValueRef depth, LLVMValueRef stencil,
2337 LLVMValueRef samplemask)
2338 {
2339 struct si_shader_context *ctx = si_shader_context(bld_base);
2340 struct lp_build_context *base = &bld_base->base;
2341 struct lp_build_context *uint = &bld_base->uint_bld;
2342 LLVMValueRef args[9];
2343 unsigned mask = 0;
2344
2345 assert(depth || stencil || samplemask);
2346
2347 args[1] = uint->one; /* whether the EXEC mask is valid */
2348 args[2] = uint->one; /* DONE bit */
2349
2350 /* Specify the target we are exporting */
2351 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);
2352
2353 args[4] = uint->zero; /* COMP flag */
2354 args[5] = base->undef; /* R, depth */
2355 args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
2356 args[7] = base->undef; /* B, sample mask */
2357 args[8] = base->undef; /* A, alpha to mask */
2358
2359 if (depth) {
2360 args[5] = depth;
2361 mask |= 0x1;
2362 }
2363
2364 if (stencil) {
2365 args[6] = stencil;
2366 mask |= 0x2;
2367 }
2368
2369 if (samplemask) {
2370 args[7] = samplemask;
2371 mask |= 0x4;
2372 }
2373
2374 /* SI (except OLAND) has a bug that it only looks
2375 * at the X writemask component. */
2376 if (ctx->screen->b.chip_class == SI &&
2377 ctx->screen->b.family != CHIP_OLAND)
2378 mask |= 0x1;
2379
2380 /* Specify which components to enable */
2381 args[0] = lp_build_const_int32(base->gallivm, mask);
2382
2383 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
2384 ctx->voidt, args, 9, 0);
2385 }
2386
/* Export one color output to its MRT, applying the PS epilog key state
 * (clamping, alpha-to-one, alpha test, smoothing).
 *
 * \param color             the 4 color components (may be modified in place)
 * \param index             color buffer index
 * \param samplemask_param  function parameter index of the sample coverage
 * \param is_last           whether this is the last export of the shader
 *                          (sets the DONE bit and valid-EXEC flag)
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = radeon_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test: only performed on the first color output. */
	if (index == 0 &&
	    ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.ps.epilog.last_cbuf > 0) {
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			/* args[c][0] == 0 means the export writes nothing. */
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
					   ctx->voidt, args[c], 9, 0);
		}
	} else {
		LLVMValueRef args[9];

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, args, 9, 0);
	}
}
2455
2456 static void si_export_null(struct lp_build_tgsi_context *bld_base)
2457 {
2458 struct si_shader_context *ctx = si_shader_context(bld_base);
2459 struct lp_build_context *base = &bld_base->base;
2460 struct lp_build_context *uint = &bld_base->uint_bld;
2461 LLVMValueRef args[9];
2462
2463 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
2464 args[1] = uint->one; /* whether the EXEC mask is valid */
2465 args[2] = uint->one; /* DONE bit */
2466 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
2467 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
2468 args[5] = uint->undef; /* R */
2469 args[6] = uint->undef; /* G */
2470 args[7] = uint->undef; /* B */
2471 args[8] = uint->undef; /* A */
2472
2473 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
2474 ctx->voidt, args, 9, 0);
2475 }
2476
/* Monolithic PS epilogue: read all fragment outputs and emit the color and
 * MRTZ exports, making sure exactly one export carries the DONE bit.
 */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.epilog.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.epilog.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.epilog.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* Each color buffer has 4 format bits in spi_format. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	/* Read the outputs and emit the corresponding exports. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component is exported for depth. */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->radeon_bld.soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    SI_PARAM_SAMPLE_COVERAGE,
					    last_color_export == i);
			break;
		default:
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* MRTZ, if present, is always the last export. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask);
}
2565
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->radeon_bld.soa.outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component is used for depth. */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->radeon_bld.main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: written colors first (4 slots each), then depth,
	 * stencil and sample mask if present. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->radeon_bld.main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
2659
2660 /**
2661 * Given a v8i32 resource descriptor for a buffer, extract the size of the
2662 * buffer in number of elements and return it as an i32.
2663 */
2664 static LLVMValueRef get_buffer_size(
2665 struct lp_build_tgsi_context *bld_base,
2666 LLVMValueRef descriptor)
2667 {
2668 struct si_shader_context *ctx = si_shader_context(bld_base);
2669 struct gallivm_state *gallivm = bld_base->base.gallivm;
2670 LLVMBuilderRef builder = gallivm->builder;
2671 LLVMValueRef size =
2672 LLVMBuildExtractElement(builder, descriptor,
2673 lp_build_const_int32(gallivm, 6), "");
2674
2675 if (ctx->screen->b.chip_class >= VI) {
2676 /* On VI, the descriptor contains the size in bytes,
2677 * but TXQ must return the size in elements.
2678 * The stride is always non-zero for resources using TXQ.
2679 */
2680 LLVMValueRef stride =
2681 LLVMBuildExtractElement(builder, descriptor,
2682 lp_build_const_int32(gallivm, 5), "");
2683 stride = LLVMBuildLShr(builder, stride,
2684 lp_build_const_int32(gallivm, 16), "");
2685 stride = LLVMBuildAnd(builder, stride,
2686 lp_build_const_int32(gallivm, 0x3FFF), "");
2687
2688 size = LLVMBuildUDiv(builder, size, stride, "");
2689 }
2690
2691 return size;
2692 }
2693
2694 /**
2695 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
2696 * intrinsic names).
2697 */
2698 static void build_int_type_name(
2699 LLVMTypeRef type,
2700 char *buf, unsigned bufsize)
2701 {
2702 assert(bufsize >= 6);
2703
2704 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
2705 snprintf(buf, bufsize, "v%ui32",
2706 LLVMGetVectorSize(type));
2707 else
2708 strcpy(buf, "i32");
2709 }
2710
2711 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
2712 struct lp_build_tgsi_context *bld_base,
2713 struct lp_build_emit_data *emit_data);
2714
2715 static bool tgsi_is_array_sampler(unsigned target)
2716 {
2717 return target == TGSI_TEXTURE_1D_ARRAY ||
2718 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
2719 target == TGSI_TEXTURE_2D_ARRAY ||
2720 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
2721 target == TGSI_TEXTURE_CUBE_ARRAY ||
2722 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
2723 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
2724 }
2725
2726 static bool tgsi_is_array_image(unsigned target)
2727 {
2728 return target == TGSI_TEXTURE_3D ||
2729 target == TGSI_TEXTURE_CUBE ||
2730 target == TGSI_TEXTURE_1D_ARRAY ||
2731 target == TGSI_TEXTURE_2D_ARRAY ||
2732 target == TGSI_TEXTURE_CUBE_ARRAY ||
2733 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
2734 }
2735
2736 /**
2737 * Load the resource descriptor for \p image.
2738 */
2739 static void
2740 image_fetch_rsrc(
2741 struct lp_build_tgsi_context *bld_base,
2742 const struct tgsi_full_src_register *image,
2743 LLVMValueRef *rsrc)
2744 {
2745 struct si_shader_context *ctx = si_shader_context(bld_base);
2746
2747 assert(image->Register.File == TGSI_FILE_IMAGE);
2748
2749 if (!image->Register.Indirect) {
2750 /* Fast path: use preloaded resources */
2751 *rsrc = ctx->images[image->Register.Index];
2752 } else {
2753 /* Indexing and manual load */
2754 LLVMValueRef ind_index;
2755 LLVMValueRef rsrc_ptr;
2756
2757 ind_index = get_indirect_index(ctx, &image->Indirect, image->Register.Index);
2758
2759 rsrc_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
2760 *rsrc = build_indexed_load_const(ctx, rsrc_ptr, ind_index);
2761 }
2762 }
2763
/* Gather the arguments for a RESQ (resource query) instruction.
 *
 * For buffers only the descriptor is needed; for images the full
 * getresinfo argument list is built (argument order matters).
 */
static void resq_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];
	unsigned tex_target = inst->Memory.Texture;

	/* The result is always a 4-component vector. */
	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (tex_target == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		emit_data->args[0] = bld_base->uint_bld.zero; /* mip level */
		image_fetch_rsrc(bld_base, reg, &emit_data->args[1]);
		emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
		emit_data->args[3] = bld_base->uint_bld.zero; /* unorm */
		emit_data->args[4] = bld_base->uint_bld.zero; /* r128 */
		emit_data->args[5] = tgsi_is_array_image(tex_target) ?
			bld_base->uint_bld.one : bld_base->uint_bld.zero; /* da */
		emit_data->args[6] = bld_base->uint_bld.zero; /* glc */
		emit_data->args[7] = bld_base->uint_bld.zero; /* slc */
		emit_data->args[8] = bld_base->uint_bld.zero; /* tfe */
		emit_data->args[9] = bld_base->uint_bld.zero; /* lwe */
		emit_data->arg_count = 10;
	}
}
2793
/* Emit a RESQ (resource query) instruction.
 *
 * Buffers use the descriptor-based size computation; images go through
 * llvm.SI.getresinfo, with a fixup for cube arrays (the hardware reports
 * layers, TGSI expects cubes).
 */
static void resq_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef out;

	if (target == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		out = lp_build_intrinsic(
			builder, "llvm.SI.getresinfo.i32", emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

		/* Divide the number of layers by 6 to get the number of cubes. */
		if (target == TGSI_TEXTURE_CUBE_ARRAY) {
			LLVMValueRef imm2 = lp_build_const_int32(gallivm, 2);
			LLVMValueRef imm6 = lp_build_const_int32(gallivm, 6);

			/* The layer count is in the Z (index 2) component. */
			LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
			z = LLVMBuildBitCast(builder, z, bld_base->uint_bld.elem_type, "");
			z = LLVMBuildSDiv(builder, z, imm6, "");
			z = LLVMBuildBitCast(builder, z, bld_base->base.elem_type, "");
			out = LLVMBuildInsertElement(builder, out, z, imm2, "");
		}
	}

	emit_data->output[emit_data->chan] = out;
}
2828
2829 static void set_tex_fetch_args(struct si_shader_context *ctx,
2830 struct lp_build_emit_data *emit_data,
2831 unsigned opcode, unsigned target,
2832 LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
2833 LLVMValueRef *param, unsigned count,
2834 unsigned dmask)
2835 {
2836 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
2837 unsigned num_args;
2838 unsigned is_rect = target == TGSI_TEXTURE_RECT;
2839
2840 /* Pad to power of two vector */
2841 while (count < util_next_power_of_two(count))
2842 param[count++] = LLVMGetUndef(ctx->i32);
2843
2844 /* Texture coordinates. */
2845 if (count > 1)
2846 emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
2847 else
2848 emit_data->args[0] = param[0];
2849
2850 /* Resource. */
2851 emit_data->args[1] = res_ptr;
2852 num_args = 2;
2853
2854 if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
2855 emit_data->dst_type = ctx->v4i32;
2856 else {
2857 emit_data->dst_type = ctx->v4f32;
2858
2859 emit_data->args[num_args++] = samp_ptr;
2860 }
2861
2862 emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
2863 emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
2864 emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
2865 emit_data->args[num_args++] = lp_build_const_int32(gallivm,
2866 tgsi_is_array_sampler(target)); /* da */
2867 emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
2868 emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
2869 emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
2870 emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */
2871
2872 emit_data->arg_count = num_args;
2873 }
2874
2875 static const struct lp_build_tgsi_action tex_action;
2876
/* Selects which descriptor to load from a combined sampler/image descriptor
 * list; see get_sampler_desc_custom for the per-slot index computation.
 */
enum desc_type {
	DESC_IMAGE,
	DESC_FMASK,
	DESC_SAMPLER
};
2882
2883 static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
2884 {
2885 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
2886 CONST_ADDR_SPACE);
2887 }
2888
2889 /**
2890 * Load an image view, fmask view. or sampler state descriptor.
2891 */
2892 static LLVMValueRef get_sampler_desc_custom(struct si_shader_context *ctx,
2893 LLVMValueRef list, LLVMValueRef index,
2894 enum desc_type type)
2895 {
2896 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
2897 LLVMBuilderRef builder = gallivm->builder;
2898
2899 switch (type) {
2900 case DESC_IMAGE:
2901 /* The image is at [0:7]. */
2902 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
2903 break;
2904 case DESC_FMASK:
2905 /* The FMASK is at [8:15]. */
2906 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
2907 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
2908 break;
2909 case DESC_SAMPLER:
2910 /* The sampler state is at [12:15]. */
2911 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
2912 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
2913 list = LLVMBuildPointerCast(builder, list,
2914 const_array(ctx->v4i32, 0), "");
2915 break;
2916 }
2917
2918 return build_indexed_load_const(ctx, list, index);
2919 }
2920
2921 static LLVMValueRef get_sampler_desc(struct si_shader_context *ctx,
2922 LLVMValueRef index, enum desc_type type)
2923 {
2924 LLVMValueRef list = LLVMGetParam(ctx->radeon_bld.main_fn,
2925 SI_PARAM_SAMPLERS);
2926
2927 return get_sampler_desc_custom(ctx, list, index, type);
2928 }
2929
2930 static void tex_fetch_ptrs(
2931 struct lp_build_tgsi_context *bld_base,
2932 struct lp_build_emit_data *emit_data,
2933 LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
2934 {
2935 struct si_shader_context *ctx = si_shader_context(bld_base);
2936 const struct tgsi_full_instruction *inst = emit_data->inst;
2937 unsigned target = inst->Texture.Texture;
2938 unsigned sampler_src;
2939 unsigned sampler_index;
2940
2941 sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
2942 sampler_index = emit_data->inst->Src[sampler_src].Register.Index;
2943
2944 if (emit_data->inst->Src[sampler_src].Register.Indirect) {
2945 const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];
2946 LLVMValueRef ind_index;
2947
2948 ind_index = get_indirect_index(ctx, &reg->Indirect, reg->Register.Index);
2949
2950 *res_ptr = get_sampler_desc(ctx, ind_index, DESC_IMAGE);
2951
2952 if (target == TGSI_TEXTURE_2D_MSAA ||
2953 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
2954 *samp_ptr = NULL;
2955 *fmask_ptr = get_sampler_desc(ctx, ind_index, DESC_FMASK);
2956 } else {
2957 *samp_ptr = get_sampler_desc(ctx, ind_index, DESC_SAMPLER);
2958 *fmask_ptr = NULL;
2959 }
2960 } else {
2961 *res_ptr = ctx->sampler_views[sampler_index];
2962 *samp_ptr = ctx->sampler_states[sampler_index];
2963 *fmask_ptr = ctx->fmasks[sampler_index];
2964 }
2965 }
2966
/* Gather the address parameters for a TGSI texture instruction (offsets,
 * LOD bias, depth comparison value, derivatives, coordinates, LOD/sample
 * index) and hand them to set_tex_fetch_args.
 *
 * TXQ and buffer fetches are handled as early-out special cases.  For MSAA
 * targets, the sample index is remapped through the FMASK.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	int ref_pos;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	if (opcode == TGSI_OPCODE_TXQ) {
		if (target == TGSI_TEXTURE_BUFFER) {
			/* Read the size from the buffer descriptor directly. */
			LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
			emit_data->args[0] = get_buffer_size(bld_base, res);
			return;
		}

		/* Textures - set the mip level. */
		address[count++] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

		set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
				   NULL, address, count, 0xf);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);

		/* Bitcast and truncate v8i32 to v16i8. */
		LLVMValueRef res = res_ptr;
		res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
		res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
		res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");

		/* Buffer fetches go through the vertex-fetch path:
		 * args = (resource, attribute offset 0, index from src0.x). */
		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res;
		emit_data->args[1] = bld_base->uint_bld.zero;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack offsets. */
	if (has_offset && opcode != TGSI_OPCODE_TXF) {
		/* The offsets are six-bit signed integers packed like this:
		 * X=[5:0], Y=[13:8], and Z=[21:16].
		 */
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
						    lp_build_const_int32(gallivm, 0x3f), "");
			if (chan)
				offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
							    lp_build_const_int32(gallivm, chan*8), "");
		}

		pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
		address[count++] = pack;
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			/* The comparison value overflowed the coordinate
			 * registers and lives in src1.x instead. */
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			address[count++] = coords[ref_pos];
		}
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;
			num_deriv_channels = 1;
			break;
		default:
			unreachable("invalid target");
		}

		/* src1 holds the X derivatives, src2 the Y derivatives. */
		for (param = 0; param < 2; param++)
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_src_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);
	}

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords, derivs);

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* The intrinsic takes i32 components; bitcast everything. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan], ctx->i32, "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;
		/* NOTE: this local intentionally shadows the outer "inst";
		 * it describes the synthetic TXF used to read the FMASK. */
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Read FMASK using TXF. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid),
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}

	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = imms[src1.Index][src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
			   samp_ptr, address, count, dmask);
}
3303
3304 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
3305 struct lp_build_tgsi_context *bld_base,
3306 struct lp_build_emit_data *emit_data)
3307 {
3308 struct lp_build_context *base = &bld_base->base;
3309 unsigned opcode = emit_data->inst->Instruction.Opcode;
3310 unsigned target = emit_data->inst->Texture.Texture;
3311 char intr_name[127];
3312 bool has_offset = emit_data->inst->Texture.NumOffsets > 0;
3313 bool is_shadow = tgsi_is_shadow_target(target);
3314 char type[64];
3315 const char *name = "llvm.SI.image.sample";
3316 const char *infix = "";
3317
3318 if (opcode == TGSI_OPCODE_TXQ && target == TGSI_TEXTURE_BUFFER) {
3319 /* Just return the buffer size. */
3320 emit_data->output[emit_data->chan] = emit_data->args[0];
3321 return;
3322 }
3323
3324 if (target == TGSI_TEXTURE_BUFFER) {
3325 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3326 base->gallivm->builder,
3327 "llvm.SI.vs.load.input", emit_data->dst_type,
3328 emit_data->args, emit_data->arg_count,
3329 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
3330 return;
3331 }
3332
3333 switch (opcode) {
3334 case TGSI_OPCODE_TXF:
3335 name = target == TGSI_TEXTURE_2D_MSAA ||
3336 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
3337 "llvm.SI.image.load" :
3338 "llvm.SI.image.load.mip";
3339 is_shadow = false;
3340 has_offset = false;
3341 break;
3342 case TGSI_OPCODE_TXQ:
3343 name = "llvm.SI.getresinfo";
3344 is_shadow = false;
3345 has_offset = false;
3346 break;
3347 case TGSI_OPCODE_LODQ:
3348 name = "llvm.SI.getlod";
3349 is_shadow = false;
3350 has_offset = false;
3351 break;
3352 case TGSI_OPCODE_TEX:
3353 case TGSI_OPCODE_TEX2:
3354 case TGSI_OPCODE_TXP:
3355 break;
3356 case TGSI_OPCODE_TXB:
3357 case TGSI_OPCODE_TXB2:
3358 infix = ".b";
3359 break;
3360 case TGSI_OPCODE_TXL:
3361 case TGSI_OPCODE_TXL2:
3362 infix = ".l";
3363 break;
3364 case TGSI_OPCODE_TXD:
3365 infix = ".d";
3366 break;
3367 case TGSI_OPCODE_TG4:
3368 name = "llvm.SI.gather4";
3369 break;
3370 default:
3371 assert(0);
3372 return;
3373 }
3374
3375 /* Add the type and suffixes .c, .o if needed. */
3376 build_int_type_name(LLVMTypeOf(emit_data->args[0]), type, sizeof(type));
3377 sprintf(intr_name, "%s%s%s%s.%s",
3378 name, is_shadow ? ".c" : "", infix,
3379 has_offset ? ".o" : "", type);
3380
3381 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3382 base->gallivm->builder, intr_name, emit_data->dst_type,
3383 emit_data->args, emit_data->arg_count,
3384 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
3385
3386 /* Divide the number of layers by 6 to get the number of cubes. */
3387 if (opcode == TGSI_OPCODE_TXQ &&
3388 (target == TGSI_TEXTURE_CUBE_ARRAY ||
3389 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)) {
3390 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3391 LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
3392 LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);
3393
3394 LLVMValueRef v4 = emit_data->output[emit_data->chan];
3395 LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
3396 z = LLVMBuildSDiv(builder, z, six, "");
3397
3398 emit_data->output[emit_data->chan] =
3399 LLVMBuildInsertElement(builder, v4, z, two, "");
3400 }
3401 }
3402
3403 static void si_llvm_emit_txqs(
3404 const struct lp_build_tgsi_action *action,
3405 struct lp_build_tgsi_context *bld_base,
3406 struct lp_build_emit_data *emit_data)
3407 {
3408 struct si_shader_context *ctx = si_shader_context(bld_base);
3409 struct gallivm_state *gallivm = bld_base->base.gallivm;
3410 LLVMBuilderRef builder = gallivm->builder;
3411 LLVMValueRef res, samples;
3412 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
3413
3414 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
3415
3416
3417 /* Read the samples from the descriptor directly. */
3418 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
3419 samples = LLVMBuildExtractElement(
3420 builder, res,
3421 lp_build_const_int32(gallivm, 3), "");
3422 samples = LLVMBuildLShr(builder, samples,
3423 lp_build_const_int32(gallivm, 16), "");
3424 samples = LLVMBuildAnd(builder, samples,
3425 lp_build_const_int32(gallivm, 0xf), "");
3426 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
3427 samples, "");
3428
3429 emit_data->output[emit_data->chan] = samples;
3430 }
3431
3432 /*
3433 * SI implements derivatives using the local data store (LDS)
3434 * All writes to the LDS happen in all executing threads at
3435 * the same time. TID is the Thread ID for the current
3436 * thread and is a value between 0 and 63, representing
3437 * the thread's position in the wavefront.
3438 *
 * For the pixel shader, threads are grouped into quads of four pixels.
3440 * The TIDs of the pixels of a quad are:
3441 *
3442 * +------+------+
3443 * |4n + 0|4n + 1|
3444 * +------+------+
3445 * |4n + 2|4n + 3|
3446 * +------+------+
3447 *
3448 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
3449 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
3450 * the current pixel's column, and masking with 0xfffffffe yields the TID
3451 * of the left pixel of the current pixel's row.
3452 *
3453 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
3454 * adding 2 yields the TID of the pixel below the top pixel.
3455 */
3456 /* masks for thread ID. */
3457 #define TID_MASK_TOP_LEFT 0xfffffffc
3458 #define TID_MASK_TOP 0xfffffffd
3459 #define TID_MASK_LEFT 0xfffffffe
3460
/* Emit DDX/DDY (and their _FINE variants) by exchanging values between the
 * threads of a pixel quad through LDS; see the comment block above for the
 * quad layout and TID masking scheme.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	unsigned swizzle[4];
	unsigned c;
	int idx;
	unsigned mask;

	/* Each thread writes its value to LDS at its own TID... */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid", ctx->i32,
					NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* ...and picks the mask that selects the quad neighbor(s) to read. */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, mask), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* For DDX we want the next X pixel, for DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, idx), "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		/* If an earlier channel used the same source swizzle, reuse
		 * its result instead of recomputing. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						ctx->i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");

		/* derivative = neighbor - base */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
3533
3534 /*
3535 * this takes an I,J coordinate pair,
3536 * and works out the X and Y derivatives.
3537 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
3538 */
3539 static LLVMValueRef si_llvm_emit_ddxy_interp(
3540 struct lp_build_tgsi_context *bld_base,
3541 LLVMValueRef interp_ij)
3542 {
3543 struct si_shader_context *ctx = si_shader_context(bld_base);
3544 struct gallivm_state *gallivm = bld_base->base.gallivm;
3545 LLVMValueRef indices[2];
3546 LLVMValueRef store_ptr, load_ptr_x, load_ptr_y, load_ptr_ddx, load_ptr_ddy, temp, temp2;
3547 LLVMValueRef tl, tr, bl, result[4];
3548 unsigned c;
3549
3550 indices[0] = bld_base->uint_bld.zero;
3551 indices[1] = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid", ctx->i32,
3552 NULL, 0, LLVMReadNoneAttribute);
3553 store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
3554 indices, 2, "");
3555
3556 temp = LLVMBuildAnd(gallivm->builder, indices[1],
3557 lp_build_const_int32(gallivm, TID_MASK_LEFT), "");
3558
3559 temp2 = LLVMBuildAnd(gallivm->builder, indices[1],
3560 lp_build_const_int32(gallivm, TID_MASK_TOP), "");
3561
3562 indices[1] = temp;
3563 load_ptr_x = LLVMBuildGEP(gallivm->builder, ctx->lds,
3564 indices, 2, "");
3565
3566 indices[1] = temp2;
3567 load_ptr_y = LLVMBuildGEP(gallivm->builder, ctx->lds,
3568 indices, 2, "");
3569
3570 indices[1] = LLVMBuildAdd(gallivm->builder, temp,
3571 lp_build_const_int32(gallivm, 1), "");
3572 load_ptr_ddx = LLVMBuildGEP(gallivm->builder, ctx->lds,
3573 indices, 2, "");
3574
3575 indices[1] = LLVMBuildAdd(gallivm->builder, temp2,
3576 lp_build_const_int32(gallivm, 2), "");
3577 load_ptr_ddy = LLVMBuildGEP(gallivm->builder, ctx->lds,
3578 indices, 2, "");
3579
3580 for (c = 0; c < 2; ++c) {
3581 LLVMValueRef store_val;
3582 LLVMValueRef c_ll = lp_build_const_int32(gallivm, c);
3583
3584 store_val = LLVMBuildExtractElement(gallivm->builder,
3585 interp_ij, c_ll, "");
3586 LLVMBuildStore(gallivm->builder,
3587 store_val,
3588 store_ptr);
3589
3590 tl = LLVMBuildLoad(gallivm->builder, load_ptr_x, "");
3591 tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
3592
3593 tr = LLVMBuildLoad(gallivm->builder, load_ptr_ddx, "");
3594 tr = LLVMBuildBitCast(gallivm->builder, tr, ctx->f32, "");
3595
3596 result[c] = LLVMBuildFSub(gallivm->builder, tr, tl, "");
3597
3598 tl = LLVMBuildLoad(gallivm->builder, load_ptr_y, "");
3599 tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
3600
3601 bl = LLVMBuildLoad(gallivm->builder, load_ptr_ddy, "");
3602 bl = LLVMBuildBitCast(gallivm->builder, bl, ctx->f32, "");
3603
3604 result[c + 2] = LLVMBuildFSub(gallivm->builder, bl, tl, "");
3605 }
3606
3607 return lp_build_gather_values(gallivm, result, 4);
3608 }
3609
3610 static void interp_fetch_args(
3611 struct lp_build_tgsi_context *bld_base,
3612 struct lp_build_emit_data *emit_data)
3613 {
3614 struct si_shader_context *ctx = si_shader_context(bld_base);
3615 struct gallivm_state *gallivm = bld_base->base.gallivm;
3616 const struct tgsi_full_instruction *inst = emit_data->inst;
3617
3618 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
3619 /* offset is in second src, first two channels */
3620 emit_data->args[0] = lp_build_emit_fetch(bld_base,
3621 emit_data->inst, 1,
3622 TGSI_CHAN_X);
3623 emit_data->args[1] = lp_build_emit_fetch(bld_base,
3624 emit_data->inst, 1,
3625 TGSI_CHAN_Y);
3626 emit_data->arg_count = 2;
3627 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
3628 LLVMValueRef sample_position;
3629 LLVMValueRef sample_id;
3630 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
3631
3632 /* fetch sample ID, then fetch its sample position,
3633 * and place into first two channels.
3634 */
3635 sample_id = lp_build_emit_fetch(bld_base,
3636 emit_data->inst, 1, TGSI_CHAN_X);
3637 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
3638 ctx->i32, "");
3639 sample_position = load_sample_position(&ctx->radeon_bld, sample_id);
3640
3641 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
3642 sample_position,
3643 lp_build_const_int32(gallivm, 0), "");
3644
3645 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
3646 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
3647 sample_position,
3648 lp_build_const_int32(gallivm, 1), "");
3649 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
3650 emit_data->arg_count = 2;
3651 }
3652 }
3653
/* Emit llvm.SI.fs.interp (or llvm.SI.fs.constant for flat-shaded inputs)
 * for the INTERP_* instructions.
 *
 * For INTERP_OFFSET/INTERP_SAMPLE, new barycentric I/J values are derived
 * from the center barycentrics and their screen-space derivatives using the
 * offset fetched by interp_fetch_args.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const char *intr_name;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* Offset/sample interpolation starts from the center barycentrics;
	 * INTERP_CENTROID uses the centroid ones. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* interp_param_idx == 0 means flat shading (no interpolation). */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->radeon_bld.main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = lp_build_const_int32(gallivm, input_index);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
			LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");

			ij_out[i] = LLVMBuildBitCast(gallivm->builder,
						     temp2, ctx->i32, "");
		}
		interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
	/* NOTE(review): only channels 0 and 1 of the output are written here
	 * — confirm that callers only consume the first two components. */
	for (chan = 0; chan < 2; chan++) {
		LLVMValueRef args[4];
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = lp_build_const_int32(gallivm, schan);

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;

		/* Flat inputs use llvm.SI.fs.constant, which takes no
		 * barycentric argument. */
		emit_data->output[chan] =
			lp_build_intrinsic(gallivm->builder, intr_name,
					   ctx->f32, args, args[3] ? 4 : 3,
					   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
3752
3753 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3754 struct lp_build_emit_data *emit_data)
3755 {
3756 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
3757 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3758 unsigned stream;
3759
3760 assert(src0.File == TGSI_FILE_IMMEDIATE);
3761
3762 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
3763 return stream;
3764 }
3765
/* Emit one vertex from the geometry shader.
 *
 * Writes all declared outputs of the current vertex to the GSVS ring buffer
 * for the instruction's stream, bumps the per-stream vertex counter, and
 * signals the vertex emission via an s_sendmsg-style intrinsic.  Threads
 * that have already emitted gs_max_out_vertices are killed first, because
 * excessive emissions must have no observable effect.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	/* Base offset of this shader's GSVS ring slice (SGPR input). */
	LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					    SI_PARAM_GS2VS_OFFSET);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	LLVMValueRef args[2];
	unsigned chan;
	int i;
	unsigned stream;

	/* Stream index comes from the instruction's immediate operand. */
	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
				 lp_build_const_int32(gallivm,
						      shader->selector->gs_max_out_vertices), "");
	/* kill takes a float: >= 0 keeps the thread, < 0 kills it. */
	kill = lp_build_select(&bld_base->base, can_emit,
			       lp_build_const_float(gallivm, 1.0f),
			       lp_build_const_float(gallivm, -1.0f));

	lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
			   ctx->voidt, &kill, 1, 0);

	/* Store every channel of every output.  The ring layout groups all
	 * vertices of one (output, channel) pair together, hence the
	 * (i * 4 + chan) * gs_max_out_vertices base offset. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->radeon_bld.soa.outputs[i];

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			LLVMValueRef voffset =
				lp_build_const_int32(gallivm, (i * 4 + chan) *
						     shader->selector->gs_max_out_vertices);

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4); /* dwords -> bytes */

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			build_tbuffer_store(ctx,
					    ctx->gsvs_ring[stream],
					    out_val, 1,
					    voffset, soffset, 0,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    1, 0, 1, 1, 0);
		}
	}
	/* Advance and persist the per-stream vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      lp_build_const_int32(gallivm, 1));

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
	args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
	lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
			   ctx->voidt, args, 2, LLVMNoUnwindAttribute);
}
3843
3844 /* Cut one primitive from the geometry shader */
3845 static void si_llvm_emit_primitive(
3846 const struct lp_build_tgsi_action *action,
3847 struct lp_build_tgsi_context *bld_base,
3848 struct lp_build_emit_data *emit_data)
3849 {
3850 struct si_shader_context *ctx = si_shader_context(bld_base);
3851 struct gallivm_state *gallivm = bld_base->base.gallivm;
3852 LLVMValueRef args[2];
3853 unsigned stream;
3854
3855 /* Signal primitive cut */
3856 stream = si_llvm_get_stream(bld_base, emit_data);
3857 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
3858 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
3859 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
3860 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
3861 }
3862
3863 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
3864 struct lp_build_tgsi_context *bld_base,
3865 struct lp_build_emit_data *emit_data)
3866 {
3867 struct si_shader_context *ctx = si_shader_context(bld_base);
3868 struct gallivm_state *gallivm = bld_base->base.gallivm;
3869
3870 lp_build_intrinsic(gallivm->builder,
3871 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
3872 : "llvm.AMDGPU.barrier.local",
3873 ctx->voidt, NULL, 0, LLVMNoUnwindAttribute);
3874 }
3875
/* Action table entry for TGSI texture opcodes: tex_fetch_args gathers and
 * packs the operands, build_tex_intrinsic emits the sample/load intrinsic. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
3880
/* Action table entry for the TGSI INTERP_* opcodes (interpolate at
 * centroid/sample/offset). */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3885
3886 static void si_create_function(struct si_shader_context *ctx,
3887 LLVMTypeRef *returns, unsigned num_returns,
3888 LLVMTypeRef *params, unsigned num_params,
3889 int last_array_pointer, int last_sgpr)
3890 {
3891 int i;
3892
3893 radeon_llvm_create_func(&ctx->radeon_bld, returns, num_returns,
3894 params, num_params);
3895 radeon_llvm_shader_type(ctx->radeon_bld.main_fn, ctx->type);
3896 ctx->return_value = LLVMGetUndef(ctx->radeon_bld.return_type);
3897
3898 for (i = 0; i <= last_sgpr; ++i) {
3899 LLVMValueRef P = LLVMGetParam(ctx->radeon_bld.main_fn, i);
3900
3901 /* We tell llvm that array inputs are passed by value to allow Sinking pass
3902 * to move load. Inputs are constant so this is fine. */
3903 if (i <= last_array_pointer)
3904 LLVMAddAttribute(P, LLVMByValAttribute);
3905 else
3906 LLVMAddAttribute(P, LLVMInRegAttribute);
3907 }
3908 }
3909
3910 static void create_meta_data(struct si_shader_context *ctx)
3911 {
3912 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
3913 LLVMValueRef args[3];
3914
3915 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
3916 args[1] = 0;
3917 args[2] = lp_build_const_int32(gallivm, 1);
3918
3919 ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
3920 }
3921
3922 static void declare_streamout_params(struct si_shader_context *ctx,
3923 struct pipe_stream_output_info *so,
3924 LLVMTypeRef *params, LLVMTypeRef i32,
3925 unsigned *num_params)
3926 {
3927 int i;
3928
3929 /* Streamout SGPRs. */
3930 if (so->num_outputs) {
3931 params[ctx->param_streamout_config = (*num_params)++] = i32;
3932 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
3933 }
3934 /* A streamout buffer offset is loaded if the stride is non-zero. */
3935 for (i = 0; i < 4; i++) {
3936 if (!so->stride[i])
3937 continue;
3938
3939 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
3940 }
3941 }
3942
3943 static unsigned llvm_get_type_size(LLVMTypeRef type)
3944 {
3945 LLVMTypeKind kind = LLVMGetTypeKind(type);
3946
3947 switch (kind) {
3948 case LLVMIntegerTypeKind:
3949 return LLVMGetIntTypeWidth(type) / 8;
3950 case LLVMFloatTypeKind:
3951 return 4;
3952 case LLVMPointerTypeKind:
3953 return 8;
3954 case LLVMVectorTypeKind:
3955 return LLVMGetVectorSize(type) *
3956 llvm_get_type_size(LLVMGetElementType(type));
3957 default:
3958 assert(0);
3959 return 0;
3960 }
3961 }
3962
3963 static void declare_tess_lds(struct si_shader_context *ctx)
3964 {
3965 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3966 LLVMTypeRef i32 = ctx->radeon_bld.soa.bld_base.uint_bld.elem_type;
3967
3968 /* This is the upper bound, maximum is 32 inputs times 32 vertices */
3969 unsigned vertex_data_dw_size = 32*32*4;
3970 unsigned patch_data_dw_size = 32*4;
3971 /* The formula is: TCS inputs + TCS outputs + TCS patch outputs. */
3972 unsigned patch_dw_size = vertex_data_dw_size*2 + patch_data_dw_size;
3973 unsigned lds_dwords = patch_dw_size;
3974
3975 /* The actual size is computed outside of the shader to reduce
3976 * the number of shader variants. */
3977 ctx->lds =
3978 LLVMAddGlobalInAddressSpace(gallivm->module,
3979 LLVMArrayType(i32, lds_dwords),
3980 "tess_lds",
3981 LOCAL_ADDR_SPACE);
3982 }
3983
/* Declare the main shader function's inputs and outputs and create it.
 *
 * Per stage this sets up the parameter list (descriptor-array pointers
 * first, then SGPR inputs, then VGPR inputs) and, for non-monolithic
 * shader parts, the values returned to the epilog.  It also records
 * input SGPR/VGPR counts and allocates LDS globals where needed.
 *
 * NOTE(review): the SI_PARAM_* indices and the dynamic num_params
 * bookkeeping must stay in sync with the prolog/epilog generators and
 * the state-setting code elsewhere in the driver.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_array_pointer, last_sgpr, num_params, num_return_sgprs;
	unsigned num_returns = 0;

	v3i32 = LLVMVectorType(ctx->i32, 3);

	/* Descriptor arrays common to all stages. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
	params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
	params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
	last_array_pointer = SI_PARAM_IMAGES;

	switch (ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
		last_array_pointer = SI_PARAM_VERTEX_BUFFERS;
		params[SI_PARAM_BASE_VERTEX] = ctx->i32;
		params[SI_PARAM_START_INSTANCE] = ctx->i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* Tail SGPRs depend on which hardware stage the VS runs as:
		 * ES (feeding a GS), LS (feeding tessellation), or real VS. */
		if (shader->key.vs.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.vs.as_ls) {
			params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
			num_params = SI_PARAM_LS_OUT_LAYOUT+1;
		} else {
			if (ctx->is_gs_copy_shader) {
				last_array_pointer = SI_PARAM_CONST_BUFFERS;
				num_params = SI_PARAM_CONST_BUFFERS+1;
			} else {
				params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
				num_params = SI_PARAM_VS_STATE_BITS+1;
			}

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[ctx->param_vertex_id = num_params++] = ctx->i32;
		params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
		params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_instance_id = num_params++] = ctx->i32;

		if (!ctx->is_monolithic &&
		    !ctx->is_gs_copy_shader) {
			/* Vertex load indices. */
			ctx->param_vertex_index0 = num_params;

			for (i = 0; i < shader->selector->info.num_inputs; i++)
				params[num_params++] = ctx->i32;

			/* PrimitiveID output. */
			if (!shader->key.vs.as_es && !shader->key.vs.as_ls)
				for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
					returns[num_returns++] = ctx->f32;
		}
		break;

	case TGSI_PROCESSOR_TESS_CTRL:
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
		params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
		last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;

		/* VGPRs */
		params[SI_PARAM_PATCH_ID] = ctx->i32;
		params[SI_PARAM_REL_IDS] = ctx->i32;
		num_params = SI_PARAM_REL_IDS+1;

		if (!ctx->is_monolithic) {
			/* PARAM_TESS_FACTOR_OFFSET is after user SGPRs. */
			for (i = 0; i <= SI_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */

			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case TGSI_PROCESSOR_TESS_EVAL:
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		num_params = SI_PARAM_TCS_OUT_LAYOUT+1;

		if (shader->key.tes.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tes_u = num_params++] = ctx->f32;
		params[ctx->param_tes_v = num_params++] = ctx->f32;
		params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tes_patch_id = num_params++] = ctx->i32;

		/* PrimitiveID output. */
		if (!ctx->is_monolithic && !shader->key.tes.as_es)
			for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
				returns[num_returns++] = ctx->f32;
		break;

	case TGSI_PROCESSOR_GEOMETRY:
		params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
		last_sgpr = SI_PARAM_GS_WAVE_ID;

		/* VGPRs */
		params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
		params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
		params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
		num_params = SI_PARAM_GS_INSTANCE_ID+1;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		/* Interpolants and position come in as VGPRs. */
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		if (!ctx->is_monolithic) {
			/* Color inputs from the prolog. */
			if (shader->selector->info.colors_read) {
				unsigned num_color_elements =
					util_bitcount(shader->selector->info.colors_read);

				assert(num_params + num_color_elements <= ARRAY_SIZE(params));
				for (i = 0; i < num_color_elements; i++)
					params[num_params++] = ctx->f32;
			}

			/* Outputs for the epilog. */
			num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
			num_returns =
				num_return_sgprs +
				util_bitcount(shader->selector->info.colors_written) * 4 +
				shader->selector->info.writes_z +
				shader->selector->info.writes_stencil +
				shader->selector->info.writes_samplemask +
				1 /* SampleMaskIn */;

			num_returns = MAX2(num_returns,
					   num_return_sgprs +
					   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

			for (i = 0; i < num_return_sgprs; i++)
				returns[i] = ctx->i32;
			for (; i < num_returns; i++)
				returns[i] = ctx->f32;
		}
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));

	si_create_function(ctx, returns, num_returns, params,
			   num_params, last_array_pointer, last_sgpr);

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == TGSI_PROCESSOR_FRAGMENT &&
	    !ctx->is_monolithic) {
		radeon_llvm_add_attribute(ctx->radeon_bld.main_fn,
					  "InitialPSInputAddr",
					  S_0286D0_PERSP_SAMPLE_ENA(1) |
					  S_0286D0_PERSP_CENTER_ENA(1) |
					  S_0286D0_PERSP_CENTROID_ENA(1) |
					  S_0286D0_LINEAR_SAMPLE_ENA(1) |
					  S_0286D0_LINEAR_CENTER_ENA(1) |
					  S_0286D0_LINEAR_CENTROID_ENA(1) |
					  S_0286D0_FRONT_FACE_ENA(1) |
					  S_0286D0_POS_FIXED_PT_ENA(1));
	}

	/* Count input registers in units of dwords (4 bytes each). */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	/* Unused fragment shader inputs are eliminated by the compiler,
	 * so we don't know yet how many there will be.
	 */
	if (ctx->type != TGSI_PROCESSOR_FRAGMENT)
		for (; i < num_params; ++i)
			shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Derivative and offset/sample interpolation opcodes need a scratch
	 * LDS area for cross-lane data exchange. */
	if (bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if ((ctx->type == TGSI_PROCESSOR_VERTEX && shader->key.vs.as_ls) ||
	    ctx->type == TGSI_PROCESSOR_TESS_CTRL ||
	    ctx->type == TGSI_PROCESSOR_TESS_EVAL)
		declare_tess_lds(ctx);
}
4226
4227 static void preload_constants(struct si_shader_context *ctx)
4228 {
4229 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4230 struct gallivm_state *gallivm = bld_base->base.gallivm;
4231 const struct tgsi_shader_info *info = bld_base->info;
4232 unsigned buf;
4233 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
4234
4235 for (buf = 0; buf < SI_NUM_CONST_BUFFERS; buf++) {
4236 unsigned i, num_const = info->const_file_max[buf] + 1;
4237
4238 if (num_const == 0)
4239 continue;
4240
4241 /* Allocate space for the constant values */
4242 ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));
4243
4244 /* Load the resource descriptor */
4245 ctx->const_buffers[buf] =
4246 build_indexed_load_const(ctx, ptr, lp_build_const_int32(gallivm, buf));
4247
4248 /* Load the constants, we rely on the code sinking to do the rest */
4249 for (i = 0; i < num_const * 4; ++i) {
4250 ctx->constants[buf][i] =
4251 buffer_load_const(gallivm->builder,
4252 ctx->const_buffers[buf],
4253 lp_build_const_int32(gallivm, i * 4),
4254 ctx->f32);
4255 }
4256 }
4257 }
4258
4259 static void preload_samplers(struct si_shader_context *ctx)
4260 {
4261 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4262 struct gallivm_state *gallivm = bld_base->base.gallivm;
4263 const struct tgsi_shader_info *info = bld_base->info;
4264 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
4265 LLVMValueRef offset;
4266
4267 if (num_samplers == 0)
4268 return;
4269
4270 /* Load the resources and samplers, we rely on the code sinking to do the rest */
4271 for (i = 0; i < num_samplers; ++i) {
4272 /* Resource */
4273 offset = lp_build_const_int32(gallivm, i);
4274 ctx->sampler_views[i] =
4275 get_sampler_desc(ctx, offset, DESC_IMAGE);
4276
4277 /* FMASK resource */
4278 if (info->is_msaa_sampler[i])
4279 ctx->fmasks[i] =
4280 get_sampler_desc(ctx, offset, DESC_FMASK);
4281 else
4282 ctx->sampler_states[i] =
4283 get_sampler_desc(ctx, offset, DESC_SAMPLER);
4284 }
4285 }
4286
4287 static void preload_images(struct si_shader_context *ctx)
4288 {
4289 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4290 struct gallivm_state *gallivm = bld_base->base.gallivm;
4291 unsigned num_images = bld_base->info->file_max[TGSI_FILE_IMAGE] + 1;
4292 LLVMValueRef res_ptr;
4293 unsigned i;
4294
4295 if (num_images == 0)
4296 return;
4297
4298 res_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
4299
4300 for (i = 0; i < num_images; ++i) {
4301 /* Rely on LLVM to shrink the load for buffer resources. */
4302 ctx->images[i] =
4303 build_indexed_load_const(ctx, res_ptr,
4304 lp_build_const_int32(gallivm, i));
4305 }
4306 }
4307
4308 static void preload_streamout_buffers(struct si_shader_context *ctx)
4309 {
4310 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4311 struct gallivm_state *gallivm = bld_base->base.gallivm;
4312 unsigned i;
4313
4314 /* Streamout can only be used if the shader is compiled as VS. */
4315 if (!ctx->shader->selector->so.num_outputs ||
4316 (ctx->type == TGSI_PROCESSOR_VERTEX &&
4317 (ctx->shader->key.vs.as_es ||
4318 ctx->shader->key.vs.as_ls)) ||
4319 (ctx->type == TGSI_PROCESSOR_TESS_EVAL &&
4320 ctx->shader->key.tes.as_es))
4321 return;
4322
4323 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
4324 SI_PARAM_RW_BUFFERS);
4325
4326 /* Load the resources, we rely on the code sinking to do the rest */
4327 for (i = 0; i < 4; ++i) {
4328 if (ctx->shader->selector->so.stride[i]) {
4329 LLVMValueRef offset = lp_build_const_int32(gallivm,
4330 SI_SO_BUF_OFFSET + i);
4331
4332 ctx->so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
4333 }
4334 }
4335 }
4336
4337 /**
4338 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
4339 * for later use.
4340 */
4341 static void preload_ring_buffers(struct si_shader_context *ctx)
4342 {
4343 struct gallivm_state *gallivm =
4344 ctx->radeon_bld.soa.bld_base.base.gallivm;
4345
4346 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
4347 SI_PARAM_RW_BUFFERS);
4348
4349 if ((ctx->type == TGSI_PROCESSOR_VERTEX &&
4350 ctx->shader->key.vs.as_es) ||
4351 (ctx->type == TGSI_PROCESSOR_TESS_EVAL &&
4352 ctx->shader->key.tes.as_es) ||
4353 ctx->type == TGSI_PROCESSOR_GEOMETRY) {
4354 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_ESGS);
4355
4356 ctx->esgs_ring =
4357 build_indexed_load_const(ctx, buf_ptr, offset);
4358 }
4359
4360 if (ctx->is_gs_copy_shader) {
4361 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS);
4362
4363 ctx->gsvs_ring[0] =
4364 build_indexed_load_const(ctx, buf_ptr, offset);
4365 }
4366 if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
4367 int i;
4368 for (i = 0; i < 4; i++) {
4369 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS + i);
4370
4371 ctx->gsvs_ring[i] =
4372 build_indexed_load_const(ctx, buf_ptr, offset);
4373 }
4374 }
4375 }
4376
/* Emit code implementing polygon stippling in the PS prolog.
 *
 * Samples the 32x32 stipple pattern texture at (frag_x % 32, frag_y % 32)
 * using a synthesized TXF instruction, then kills the fragment when the
 * fetched alpha channel is zero (kill triggers on negative values, and
 * -0.0 is not negative, so a zero texel keeps... the negation makes a
 * non-zero texel positive stay alive and a zero texel yield -0.0 which
 * does not kill; NOTE(review): the exact keep/kill convention of
 * llvm.AMDGPU.kill for -0.0 should be confirmed against its docs).
 *
 * \param param_sampler_views  function parameter holding the sampler
 *                             view descriptor array
 * \param param_pos_fixed_pt   function parameter index of the fixed-point
 *                             fragment position VGPR
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_sampler_views,
					 unsigned param_pos_fixed_pt)
{
	struct lp_build_tgsi_context *bld_base =
		&ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct lp_build_emit_data result = {};
	struct tgsi_full_instruction inst = {};
	LLVMValueRef desc, sampler_index, address[2], pix;

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the sampler view descriptor. */
	sampler_index = lp_build_const_int32(gallivm, SI_POLY_STIPPLE_SAMPLER);
	desc = get_sampler_desc_custom(ctx, param_sampler_views,
				       sampler_index, DESC_IMAGE);

	/* Load the texel. */
	inst.Instruction.Opcode = TGSI_OPCODE_TXF;
	inst.Texture.Texture = TGSI_TEXTURE_2D_MSAA; /* = use load, not load_mip */
	result.inst = &inst;
	set_tex_fetch_args(ctx, &result, TGSI_OPCODE_TXF,
			   inst.Texture.Texture,
			   desc, NULL, address, ARRAY_SIZE(address), 0xf);
	build_tex_intrinsic(&tex_action, bld_base, &result);

	/* Kill the thread accordingly. */
	pix = LLVMBuildExtractElement(gallivm->builder, result.output[0],
				      lp_build_const_int32(gallivm, 3), "");
	pix = bitcast(bld_base, TGSI_TYPE_FLOAT, pix);
	pix = LLVMBuildFNeg(gallivm->builder, pix, "");

	lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
			   LLVMVoidTypeInContext(gallivm->context),
			   &pix, 1, 0);
}
4419
4420 void si_shader_binary_read_config(struct radeon_shader_binary *binary,
4421 struct si_shader_config *conf,
4422 unsigned symbol_offset)
4423 {
4424 unsigned i;
4425 const unsigned char *config =
4426 radeon_shader_binary_config_start(binary, symbol_offset);
4427
4428 /* XXX: We may be able to emit some of these values directly rather than
4429 * extracting fields to be emitted later.
4430 */
4431
4432 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
4433 unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
4434 unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
4435 switch (reg) {
4436 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
4437 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
4438 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
4439 case R_00B848_COMPUTE_PGM_RSRC1:
4440 conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
4441 conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
4442 conf->float_mode = G_00B028_FLOAT_MODE(value);
4443 conf->rsrc1 = value;
4444 break;
4445 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
4446 conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
4447 break;
4448 case R_00B84C_COMPUTE_PGM_RSRC2:
4449 conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
4450 conf->rsrc2 = value;
4451 break;
4452 case R_0286CC_SPI_PS_INPUT_ENA:
4453 conf->spi_ps_input_ena = value;
4454 break;
4455 case R_0286D0_SPI_PS_INPUT_ADDR:
4456 conf->spi_ps_input_addr = value;
4457 break;
4458 case R_0286E8_SPI_TMPRING_SIZE:
4459 case R_00B860_COMPUTE_TMPRING_SIZE:
4460 /* WAVESIZE is in units of 256 dwords. */
4461 conf->scratch_bytes_per_wave =
4462 G_00B860_WAVESIZE(value) * 256 * 4 * 1;
4463 break;
4464 default:
4465 {
4466 static bool printed;
4467
4468 if (!printed) {
4469 fprintf(stderr, "Warning: LLVM emitted unknown "
4470 "config register: 0x%x\n", reg);
4471 printed = true;
4472 }
4473 }
4474 break;
4475 }
4476
4477 if (!conf->spi_ps_input_addr)
4478 conf->spi_ps_input_addr = conf->spi_ps_input_ena;
4479 }
4480 }
4481
4482 void si_shader_apply_scratch_relocs(struct si_context *sctx,
4483 struct si_shader *shader,
4484 uint64_t scratch_va)
4485 {
4486 unsigned i;
4487 uint32_t scratch_rsrc_dword0 = scratch_va;
4488 uint32_t scratch_rsrc_dword1 =
4489 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32)
4490 | S_008F04_STRIDE(shader->config.scratch_bytes_per_wave / 64);
4491
4492 for (i = 0 ; i < shader->binary.reloc_count; i++) {
4493 const struct radeon_shader_reloc *reloc =
4494 &shader->binary.relocs[i];
4495 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
4496 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4497 &scratch_rsrc_dword0, 4);
4498 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
4499 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
4500 &scratch_rsrc_dword1, 4);
4501 }
4502 }
4503 }
4504
4505 static unsigned si_get_shader_binary_size(struct si_shader *shader)
4506 {
4507 unsigned size = shader->binary.code_size;
4508
4509 if (shader->prolog)
4510 size += shader->prolog->binary.code_size;
4511 if (shader->epilog)
4512 size += shader->epilog->binary.code_size;
4513 return size;
4514 }
4515
4516 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
4517 {
4518 const struct radeon_shader_binary *prolog =
4519 shader->prolog ? &shader->prolog->binary : NULL;
4520 const struct radeon_shader_binary *epilog =
4521 shader->epilog ? &shader->epilog->binary : NULL;
4522 const struct radeon_shader_binary *mainb = &shader->binary;
4523 unsigned bo_size = si_get_shader_binary_size(shader) +
4524 (!epilog ? mainb->rodata_size : 0);
4525 unsigned char *ptr;
4526
4527 assert(!prolog || !prolog->rodata_size);
4528 assert((!prolog && !epilog) || !mainb->rodata_size);
4529 assert(!epilog || !epilog->rodata_size);
4530
4531 r600_resource_reference(&shader->bo, NULL);
4532 shader->bo = si_resource_create_custom(&sscreen->b.b,
4533 PIPE_USAGE_IMMUTABLE,
4534 bo_size);
4535 if (!shader->bo)
4536 return -ENOMEM;
4537
4538 /* Upload. */
4539 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
4540 PIPE_TRANSFER_READ_WRITE);
4541
4542 if (prolog) {
4543 util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
4544 ptr += prolog->code_size;
4545 }
4546
4547 util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
4548 ptr += mainb->code_size;
4549
4550 if (epilog)
4551 util_memcpy_cpu_to_le32(ptr, epilog->code, epilog->code_size);
4552 else if (mainb->rodata_size > 0)
4553 util_memcpy_cpu_to_le32(ptr, mainb->rodata, mainb->rodata_size);
4554
4555 sscreen->b.ws->buffer_unmap(shader->bo->buf);
4556 return 0;
4557 }
4558
/* Print a shader part's disassembly (or a hex dump if no disassembly is
 * available) to \p file, and mirror it line-by-line to the debug callback.
 *
 * \param binary  the compiled shader binary
 * \param debug   optional debug callback; may be NULL
 * \param name    human-readable shader part name used in headers
 * \param file    stream to print to
 */
static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
				       struct pipe_debug_callback *debug,
				       const char *name, FILE *file)
{
	char *line, *p;
	unsigned i, count;

	if (binary->disasm_string) {
		fprintf(file, "Shader %s disassembly:\n", name);
		fprintf(file, "%s", binary->disasm_string);

		if (debug && debug->debug_message) {
			/* Very long debug messages are cut off, so send the
			 * disassembly one line at a time. This causes more
			 * overhead, but on the plus side it simplifies
			 * parsing of resulting logs.
			 */
			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly Begin");

			line = binary->disasm_string;
			while (*line) {
				/* strchrnul: points at '\n' or the final NUL. */
				p = strchrnul(line, '\n');
				count = p - line;

				/* Skip empty lines. */
				if (count) {
					pipe_debug_message(debug, SHADER_INFO,
							   "%.*s", count, line);
				}

				if (!*p)
					break;
				line = p + 1;
			}

			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly End");
		}
	} else {
		/* No disassembler available: dump raw dwords (little-endian,
		 * printed most-significant byte first). */
		fprintf(file, "Shader %s binary:\n", name);
		for (i = 0; i < binary->code_size; i += 4) {
			fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
				binary->code[i + 3], binary->code[i + 2],
				binary->code[i + 1], binary->code[i]);
		}
	}
}
4606
4607 static void si_shader_dump_stats(struct si_screen *sscreen,
4608 struct si_shader_config *conf,
4609 unsigned num_inputs,
4610 unsigned code_size,
4611 struct pipe_debug_callback *debug,
4612 unsigned processor,
4613 FILE *file)
4614 {
4615 unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
4616 unsigned lds_per_wave = 0;
4617 unsigned max_simd_waves = 10;
4618
4619 /* Compute LDS usage for PS. */
4620 if (processor == TGSI_PROCESSOR_FRAGMENT) {
4621 /* The minimum usage per wave is (num_inputs * 36). The maximum
4622 * usage is (num_inputs * 36 * 16).
4623 * We can get anything in between and it varies between waves.
4624 *
4625 * Other stages don't know the size at compile time or don't
4626 * allocate LDS per wave, but instead they do it per thread group.
4627 */
4628 lds_per_wave = conf->lds_size * lds_increment +
4629 align(num_inputs * 36, lds_increment);
4630 }
4631
4632 /* Compute the per-SIMD wave counts. */
4633 if (conf->num_sgprs) {
4634 if (sscreen->b.chip_class >= VI)
4635 max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
4636 else
4637 max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
4638 }
4639
4640 if (conf->num_vgprs)
4641 max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);
4642
4643 /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
4644 * that PS can use.
4645 */
4646 if (lds_per_wave)
4647 max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
4648
4649 if (file != stderr ||
4650 r600_can_dump_shader(&sscreen->b, processor)) {
4651 if (processor == TGSI_PROCESSOR_FRAGMENT) {
4652 fprintf(file, "*** SHADER CONFIG ***\n"
4653 "SPI_PS_INPUT_ADDR = 0x%04x\n"
4654 "SPI_PS_INPUT_ENA = 0x%04x\n",
4655 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
4656 }
4657
4658 fprintf(file, "*** SHADER STATS ***\n"
4659 "SGPRS: %d\n"
4660 "VGPRS: %d\n"
4661 "Code Size: %d bytes\n"
4662 "LDS: %d blocks\n"
4663 "Scratch: %d bytes per wave\n"
4664 "Max Waves: %d\n"
4665 "********************\n",
4666 conf->num_sgprs, conf->num_vgprs, code_size,
4667 conf->lds_size, conf->scratch_bytes_per_wave,
4668 max_simd_waves);
4669 }
4670
4671 pipe_debug_message(debug, SHADER_INFO,
4672 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
4673 "LDS: %d Scratch: %d Max Waves: %d",
4674 conf->num_sgprs, conf->num_vgprs, code_size,
4675 conf->lds_size, conf->scratch_bytes_per_wave,
4676 max_simd_waves);
4677 }
4678
4679 static const char *si_get_shader_name(struct si_shader *shader,
4680 unsigned processor)
4681 {
4682 switch (processor) {
4683 case TGSI_PROCESSOR_VERTEX:
4684 if (shader->key.vs.as_es)
4685 return "Vertex Shader as ES";
4686 else if (shader->key.vs.as_ls)
4687 return "Vertex Shader as LS";
4688 else
4689 return "Vertex Shader as VS";
4690 case TGSI_PROCESSOR_TESS_CTRL:
4691 return "Tessellation Control Shader";
4692 case TGSI_PROCESSOR_TESS_EVAL:
4693 if (shader->key.tes.as_es)
4694 return "Tessellation Evaluation Shader as ES";
4695 else
4696 return "Tessellation Evaluation Shader as VS";
4697 case TGSI_PROCESSOR_GEOMETRY:
4698 if (shader->gs_copy_shader == NULL)
4699 return "GS Copy Shader as VS";
4700 else
4701 return "Geometry Shader";
4702 case TGSI_PROCESSOR_FRAGMENT:
4703 return "Pixel Shader";
4704 case TGSI_PROCESSOR_COMPUTE:
4705 return "Compute Shader";
4706 default:
4707 return "Unknown Shader";
4708 }
4709 }
4710
4711 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
4712 struct pipe_debug_callback *debug, unsigned processor,
4713 FILE *file)
4714 {
4715 if (file != stderr ||
4716 (r600_can_dump_shader(&sscreen->b, processor) &&
4717 !(sscreen->b.debug_flags & DBG_NO_ASM))) {
4718 fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
4719
4720 if (shader->prolog)
4721 si_shader_dump_disassembly(&shader->prolog->binary,
4722 debug, "prolog", file);
4723
4724 si_shader_dump_disassembly(&shader->binary, debug, "main", file);
4725
4726 if (shader->epilog)
4727 si_shader_dump_disassembly(&shader->epilog->binary,
4728 debug, "epilog", file);
4729 fprintf(file, "\n");
4730 }
4731
4732 si_shader_dump_stats(sscreen, &shader->config,
4733 shader->selector ? shader->selector->info.num_inputs : 0,
4734 si_get_shader_binary_size(shader), debug, processor,
4735 file);
4736 }
4737
4738 int si_compile_llvm(struct si_screen *sscreen,
4739 struct radeon_shader_binary *binary,
4740 struct si_shader_config *conf,
4741 LLVMTargetMachineRef tm,
4742 LLVMModuleRef mod,
4743 struct pipe_debug_callback *debug,
4744 unsigned processor,
4745 const char *name)
4746 {
4747 int r = 0;
4748 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
4749
4750 if (r600_can_dump_shader(&sscreen->b, processor)) {
4751 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
4752
4753 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
4754 fprintf(stderr, "%s LLVM IR:\n\n", name);
4755 LLVMDumpModule(mod);
4756 fprintf(stderr, "\n");
4757 }
4758 }
4759
4760 if (!si_replace_shader(count, binary)) {
4761 r = radeon_llvm_compile(mod, binary,
4762 r600_get_llvm_processor_name(sscreen->b.family), tm,
4763 debug);
4764 if (r)
4765 return r;
4766 }
4767
4768 si_shader_binary_read_config(binary, conf, 0);
4769
4770 /* Enable 64-bit and 16-bit denormals, because there is no performance
4771 * cost.
4772 *
4773 * If denormals are enabled, all floating-point output modifiers are
4774 * ignored.
4775 *
4776 * Don't enable denormals for 32-bit floats, because:
4777 * - Floating-point output modifiers would be ignored by the hw.
4778 * - Some opcodes don't support denormals, such as v_mad_f32. We would
4779 * have to stop using those.
4780 * - SI & CI would be very slow.
4781 */
4782 conf->float_mode |= V_00B028_FP_64_DENORMS;
4783
4784 FREE(binary->config);
4785 FREE(binary->global_symbol_offsets);
4786 binary->config = NULL;
4787 binary->global_symbol_offsets = NULL;
4788
4789 /* Some shaders can't have rodata because their binaries can be
4790 * concatenated.
4791 */
4792 if (binary->rodata_size &&
4793 (processor == TGSI_PROCESSOR_VERTEX ||
4794 processor == TGSI_PROCESSOR_TESS_CTRL ||
4795 processor == TGSI_PROCESSOR_TESS_EVAL ||
4796 processor == TGSI_PROCESSOR_FRAGMENT)) {
4797 fprintf(stderr, "radeonsi: The shader can't have rodata.");
4798 return -EINVAL;
4799 }
4800
4801 return r;
4802 }
4803
4804 /* Generate code for the hardware VS shader stage to go with a geometry shader */
4805 static int si_generate_gs_copy_shader(struct si_screen *sscreen,
4806 struct si_shader_context *ctx,
4807 struct si_shader *gs,
4808 struct pipe_debug_callback *debug)
4809 {
4810 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
4811 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4812 struct lp_build_context *uint = &bld_base->uint_bld;
4813 struct si_shader_output_values *outputs;
4814 struct tgsi_shader_info *gsinfo = &gs->selector->info;
4815 LLVMValueRef args[9];
4816 int i, r;
4817
4818 outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));
4819
4820 si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm);
4821 ctx->type = TGSI_PROCESSOR_VERTEX;
4822 ctx->is_gs_copy_shader = true;
4823
4824 create_meta_data(ctx);
4825 create_function(ctx);
4826 preload_streamout_buffers(ctx);
4827 preload_ring_buffers(ctx);
4828
4829 args[0] = ctx->gsvs_ring[0];
4830 args[1] = lp_build_mul_imm(uint,
4831 LLVMGetParam(ctx->radeon_bld.main_fn,
4832 ctx->param_vertex_id),
4833 4);
4834 args[3] = uint->zero;
4835 args[4] = uint->one; /* OFFEN */
4836 args[5] = uint->zero; /* IDXEN */
4837 args[6] = uint->one; /* GLC */
4838 args[7] = uint->one; /* SLC */
4839 args[8] = uint->zero; /* TFE */
4840
4841 /* Fetch vertex data from GSVS ring */
4842 for (i = 0; i < gsinfo->num_outputs; ++i) {
4843 unsigned chan;
4844
4845 outputs[i].name = gsinfo->output_semantic_name[i];
4846 outputs[i].sid = gsinfo->output_semantic_index[i];
4847
4848 for (chan = 0; chan < 4; chan++) {
4849 args[2] = lp_build_const_int32(gallivm,
4850 (i * 4 + chan) *
4851 gs->selector->gs_max_out_vertices * 16 * 4);
4852
4853 outputs[i].values[chan] =
4854 LLVMBuildBitCast(gallivm->builder,
4855 lp_build_intrinsic(gallivm->builder,
4856 "llvm.SI.buffer.load.dword.i32.i32",
4857 ctx->i32, args, 9,
4858 LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
4859 ctx->f32, "");
4860 }
4861 }
4862
4863 si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
4864
4865 LLVMBuildRet(gallivm->builder, ctx->return_value);
4866
4867 /* Dump LLVM IR before any optimization passes */
4868 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
4869 r600_can_dump_shader(&sscreen->b, TGSI_PROCESSOR_GEOMETRY))
4870 LLVMDumpModule(bld_base->base.gallivm->module);
4871
4872 radeon_llvm_finalize_module(&ctx->radeon_bld);
4873
4874 r = si_compile_llvm(sscreen, &ctx->shader->binary,
4875 &ctx->shader->config, ctx->tm,
4876 bld_base->base.gallivm->module,
4877 debug, TGSI_PROCESSOR_GEOMETRY,
4878 "GS Copy Shader");
4879 if (!r) {
4880 if (r600_can_dump_shader(&sscreen->b, TGSI_PROCESSOR_GEOMETRY))
4881 fprintf(stderr, "GS Copy Shader:\n");
4882 si_shader_dump(sscreen, ctx->shader, debug,
4883 TGSI_PROCESSOR_GEOMETRY, stderr);
4884 r = si_shader_binary_upload(sscreen, ctx->shader);
4885 }
4886
4887 radeon_llvm_dispose(&ctx->radeon_bld);
4888
4889 FREE(outputs);
4890 return r;
4891 }
4892
4893 void si_dump_shader_key(unsigned shader, union si_shader_key *key, FILE *f)
4894 {
4895 int i;
4896
4897 fprintf(f, "SHADER KEY\n");
4898
4899 switch (shader) {
4900 case PIPE_SHADER_VERTEX:
4901 fprintf(f, " instance_divisors = {");
4902 for (i = 0; i < Elements(key->vs.prolog.instance_divisors); i++)
4903 fprintf(f, !i ? "%u" : ", %u",
4904 key->vs.prolog.instance_divisors[i]);
4905 fprintf(f, "}\n");
4906 fprintf(f, " as_es = %u\n", key->vs.as_es);
4907 fprintf(f, " as_ls = %u\n", key->vs.as_ls);
4908 fprintf(f, " export_prim_id = %u\n", key->vs.epilog.export_prim_id);
4909 break;
4910
4911 case PIPE_SHADER_TESS_CTRL:
4912 fprintf(f, " prim_mode = %u\n", key->tcs.epilog.prim_mode);
4913 break;
4914
4915 case PIPE_SHADER_TESS_EVAL:
4916 fprintf(f, " as_es = %u\n", key->tes.as_es);
4917 fprintf(f, " export_prim_id = %u\n", key->tes.epilog.export_prim_id);
4918 break;
4919
4920 case PIPE_SHADER_GEOMETRY:
4921 break;
4922
4923 case PIPE_SHADER_FRAGMENT:
4924 fprintf(f, " prolog.color_two_side = %u\n", key->ps.prolog.color_two_side);
4925 fprintf(f, " prolog.poly_stipple = %u\n", key->ps.prolog.poly_stipple);
4926 fprintf(f, " prolog.force_persample_interp = %u\n", key->ps.prolog.force_persample_interp);
4927 fprintf(f, " epilog.spi_shader_col_format = 0x%x\n", key->ps.epilog.spi_shader_col_format);
4928 fprintf(f, " epilog.color_is_int8 = 0x%X\n", key->ps.epilog.color_is_int8);
4929 fprintf(f, " epilog.last_cbuf = %u\n", key->ps.epilog.last_cbuf);
4930 fprintf(f, " epilog.alpha_func = %u\n", key->ps.epilog.alpha_func);
4931 fprintf(f, " epilog.alpha_to_one = %u\n", key->ps.epilog.alpha_to_one);
4932 fprintf(f, " epilog.poly_line_smoothing = %u\n", key->ps.epilog.poly_line_smoothing);
4933 fprintf(f, " epilog.clamp_color = %u\n", key->ps.epilog.clamp_color);
4934 break;
4935
4936 default:
4937 assert(0);
4938 }
4939 }
4940
/* Initialize the common shader compilation context: the radeon/gallivm
 * LLVM context, cached LLVM types, and the TGSI->LLVM opcode callbacks
 * shared by all shader stages.
 *
 * \param shader  may be NULL (e.g. when building standalone shader parts);
 *                the TGSI processor type is then unknown (-1) and must be
 *                set by the caller afterwards.
 */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;

	memset(ctx, 0, sizeof(*ctx));
	radeon_llvm_context_init(&ctx->radeon_bld, "amdgcn--");
	ctx->tm = tm;
	ctx->screen = sscreen;
	if (shader && shader->selector)
		ctx->type = shader->selector->info.processor;
	else
		ctx->type = -1; /* caller sets this for shader parts / GS copy */
	ctx->shader = shader;

	/* Cache frequently used LLVM types. */
	ctx->voidt = LLVMVoidTypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i32 = LLVMInt32TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i64 = LLVMInt64TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i128 = LLVMIntTypeInContext(ctx->radeon_bld.gallivm.context, 128);
	ctx->f32 = LLVMFloatTypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	bld_base = &ctx->radeon_bld.soa.bld_base;
	if (shader && shader->selector)
		bld_base->info = &shader->selector->info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	/* Texture opcodes all share one action table entry. */
	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	/* Derivatives. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;

	/* MIN/MAX map to the IEEE minnum/maxnum intrinsics. */
	bld_base->op_actions[TGSI_OPCODE_MAX].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MAX].intr_name = "llvm.maxnum.f32";
	bld_base->op_actions[TGSI_OPCODE_MIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MIN].intr_name = "llvm.minnum.f32";
}
5011
5012 int si_compile_tgsi_shader(struct si_screen *sscreen,
5013 LLVMTargetMachineRef tm,
5014 struct si_shader *shader,
5015 bool is_monolithic,
5016 struct pipe_debug_callback *debug)
5017 {
5018 struct si_shader_selector *sel = shader->selector;
5019 struct si_shader_context ctx;
5020 struct lp_build_tgsi_context *bld_base;
5021 LLVMModuleRef mod;
5022 int r = 0;
5023
5024 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
5025 * conversion fails. */
5026 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
5027 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
5028 si_dump_shader_key(sel->type, &shader->key, stderr);
5029 tgsi_dump(sel->tokens, 0);
5030 si_dump_streamout(&sel->so);
5031 }
5032
5033 si_init_shader_ctx(&ctx, sscreen, shader, tm);
5034 ctx.is_monolithic = is_monolithic;
5035
5036 shader->info.uses_instanceid = sel->info.uses_instanceid;
5037
5038 bld_base = &ctx.radeon_bld.soa.bld_base;
5039 ctx.radeon_bld.load_system_value = declare_system_value;
5040
5041 switch (ctx.type) {
5042 case TGSI_PROCESSOR_VERTEX:
5043 ctx.radeon_bld.load_input = declare_input_vs;
5044 if (shader->key.vs.as_ls)
5045 bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
5046 else if (shader->key.vs.as_es)
5047 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
5048 else
5049 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
5050 break;
5051 case TGSI_PROCESSOR_TESS_CTRL:
5052 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
5053 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
5054 bld_base->emit_store = store_output_tcs;
5055 bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
5056 break;
5057 case TGSI_PROCESSOR_TESS_EVAL:
5058 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
5059 if (shader->key.tes.as_es)
5060 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
5061 else
5062 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
5063 break;
5064 case TGSI_PROCESSOR_GEOMETRY:
5065 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
5066 bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
5067 break;
5068 case TGSI_PROCESSOR_FRAGMENT:
5069 ctx.radeon_bld.load_input = declare_input_fs;
5070 if (is_monolithic)
5071 bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
5072 else
5073 bld_base->emit_epilogue = si_llvm_return_fs_outputs;
5074 break;
5075 default:
5076 assert(!"Unsupported shader type");
5077 return -1;
5078 }
5079
5080 create_meta_data(&ctx);
5081 create_function(&ctx);
5082 preload_constants(&ctx);
5083 preload_samplers(&ctx);
5084 preload_images(&ctx);
5085 preload_streamout_buffers(&ctx);
5086 preload_ring_buffers(&ctx);
5087
5088 if (ctx.is_monolithic && sel->type == PIPE_SHADER_FRAGMENT &&
5089 shader->key.ps.prolog.poly_stipple) {
5090 LLVMValueRef views = LLVMGetParam(ctx.radeon_bld.main_fn,
5091 SI_PARAM_SAMPLERS);
5092 si_llvm_emit_polygon_stipple(&ctx, views,
5093 SI_PARAM_POS_FIXED_PT);
5094 }
5095
5096 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
5097 int i;
5098 for (i = 0; i < 4; i++) {
5099 ctx.gs_next_vertex[i] =
5100 lp_build_alloca(bld_base->base.gallivm,
5101 ctx.i32, "");
5102 }
5103 }
5104
5105 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
5106 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
5107 goto out;
5108 }
5109
5110 LLVMBuildRet(bld_base->base.gallivm->builder, ctx.return_value);
5111 mod = bld_base->base.gallivm->module;
5112
5113 /* Dump LLVM IR before any optimization passes */
5114 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
5115 r600_can_dump_shader(&sscreen->b, ctx.type))
5116 LLVMDumpModule(mod);
5117
5118 radeon_llvm_finalize_module(&ctx.radeon_bld);
5119
5120 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
5121 mod, debug, ctx.type, "TGSI shader");
5122 if (r) {
5123 fprintf(stderr, "LLVM failed to compile shader\n");
5124 goto out;
5125 }
5126
5127 radeon_llvm_dispose(&ctx.radeon_bld);
5128
5129 /* Calculate the number of fragment input VGPRs. */
5130 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
5131 shader->info.num_input_vgprs = 0;
5132 shader->info.face_vgpr_index = -1;
5133
5134 if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
5135 shader->info.num_input_vgprs += 2;
5136 if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
5137 shader->info.num_input_vgprs += 2;
5138 if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
5139 shader->info.num_input_vgprs += 2;
5140 if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
5141 shader->info.num_input_vgprs += 3;
5142 if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
5143 shader->info.num_input_vgprs += 2;
5144 if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
5145 shader->info.num_input_vgprs += 2;
5146 if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
5147 shader->info.num_input_vgprs += 2;
5148 if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
5149 shader->info.num_input_vgprs += 1;
5150 if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
5151 shader->info.num_input_vgprs += 1;
5152 if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
5153 shader->info.num_input_vgprs += 1;
5154 if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
5155 shader->info.num_input_vgprs += 1;
5156 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
5157 shader->info.num_input_vgprs += 1;
5158 if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
5159 shader->info.face_vgpr_index = shader->info.num_input_vgprs;
5160 shader->info.num_input_vgprs += 1;
5161 }
5162 if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
5163 shader->info.num_input_vgprs += 1;
5164 if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
5165 shader->info.num_input_vgprs += 1;
5166 if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
5167 shader->info.num_input_vgprs += 1;
5168 }
5169
5170 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
5171 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
5172 shader->gs_copy_shader->selector = shader->selector;
5173 ctx.shader = shader->gs_copy_shader;
5174 if ((r = si_generate_gs_copy_shader(sscreen, &ctx,
5175 shader, debug))) {
5176 free(shader->gs_copy_shader);
5177 shader->gs_copy_shader = NULL;
5178 goto out;
5179 }
5180 }
5181
5182 out:
5183 for (int i = 0; i < SI_NUM_CONST_BUFFERS; i++)
5184 FREE(ctx.constants[i]);
5185 return r;
5186 }
5187
5188 /**
5189 * Create, compile and return a shader part (prolog or epilog).
5190 *
5191 * \param sscreen screen
5192 * \param list list of shader parts of the same category
5193 * \param key shader part key
5194 * \param tm LLVM target machine
5195 * \param debug debug callback
5196 * \param compile the callback responsible for compilation
5197 * \return non-NULL on success
5198 */
5199 static struct si_shader_part *
5200 si_get_shader_part(struct si_screen *sscreen,
5201 struct si_shader_part **list,
5202 union si_shader_part_key *key,
5203 LLVMTargetMachineRef tm,
5204 struct pipe_debug_callback *debug,
5205 bool (*compile)(struct si_screen *,
5206 LLVMTargetMachineRef,
5207 struct pipe_debug_callback *,
5208 struct si_shader_part *))
5209 {
5210 struct si_shader_part *result;
5211
5212 pipe_mutex_lock(sscreen->shader_parts_mutex);
5213
5214 /* Find existing. */
5215 for (result = *list; result; result = result->next) {
5216 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
5217 pipe_mutex_unlock(sscreen->shader_parts_mutex);
5218 return result;
5219 }
5220 }
5221
5222 /* Compile a new one. */
5223 result = CALLOC_STRUCT(si_shader_part);
5224 result->key = *key;
5225 if (!compile(sscreen, tm, debug, result)) {
5226 FREE(result);
5227 pipe_mutex_unlock(sscreen->shader_parts_mutex);
5228 return NULL;
5229 }
5230
5231 result->next = *list;
5232 *list = result;
5233 pipe_mutex_unlock(sscreen->shader_parts_mutex);
5234 return result;
5235 }
5236
5237 /**
5238 * Create a vertex shader prolog.
5239 *
5240 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
5241 * All inputs are returned unmodified. The vertex load indices are
5242 * stored after them, which will used by the API VS for fetching inputs.
5243 *
5244 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
5245 * input_v0,
5246 * input_v1,
5247 * input_v2,
5248 * input_v3,
5249 * (VertexID + BaseVertex),
5250 * (InstanceID + StartInstance),
5251 * (InstanceID / 2 + StartInstance)
5252 */
5253 static bool si_compile_vs_prolog(struct si_screen *sscreen,
5254 LLVMTargetMachineRef tm,
5255 struct pipe_debug_callback *debug,
5256 struct si_shader_part *out)
5257 {
5258 union si_shader_part_key *key = &out->key;
5259 struct si_shader shader = {};
5260 struct si_shader_context ctx;
5261 struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
5262 LLVMTypeRef *params, *returns;
5263 LLVMValueRef ret, func;
5264 int last_sgpr, num_params, num_returns, i;
5265 bool status = true;
5266
5267 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
5268 ctx.type = TGSI_PROCESSOR_VERTEX;
5269 ctx.param_vertex_id = key->vs_prolog.num_input_sgprs;
5270 ctx.param_instance_id = key->vs_prolog.num_input_sgprs + 3;
5271
5272 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
5273 params = alloca((key->vs_prolog.num_input_sgprs + 4) *
5274 sizeof(LLVMTypeRef));
5275 returns = alloca((key->vs_prolog.num_input_sgprs + 4 +
5276 key->vs_prolog.last_input + 1) *
5277 sizeof(LLVMTypeRef));
5278 num_params = 0;
5279 num_returns = 0;
5280
5281 /* Declare input and output SGPRs. */
5282 num_params = 0;
5283 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5284 params[num_params++] = ctx.i32;
5285 returns[num_returns++] = ctx.i32;
5286 }
5287 last_sgpr = num_params - 1;
5288
5289 /* 4 preloaded VGPRs (outputs must be floats) */
5290 for (i = 0; i < 4; i++) {
5291 params[num_params++] = ctx.i32;
5292 returns[num_returns++] = ctx.f32;
5293 }
5294
5295 /* Vertex load indices. */
5296 for (i = 0; i <= key->vs_prolog.last_input; i++)
5297 returns[num_returns++] = ctx.f32;
5298
5299 /* Create the function. */
5300 si_create_function(&ctx, returns, num_returns, params,
5301 num_params, -1, last_sgpr);
5302 func = ctx.radeon_bld.main_fn;
5303
5304 /* Copy inputs to outputs. This should be no-op, as the registers match,
5305 * but it will prevent the compiler from overwriting them unintentionally.
5306 */
5307 ret = ctx.return_value;
5308 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5309 LLVMValueRef p = LLVMGetParam(func, i);
5310 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
5311 }
5312 for (i = num_params - 4; i < num_params; i++) {
5313 LLVMValueRef p = LLVMGetParam(func, i);
5314 p = LLVMBuildBitCast(gallivm->builder, p, ctx.f32, "");
5315 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
5316 }
5317
5318 /* Compute vertex load indices from instance divisors. */
5319 for (i = 0; i <= key->vs_prolog.last_input; i++) {
5320 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
5321 LLVMValueRef index;
5322
5323 if (divisor) {
5324 /* InstanceID / Divisor + StartInstance */
5325 index = get_instance_index_for_fetch(&ctx.radeon_bld,
5326 SI_SGPR_START_INSTANCE,
5327 divisor);
5328 } else {
5329 /* VertexID + BaseVertex */
5330 index = LLVMBuildAdd(gallivm->builder,
5331 LLVMGetParam(func, ctx.param_vertex_id),
5332 LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
5333 }
5334
5335 index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
5336 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
5337 num_params++, "");
5338 }
5339
5340 /* Compile. */
5341 LLVMBuildRet(gallivm->builder, ret);
5342 radeon_llvm_finalize_module(&ctx.radeon_bld);
5343
5344 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
5345 gallivm->module, debug, ctx.type,
5346 "Vertex Shader Prolog"))
5347 status = false;
5348
5349 radeon_llvm_dispose(&ctx.radeon_bld);
5350 return status;
5351 }
5352
5353 /**
5354 * Compile the vertex shader epilog. This is also used by the tessellation
5355 * evaluation shader compiled as VS.
5356 *
5357 * The input is PrimitiveID.
5358 *
5359 * If PrimitiveID is required by the pixel shader, export it.
5360 * Otherwise, do nothing.
5361 */
5362 static bool si_compile_vs_epilog(struct si_screen *sscreen,
5363 LLVMTargetMachineRef tm,
5364 struct pipe_debug_callback *debug,
5365 struct si_shader_part *out)
5366 {
5367 union si_shader_part_key *key = &out->key;
5368 struct si_shader_context ctx;
5369 struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
5370 struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
5371 LLVMTypeRef params[5];
5372 int num_params, i;
5373 bool status = true;
5374
5375 si_init_shader_ctx(&ctx, sscreen, NULL, tm);
5376 ctx.type = TGSI_PROCESSOR_VERTEX;
5377
5378 /* Declare input VGPRs. */
5379 num_params = key->vs_epilog.states.export_prim_id ?
5380 (VS_EPILOG_PRIMID_LOC + 1) : 0;
5381 assert(num_params <= ARRAY_SIZE(params));
5382
5383 for (i = 0; i < num_params; i++)
5384 params[i] = ctx.f32;
5385
5386 /* Create the function. */
5387 si_create_function(&ctx, NULL, 0, params, num_params,
5388 -1, -1);
5389
5390 /* Emit exports. */
5391 if (key->vs_epilog.states.export_prim_id) {
5392 struct lp_build_context *base = &bld_base->base;
5393 struct lp_build_context *uint = &bld_base->uint_bld;
5394 LLVMValueRef args[9];
5395
5396 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
5397 args[1] = uint->zero; /* whether the EXEC mask is valid */
5398 args[2] = uint->zero; /* DONE bit */
5399 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_PARAM +
5400 key->vs_epilog.prim_id_param_offset);
5401 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
5402 args[5] = LLVMGetParam(ctx.radeon_bld.main_fn,
5403 VS_EPILOG_PRIMID_LOC); /* X */
5404 args[6] = uint->undef; /* Y */
5405 args[7] = uint->undef; /* Z */
5406 args[8] = uint->undef; /* W */
5407
5408 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
5409 LLVMVoidTypeInContext(base->gallivm->context),
5410 args, 9, 0);
5411 }
5412
5413 /* Compile. */
5414 LLVMBuildRet(gallivm->builder, ctx.return_value);
5415 radeon_llvm_finalize_module(&ctx.radeon_bld);
5416
5417 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
5418 gallivm->module, debug, ctx.type,
5419 "Vertex Shader Epilog"))
5420 status = false;
5421
5422 radeon_llvm_dispose(&ctx.radeon_bld);
5423 return status;
5424 }
5425
5426 /**
5427 * Create & compile a vertex shader epilog. This a helper used by VS and TES.
5428 */
5429 static bool si_get_vs_epilog(struct si_screen *sscreen,
5430 LLVMTargetMachineRef tm,
5431 struct si_shader *shader,
5432 struct pipe_debug_callback *debug,
5433 struct si_vs_epilog_bits *states)
5434 {
5435 union si_shader_part_key epilog_key;
5436
5437 memset(&epilog_key, 0, sizeof(epilog_key));
5438 epilog_key.vs_epilog.states = *states;
5439
5440 /* Set up the PrimitiveID output. */
5441 if (shader->key.vs.epilog.export_prim_id) {
5442 unsigned index = shader->selector->info.num_outputs;
5443 unsigned offset = shader->info.nr_param_exports++;
5444
5445 epilog_key.vs_epilog.prim_id_param_offset = offset;
5446 assert(index < ARRAY_SIZE(shader->info.vs_output_param_offset));
5447 shader->info.vs_output_param_offset[index] = offset;
5448 }
5449
5450 shader->epilog = si_get_shader_part(sscreen, &sscreen->vs_epilogs,
5451 &epilog_key, tm, debug,
5452 si_compile_vs_epilog);
5453 return shader->epilog != NULL;
5454 }
5455
5456 /**
5457 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
5458 */
5459 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
5460 LLVMTargetMachineRef tm,
5461 struct si_shader *shader,
5462 struct pipe_debug_callback *debug)
5463 {
5464 struct tgsi_shader_info *info = &shader->selector->info;
5465 union si_shader_part_key prolog_key;
5466 unsigned i;
5467
5468 /* Get the prolog. */
5469 memset(&prolog_key, 0, sizeof(prolog_key));
5470 prolog_key.vs_prolog.states = shader->key.vs.prolog;
5471 prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5472 prolog_key.vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
5473
5474 /* The prolog is a no-op if there are no inputs. */
5475 if (info->num_inputs) {
5476 shader->prolog =
5477 si_get_shader_part(sscreen, &sscreen->vs_prologs,
5478 &prolog_key, tm, debug,
5479 si_compile_vs_prolog);
5480 if (!shader->prolog)
5481 return false;
5482 }
5483
5484 /* Get the epilog. */
5485 if (!shader->key.vs.as_es && !shader->key.vs.as_ls &&
5486 !si_get_vs_epilog(sscreen, tm, shader, debug,
5487 &shader->key.vs.epilog))
5488 return false;
5489
5490 /* Set the instanceID flag. */
5491 for (i = 0; i < info->num_inputs; i++)
5492 if (prolog_key.vs_prolog.states.instance_divisors[i])
5493 shader->info.uses_instanceid = true;
5494
5495 return true;
5496 }
5497
5498 /**
5499 * Select and compile (or reuse) TES parts (epilog).
5500 */
5501 static bool si_shader_select_tes_parts(struct si_screen *sscreen,
5502 LLVMTargetMachineRef tm,
5503 struct si_shader *shader,
5504 struct pipe_debug_callback *debug)
5505 {
5506 if (shader->key.tes.as_es)
5507 return true;
5508
5509 /* TES compiled as VS. */
5510 return si_get_vs_epilog(sscreen, tm, shader, debug,
5511 &shader->key.tes.epilog);
5512 }
5513
5514 /**
5515 * Compile the TCS epilog. This writes tesselation factors to memory based on
5516 * the output primitive type of the tesselator (determined by TES).
5517 */
5518 static bool si_compile_tcs_epilog(struct si_screen *sscreen,
5519 LLVMTargetMachineRef tm,
5520 struct pipe_debug_callback *debug,
5521 struct si_shader_part *out)
5522 {
5523 union si_shader_part_key *key = &out->key;
5524 struct si_shader shader = {};
5525 struct si_shader_context ctx;
5526 struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
5527 struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
5528 LLVMTypeRef params[16];
5529 LLVMValueRef func;
5530 int last_array_pointer, last_sgpr, num_params;
5531 bool status = true;
5532
5533 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
5534 ctx.type = TGSI_PROCESSOR_TESS_CTRL;
5535 shader.key.tcs.epilog = key->tcs_epilog.states;
5536
5537 /* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used. */
5538 params[SI_PARAM_RW_BUFFERS] = const_array(ctx.v16i8, SI_NUM_RW_BUFFERS);
5539 last_array_pointer = SI_PARAM_RW_BUFFERS;
5540 params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
5541 params[SI_PARAM_SAMPLERS] = ctx.i64;
5542 params[SI_PARAM_IMAGES] = ctx.i64;
5543 params[SI_PARAM_TCS_OUT_OFFSETS] = ctx.i32;
5544 params[SI_PARAM_TCS_OUT_LAYOUT] = ctx.i32;
5545 params[SI_PARAM_TCS_IN_LAYOUT] = ctx.i32;
5546 params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx.i32;
5547 last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
5548 num_params = last_sgpr + 1;
5549
5550 params[num_params++] = ctx.i32; /* patch index within the wave (REL_PATCH_ID) */
5551 params[num_params++] = ctx.i32; /* invocation ID within the patch */
5552 params[num_params++] = ctx.i32; /* LDS offset where tess factors should be loaded from */
5553
5554 /* Create the function. */
5555 si_create_function(&ctx, NULL, 0, params, num_params,
5556 last_array_pointer, last_sgpr);
5557 declare_tess_lds(&ctx);
5558 func = ctx.radeon_bld.main_fn;
5559
5560 si_write_tess_factors(bld_base,
5561 LLVMGetParam(func, last_sgpr + 1),
5562 LLVMGetParam(func, last_sgpr + 2),
5563 LLVMGetParam(func, last_sgpr + 3));
5564
5565 /* Compile. */
5566 LLVMBuildRet(gallivm->builder, ctx.return_value);
5567 radeon_llvm_finalize_module(&ctx.radeon_bld);
5568
5569 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
5570 gallivm->module, debug, ctx.type,
5571 "Tessellation Control Shader Epilog"))
5572 status = false;
5573
5574 radeon_llvm_dispose(&ctx.radeon_bld);
5575 return status;
5576 }
5577
5578 /**
5579 * Select and compile (or reuse) TCS parts (epilog).
5580 */
5581 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
5582 LLVMTargetMachineRef tm,
5583 struct si_shader *shader,
5584 struct pipe_debug_callback *debug)
5585 {
5586 union si_shader_part_key epilog_key;
5587
5588 /* Get the epilog. */
5589 memset(&epilog_key, 0, sizeof(epilog_key));
5590 epilog_key.tcs_epilog.states = shader->key.tcs.epilog;
5591
5592 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
5593 &epilog_key, tm, debug,
5594 si_compile_tcs_epilog);
5595 return shader->epilog != NULL;
5596 }
5597
5598 /**
5599 * Compile the pixel shader prolog. This handles:
5600 * - two-side color selection and interpolation
5601 * - overriding interpolation parameters for the API PS
5602 * - polygon stippling
5603 *
5604 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
5605 * overriden by other states. (e.g. per-sample interpolation)
5606 * Interpolated colors are stored after the preloaded VGPRs.
5607 */
static bool si_compile_ps_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Points into ctx; initialized by si_init_shader_ctx below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = TGSI_PROCESSOR_FRAGMENT;
	shader.key.ps.prolog = key->ps_prolog.states;

	/* Number of inputs + 8 color elements.
	 * (8 = up to 2 colors * 4 channels appended as extra returns.) */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs: all API-shader SGPRs first, then all VGPRs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx.i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx.f32;

	/* Declare outputs (same as inputs + add colors if needed).
	 * The returns share the params array: indices [0, num_params) are
	 * the pass-through inputs, and one f32 per read color channel is
	 * appended after them. */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx.f32;

	/* Create the function. Note that params doubles as the return-type
	 * list (first num_returns entries) and the param-type list (first
	 * num_params entries). */
	si_create_function(&ctx, params, num_returns, params,
			   num_params, -1, last_sgpr);
	func = ctx.radeon_bld.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling: kill fragments based on the stipple pattern,
	 * looked up through a sampler view. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], views;

		/* Get the pointer to sampler views: reassemble the 64-bit
		 * address from the two 32-bit SGPRs holding it. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_SAMPLERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_SAMPLERS+1);
		views = lp_build_gather_values(gallivm, ptr, 2);
		views = LLVMBuildBitCast(gallivm->builder, views, ctx.i64, "");
		views = LLVMBuildIntToPtr(gallivm->builder, views,
					  const_array(ctx.v8i32, SI_NUM_SAMPLERS), "");

		si_llvm_emit_polygon_stipple(&ctx, views, pos);
	}

	/* Interpolate colors (at most 2 COLOR semantics). */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1),
		 * load the I,J barycentric pair from the selected VGPRs. */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			interp[0] = LLVMGetParam(func, interp_vgpr);
			interp[1] = LLVMGetParam(func, interp_vgpr + 1);
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
			interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
						     ctx.v2i32, "");
		}

		/* Use the absolute location of the input.
		 * PRIM_MASK is the SGPR right after the user SGPRs. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx.i32, "");
		}

		/* Writes up to 4 interpolated channels into color[]. */
		interp_fs_input(&ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append the interpolated channels after the pass-through
		 * outputs, in writemask bit order. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Force per-sample interpolation by copying the PERSP_SAMPLE and
	 * LINEAR_SAMPLE I,J pairs over the CENTER/CENTROID slots.
	 * VGPR layout assumed here (PERSP_SAMPLE at base+0, CENTER at
	 * base+2, CENTROID at base+4, LINEAR_* at base+6/8/10) matches the
	 * indices used in si_shader_select_ps_parts. */
	if (key->ps_prolog.states.force_persample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2], linear_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Compile. Returns false (status) on failure. */
	LLVMBuildRet(gallivm->builder, ret);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Prolog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
5764
5765 /**
5766 * Compile the pixel shader epilog. This handles everything that must be
5767 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
5768 */
static bool si_compile_ps_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* These point into ctx; initialized by si_init_shader_ctx below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	/* 16 fixed SGPR slots + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_array_pointer, last_sgpr, num_params, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = TGSI_PROCESSOR_FRAGMENT;
	shader.key.ps.epilog = key->ps_epilog.states;

	/* Declare input SGPRs. */
	params[SI_PARAM_RW_BUFFERS] = ctx.i64;
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_ALPHA_REF] = ctx.f32;
	last_array_pointer = -1;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 per written color, then the optional
	 * Z / stencil / samplemask values. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Ensure the parameter list is at least long enough to reach the
	 * fixed sample-mask VGPR location. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params,
			   last_array_pointer, last_sgpr);
	/* Disable elimination of unused inputs. */
	radeon_llvm_add_attribute(ctx.radeon_bld.main_fn,
				  "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so that export gets the "done" flag;
	 * if Z/stencil/samplemask are written, they are exported last
	 * instead, and last_color_export stays -1. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			/* The last MRT that is both written by the shader and
			 * has a non-NONE export format. */
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Emit one MRT export per written color, consuming 4 VGPRs each. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export);
	}

	/* Process depth, stencil, samplemask; they follow the colors. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask);
	else if (last_color_export == -1)
		/* The hardware requires at least one export per PS. */
		si_export_null(bld_base);

	/* Compile. Returns false (status) on failure. */
	LLVMBuildRetVoid(gallivm->builder);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
5881
5882 /**
5883 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
5884 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;
	unsigned i;

	/* Get the prolog. */
	memset(&prolog_key, 0, sizeof(prolog_key));
	prolog_key.ps_prolog.states = shader->key.ps.prolog;
	prolog_key.ps_prolog.colors_read = info->colors_read;
	prolog_key.ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	prolog_key.ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			prolog_key.ps_prolog.num_interp_inputs = info->num_inputs;
			prolog_key.ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* For each of the two COLOR semantics, pick which barycentric
		 * VGPR pair the prolog should interpolate with, and enable the
		 * corresponding SPI input. The VGPR indices (0/2/4/6/8/10)
		 * match the fixed PS input VGPR layout assumed by
		 * si_compile_ps_prolog's force_persample_interp code. */
		for (i = 0; i < 2; i++) {
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			prolog_key.ps_prolog.color_attr_index[i] = color[i];

			/* Force per-sample interpolation for the colors here. */
			if (shader->key.ps.prolog.force_persample_interp)
				location = TGSI_INTERPOLATE_LOC_SAMPLE;

			switch (info->input_interpolate[color[i]]) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 means "no barycentrics needed". */
				prolog_key.ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 6;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 8;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 10;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}

	/* The prolog is a no-op if these aren't set. */
	if (prolog_key.ps_prolog.colors_read ||
	    prolog_key.ps_prolog.states.force_persample_interp ||
	    prolog_key.ps_prolog.states.poly_stipple) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   &prolog_key, tm, debug,
					   si_compile_ps_prolog);
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always needed (it does the exports). */
	memset(&epilog_key, 0, sizeof(epilog_key));
	epilog_key.ps_epilog.colors_written = info->colors_written;
	epilog_key.ps_epilog.writes_z = info->writes_z;
	epilog_key.ps_epilog.writes_stencil = info->writes_stencil;
	epilog_key.ps_epilog.writes_samplemask = info->writes_samplemask;
	epilog_key.ps_epilog.states = shader->key.ps.epilog;

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   &epilog_key, tm, debug,
				   si_compile_ps_epilog);
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed:
	 * replace any CENTER/CENTROID barycentric requests with SAMPLE. */
	if (shader->key.ps.prolog.force_persample_interp) {
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
		    G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena)) {
			shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
			shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
			shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
		}
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
		    G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena)) {
			shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
			shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
			shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
		}
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
6048
/* Create a shader variant: either compile it monolithically or combine
 * the precompiled main part with selected prolog/epilog parts, then dump
 * and upload the binary. Returns 0 on success, non-zero on failure. */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader *mainp = shader->selector->main_shader_part;
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 */
	if (!mainp ||
	    (shader->selector->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es != mainp->key.vs.as_es ||
	      shader->key.vs.as_ls != mainp->key.vs.as_ls)) ||
	    (shader->selector->type == PIPE_SHADER_TESS_EVAL &&
	     shader->key.tes.as_es != mainp->key.tes.as_es)) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over.
		 * is_binary_shared tells si_shader_destroy not to free the
		 * binary, which is still owned by the main part. */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (!si_shader_select_tes_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader needs the
		 * maximum register usage of all its parts. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_shader_dump(sscreen, shader, debug, shader->selector->info.processor,
		       stderr);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
6150
6151 void si_shader_destroy(struct si_shader *shader)
6152 {
6153 if (shader->gs_copy_shader) {
6154 si_shader_destroy(shader->gs_copy_shader);
6155 FREE(shader->gs_copy_shader);
6156 }
6157
6158 if (shader->scratch_bo)
6159 r600_resource_reference(&shader->scratch_bo, NULL);
6160
6161 r600_resource_reference(&shader->bo, NULL);
6162
6163 if (!shader->is_binary_shared)
6164 radeon_shader_binary_clean(&shader->binary);
6165 }