radeonsi: Store inputs to memory when not using a TCS.
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_bitarit.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "radeon/r600_cs.h"
37 #include "radeon/radeon_llvm.h"
38 #include "radeon/radeon_elf_util.h"
39 #include "radeon/radeon_llvm_emit.h"
40 #include "util/u_memory.h"
41 #include "util/u_pstipple.h"
42 #include "util/u_string.h"
43 #include "tgsi/tgsi_parse.h"
44 #include "tgsi/tgsi_build.h"
45 #include "tgsi/tgsi_util.h"
46 #include "tgsi/tgsi_dump.h"
47
48 #include "si_pipe.h"
49 #include "si_shader.h"
50 #include "sid.h"
51
52 #include <errno.h>
53
54 static const char *scratch_rsrc_dword0_symbol =
55 "SCRATCH_RSRC_DWORD0";
56
57 static const char *scratch_rsrc_dword1_symbol =
58 "SCRATCH_RSRC_DWORD1";
59
/* One shader output to be exported: the four channel values plus the TGSI
 * semantic (name + sid) identifying which output this is. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* one LLVM value per component (x,y,z,w) */
	unsigned name;		/* TGSI_SEMANTIC_* */
	unsigned sid;		/* semantic index */
};
66
/* Per-compilation state for translating one TGSI shader to LLVM IR.
 *
 * NOTE(review): code in this file casts an lp_build_tgsi_context pointer
 * directly to si_shader_context (see si_shader_context()), which relies on
 * radeon_bld being the first member — keep it first.
 */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld; /* must stay the first member */
	struct si_shader *shader;
	struct si_screen *screen;

	unsigned type; /* PIPE_SHADER_* specifies the type of shader. */
	bool is_gs_copy_shader;

	/* Whether to generate the optimized shader variant compiled as a whole
	 * (without a prolog and epilog)
	 */
	bool is_monolithic;

	/* Indices of LLVM function parameters; looked up with LLVMGetParam.
	 * Which ones are valid depends on the shader stage. */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_rel_auto_id;
	int param_vs_prim_id;
	int param_instance_id;
	int param_vertex_index0;
	int param_tes_u;
	int param_tes_v;
	int param_tes_rel_patch_id;
	int param_tes_patch_id;
	int param_es2gs_offset;
	int param_oc_lds;

	/* Sets a bit if the dynamic HS control word was 0x80000000. The bit is
	 * 0x800000 for VS, 0x1 for ES.
	 */
	int param_tess_offchip;

	LLVMTargetMachineRef tm;

	/* Metadata used to tag loads: uniform_md_kind/empty_md mark
	 * dynamically-uniform loads, const_md marks constant loads
	 * (see build_indexed_load / build_indexed_load_const). */
	unsigned uniform_md_kind;
	LLVMValueRef const_md;
	LLVMValueRef empty_md;
	LLVMValueRef const_buffers[SI_NUM_CONST_BUFFERS];
	LLVMValueRef lds; /* base pointer for LDS accesses (lds_load/lds_store) */
	LLVMValueRef *constants[SI_NUM_CONST_BUFFERS];
	LLVMValueRef shader_buffers[SI_NUM_SHADER_BUFFERS];
	LLVMValueRef sampler_views[SI_NUM_SAMPLERS];
	LLVMValueRef sampler_states[SI_NUM_SAMPLERS];
	LLVMValueRef fmasks[SI_NUM_SAMPLERS];
	LLVMValueRef images[SI_NUM_IMAGES];
	LLVMValueRef so_buffers[4];
	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef return_value;

	/* Cached LLVM types, created once per compilation. */
	LLVMTypeRef voidt;
	LLVMTypeRef i1;
	LLVMTypeRef i8;
	LLVMTypeRef i32;
	LLVMTypeRef i64;
	LLVMTypeRef i128;
	LLVMTypeRef f32;
	LLVMTypeRef v16i8;
	LLVMTypeRef v2i32;
	LLVMTypeRef v4i32;
	LLVMTypeRef v4f32;
	LLVMTypeRef v8i32;

	LLVMValueRef shared_memory;
};
135
/* Recover the si_shader_context that embeds the given TGSI build context.
 * Valid because bld_base lives inside radeon_bld, the first member of
 * si_shader_context (presumably at offset 0 — the cast depends on it). */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	return (struct si_shader_context *)bld_base;
}
141
142 static void si_init_shader_ctx(struct si_shader_context *ctx,
143 struct si_screen *sscreen,
144 struct si_shader *shader,
145 LLVMTargetMachineRef tm);
146
147 /* Ideally pass the sample mask input to the PS epilog as v13, which
148 * is its usual location, so that the shader doesn't have to add v_mov.
149 */
150 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 13
151
152 /* The VS location of the PrimitiveID input is the same in the epilog,
153 * so that the main shader part doesn't have to move it.
154 */
155 #define VS_EPILOG_PRIMID_LOC 2
156
157 #define PERSPECTIVE_BASE 0
158 #define LINEAR_BASE 9
159
160 #define SAMPLE_OFFSET 0
161 #define CENTER_OFFSET 2
162 #define CENTROID_OFSET 4
163
164 #define USE_SGPR_MAX_SUFFIX_LEN 5
165 #define CONST_ADDR_SPACE 2
166 #define LOCAL_ADDR_SPACE 3
167 #define USER_SGPR_ADDR_SPACE 8
168
169
170 #define SENDMSG_GS 2
171 #define SENDMSG_GS_DONE 3
172
173 #define SENDMSG_GS_OP_NOP (0 << 4)
174 #define SENDMSG_GS_OP_CUT (1 << 4)
175 #define SENDMSG_GS_OP_EMIT (2 << 4)
176 #define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
177
178 /**
179 * Returns a unique index for a semantic name and index. The index must be
180 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
181 * calculated.
182 */
183 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
184 {
185 switch (semantic_name) {
186 case TGSI_SEMANTIC_POSITION:
187 return 0;
188 case TGSI_SEMANTIC_PSIZE:
189 return 1;
190 case TGSI_SEMANTIC_CLIPDIST:
191 assert(index <= 1);
192 return 2 + index;
193 case TGSI_SEMANTIC_GENERIC:
194 if (index <= 63-4)
195 return 4 + index;
196 else
197 /* same explanation as in the default statement,
198 * the only user hitting this is st/nine.
199 */
200 return 0;
201
202 /* patch indices are completely separate and thus start from 0 */
203 case TGSI_SEMANTIC_TESSOUTER:
204 return 0;
205 case TGSI_SEMANTIC_TESSINNER:
206 return 1;
207 case TGSI_SEMANTIC_PATCH:
208 return 2 + index;
209
210 default:
211 /* Don't fail here. The result of this function is only used
212 * for LS, TCS, TES, and GS, where legacy GL semantics can't
213 * occur, but this function is called for all vertex shaders
214 * before it's known whether LS will be compiled or not.
215 */
216 return 0;
217 }
218 }
219
220 /**
221 * Get the value of a shader input parameter and extract a bitfield.
222 */
223 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
224 unsigned param, unsigned rshift,
225 unsigned bitwidth)
226 {
227 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
228 LLVMValueRef value = LLVMGetParam(ctx->radeon_bld.main_fn,
229 param);
230
231 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
232 value = bitcast(&ctx->radeon_bld.soa.bld_base,
233 TGSI_TYPE_UNSIGNED, value);
234
235 if (rshift)
236 value = LLVMBuildLShr(gallivm->builder, value,
237 lp_build_const_int32(gallivm, rshift), "");
238
239 if (rshift + bitwidth < 32) {
240 unsigned mask = (1 << bitwidth) - 1;
241 value = LLVMBuildAnd(gallivm->builder, value,
242 lp_build_const_int32(gallivm, mask), "");
243 }
244
245 return value;
246 }
247
248 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
249 {
250 switch (ctx->type) {
251 case PIPE_SHADER_TESS_CTRL:
252 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
253
254 case PIPE_SHADER_TESS_EVAL:
255 return LLVMGetParam(ctx->radeon_bld.main_fn,
256 ctx->param_tes_rel_patch_id);
257
258 default:
259 assert(0);
260 return NULL;
261 }
262 }
263
264 /* Tessellation shaders pass outputs to the next shader using LDS.
265 *
266 * LS outputs = TCS inputs
267 * TCS outputs = TES inputs
268 *
269 * The LDS layout is:
270 * - TCS inputs for patch 0
271 * - TCS inputs for patch 1
272 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
273 * - ...
274 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
275 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
276 * - TCS outputs for patch 1
277 * - Per-patch TCS outputs for patch 1
278 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
279 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
280 * - ...
281 *
282 * All three shaders VS(LS), TCS, TES share the same LDS space.
283 */
284
285 static LLVMValueRef
286 get_tcs_in_patch_stride(struct si_shader_context *ctx)
287 {
288 if (ctx->type == PIPE_SHADER_VERTEX)
289 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
290 else if (ctx->type == PIPE_SHADER_TESS_CTRL)
291 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
292 else {
293 assert(0);
294 return NULL;
295 }
296 }
297
298 static LLVMValueRef
299 get_tcs_out_patch_stride(struct si_shader_context *ctx)
300 {
301 return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
302 }
303
304 static LLVMValueRef
305 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
306 {
307 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
308 unpack_param(ctx,
309 SI_PARAM_TCS_OUT_OFFSETS,
310 0, 16),
311 4);
312 }
313
314 static LLVMValueRef
315 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
316 {
317 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
318 unpack_param(ctx,
319 SI_PARAM_TCS_OUT_OFFSETS,
320 16, 16),
321 4);
322 }
323
324 static LLVMValueRef
325 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
326 {
327 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
328 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
329 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
330
331 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
332 }
333
334 static LLVMValueRef
335 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
336 {
337 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
338 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
339 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
340 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
341
342 return LLVMBuildAdd(gallivm->builder, patch0_offset,
343 LLVMBuildMul(gallivm->builder, patch_stride,
344 rel_patch_id, ""),
345 "");
346 }
347
348 static LLVMValueRef
349 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
350 {
351 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
352 LLVMValueRef patch0_patch_data_offset =
353 get_tcs_out_patch0_patch_data_offset(ctx);
354 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
355 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
356
357 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
358 LLVMBuildMul(gallivm->builder, patch_stride,
359 rel_patch_id, ""),
360 "");
361 }
362
363 static void build_indexed_store(struct si_shader_context *ctx,
364 LLVMValueRef base_ptr, LLVMValueRef index,
365 LLVMValueRef value)
366 {
367 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
368 struct gallivm_state *gallivm = bld_base->base.gallivm;
369 LLVMValueRef indices[2], pointer;
370
371 indices[0] = bld_base->uint_bld.zero;
372 indices[1] = index;
373
374 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
375 LLVMBuildStore(gallivm->builder, value, pointer);
376 }
377
378 /**
379 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
380 * It's equivalent to doing a load from &base_ptr[index].
381 *
382 * \param base_ptr Where the array starts.
383 * \param index The element index into the array.
384 * \param uniform Whether the base_ptr and index can be assumed to be
385 * dynamically uniform
386 */
387 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
388 LLVMValueRef base_ptr, LLVMValueRef index,
389 bool uniform)
390 {
391 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
392 struct gallivm_state *gallivm = bld_base->base.gallivm;
393 LLVMValueRef indices[2], pointer;
394
395 indices[0] = bld_base->uint_bld.zero;
396 indices[1] = index;
397
398 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
399 if (uniform)
400 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
401 return LLVMBuildLoad(gallivm->builder, pointer, "");
402 }
403
404 /**
405 * Do a load from &base_ptr[index], but also add a flag that it's loading
406 * a constant from a dynamically uniform index.
407 */
408 static LLVMValueRef build_indexed_load_const(
409 struct si_shader_context *ctx,
410 LLVMValueRef base_ptr, LLVMValueRef index)
411 {
412 LLVMValueRef result = build_indexed_load(ctx, base_ptr, index, true);
413 LLVMSetMetadata(result, 1, ctx->const_md);
414 return result;
415 }
416
417 static LLVMValueRef get_instance_index_for_fetch(
418 struct radeon_llvm_context *radeon_bld,
419 unsigned param_start_instance, unsigned divisor)
420 {
421 struct si_shader_context *ctx =
422 si_shader_context(&radeon_bld->soa.bld_base);
423 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
424
425 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
426 ctx->param_instance_id);
427
428 /* The division must be done before START_INSTANCE is added. */
429 if (divisor > 1)
430 result = LLVMBuildUDiv(gallivm->builder, result,
431 lp_build_const_int32(gallivm, divisor), "");
432
433 return LLVMBuildAdd(gallivm->builder, result,
434 LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
435 }
436
/**
 * Declare one vertex shader input: fetch the attribute from its vertex
 * buffer with llvm.SI.vs.load.input and split the resulting vec4 into
 * per-channel values in radeon_bld.inputs.
 *
 * The buffer index is obtained in one of three ways:
 * - non-monolithic: computed by the prolog and passed as a parameter,
 * - instanced (divisor != 0): derived from instance ID and start instance,
 * - otherwise: base vertex + vertex ID.
 */
static void declare_input_vs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	unsigned divisor =
		ctx->shader->key.vs.prolog.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list (vertex buffer resource descriptors). */
	t_list_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	if (!ctx->is_monolithic) {
		/* The prolog computed the index; read it from the
		 * per-attribute parameter. */
		buffer_index = LLVMGetParam(radeon_bld->main_fn,
					    ctx->param_vertex_index0 +
					    input_index);
	} else if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->info.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&ctx->radeon_bld,
							    SI_PARAM_START_INSTANCE,
							    divisor);
	} else {
		/* Load the buffer index for vertices. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
				   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(gallivm->builder,
						input, llvm_chan, "");
	}
}
505
506 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
507 unsigned swizzle)
508 {
509 struct si_shader_context *ctx = si_shader_context(bld_base);
510
511 if (swizzle > 0)
512 return bld_base->uint_bld.zero;
513
514 switch (ctx->type) {
515 case PIPE_SHADER_VERTEX:
516 return LLVMGetParam(ctx->radeon_bld.main_fn,
517 ctx->param_vs_prim_id);
518 case PIPE_SHADER_TESS_CTRL:
519 return LLVMGetParam(ctx->radeon_bld.main_fn,
520 SI_PARAM_PATCH_ID);
521 case PIPE_SHADER_TESS_EVAL:
522 return LLVMGetParam(ctx->radeon_bld.main_fn,
523 ctx->param_tes_patch_id);
524 case PIPE_SHADER_GEOMETRY:
525 return LLVMGetParam(ctx->radeon_bld.main_fn,
526 SI_PARAM_PRIMITIVE_ID);
527 default:
528 assert(0);
529 return bld_base->uint_bld.zero;
530 }
531 }
532
533 /**
534 * Return the value of tgsi_ind_register for indexing.
535 * This is the indirect index with the constant offset added to it.
536 */
537 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
538 const struct tgsi_ind_register *ind,
539 int rel_index)
540 {
541 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
542 LLVMValueRef result;
543
544 result = ctx->radeon_bld.soa.addr[ind->Index][ind->Swizzle];
545 result = LLVMBuildLoad(gallivm->builder, result, "");
546 result = LLVMBuildAdd(gallivm->builder, result,
547 lp_build_const_int32(gallivm, rel_index), "");
548 return result;
549 }
550
551 /**
552 * Like get_indirect_index, but restricts the return value to a (possibly
553 * undefined) value inside [0..num).
554 */
555 static LLVMValueRef get_bounded_indirect_index(struct si_shader_context *ctx,
556 const struct tgsi_ind_register *ind,
557 int rel_index, unsigned num)
558 {
559 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
560 LLVMBuilderRef builder = gallivm->builder;
561 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
562 LLVMValueRef c_max = LLVMConstInt(ctx->i32, num - 1, 0);
563 LLVMValueRef cc;
564
565 /* LLVM 3.8: If indirect resource indexing is used:
566 * - SI & CIK hang
567 * - VI crashes
568 */
569 if (HAVE_LLVM <= 0x0308)
570 return LLVMGetUndef(ctx->i32);
571
572 if (util_is_power_of_two(num)) {
573 result = LLVMBuildAnd(builder, result, c_max, "");
574 } else {
575 /* In theory, this MAX pattern should result in code that is
576 * as good as the bit-wise AND above.
577 *
578 * In practice, LLVM generates worse code (at the time of
579 * writing), because its value tracking is not strong enough.
580 */
581 cc = LLVMBuildICmp(builder, LLVMIntULE, result, c_max, "");
582 result = LLVMBuildSelect(builder, cc, result, c_max, "");
583 }
584
585 return result;
586 }
587
588
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst               destination register; used when \p src is NULL
 * \param src               source register; takes precedence when non-NULL
 * \param vertex_dw_stride  dword stride between vertices, used only for
 *                          2-dimensional registers
 * \param base_addr         starting dword address to offset from
 * \return base_addr plus the vertex offset (if 2D), the indirect element
 *         offset (if indirect), and the semantic slot offset (slot * 4)
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		/* NOTE: this local shadows the semantic-index array "index"
		 * declared above; only the vertex index is meant here. */
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = lp_build_const_int32(gallivm, reg.Dimension.Index);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* For declared arrays, the semantic is taken from the
		 * array's first element rather than the accessed index. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each element occupies 4 dwords (one vec4). */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      lp_build_const_int32(gallivm, 4), ""), "");

		param = si_shader_io_get_unique_index(name[first], index[first]);
	} else {
		param = si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    lp_build_const_int32(gallivm, param * 4), "");
}
673
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
/* Compute a byte address into the offchip buffer for the given vertex
 * (NULL selects the per-patch region) and attribute index. */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Patch geometry comes packed in the offchip-layout SGPR. */
	vertices_per_patch = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 6);
	num_patches = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 0, 9);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
	                              num_patches, "");

	/* 16 bytes = one vec4 attribute. */
	constant16 = lp_build_const_int32(gallivm, 16);
	if (vertex_index) {
		/* Per-vertex attribute: index by global vertex number;
		 * consecutive attributes are total_vertices apart. */
		base_addr = LLVMBuildMul(gallivm->builder, get_rel_patch_id(ctx),
		                         vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch attribute: index by patch number; consecutive
		 * attributes are num_patches apart. */
		base_addr = get_rel_patch_id(ctx);
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
	                         LLVMBuildMul(gallivm->builder, param_index,
	                                      param_stride, ""), "");

	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* The per-patch region starts after all per-vertex data. */
		LLVMValueRef patch_data_offset =
		           unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 16, 16);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         patch_data_offset, "");
	}
	return base_addr;
}
734
/* Compute the offchip buffer address for a TGSI register access.
 * Resolves the vertex index (from the register's 2nd dimension, if any)
 * and the attribute index (base semantic slot + indirect offset), then
 * delegates to get_tcs_tes_buffer_address. */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
                                struct si_shader_context *ctx,
                                const struct tgsi_full_dst_register *dst,
                                const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	/* Sources and destinations share the same address computation. */
	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
			                                  reg.Dimension.Index);
		else
			vertex_index = lp_build_const_int32(gallivm,
			                                    reg.Dimension.Index);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* For declared arrays, the semantic comes from the array's
		 * first element; the indirect part is relative to it. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
		                                 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = lp_build_const_int32(gallivm, 0);
	}

	param_index_base = si_shader_io_get_unique_index(name[param_base],
	                                                 index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
	                           lp_build_const_int32(gallivm, param_index_base),
	                           "");

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
797
/* Emit a TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} instruction via the
 * llvm.SI.tbuffer.store intrinsic; the suffix is selected by
 * num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32
 * (num_channels=2), or v4i32 (num_channels=3,4).
 *
 * \param rsrc         buffer resource descriptor
 * \param vaddr        per-thread address/index operand
 * \param soffset      scalar byte offset
 * \param inst_offset  immediate offset (must fit in 12 bits unless offen)
 * \param dfmt, nfmt   buffer data/number format
 * \param offen, idxen, glc, slc, tfe  instruction modifier bits
 */
static void build_tbuffer_store(struct si_shader_context *ctx,
				LLVMValueRef rsrc,
				LLVMValueRef vdata,
				unsigned num_channels,
				LLVMValueRef vaddr,
				LLVMValueRef soffset,
				unsigned inst_offset,
				unsigned dfmt,
				unsigned nfmt,
				unsigned offen,
				unsigned idxen,
				unsigned glc,
				unsigned slc,
				unsigned tfe)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(ctx->i32, num_channels, 0),
		vaddr,
		soffset,
		LLVMConstInt(ctx->i32, inst_offset, 0),
		LLVMConstInt(ctx->i32, dfmt, 0),
		LLVMConstInt(ctx->i32, nfmt, 0),
		LLVMConstInt(ctx->i32, offen, 0),
		LLVMConstInt(ctx->i32, idxen, 0),
		LLVMConstInt(ctx->i32, glc, 0),
		LLVMConstInt(ctx->i32, slc, 0),
		LLVMConstInt(ctx->i32, tfe, 0)
	};

	/* The instruction offset field has 12 bits */
	assert(offen || inst_offset < (1 << 12));

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
			   args, ARRAY_SIZE(args), 0);
}
845
846 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
847 LLVMValueRef rsrc,
848 LLVMValueRef vdata,
849 unsigned num_channels,
850 LLVMValueRef vaddr,
851 LLVMValueRef soffset,
852 unsigned inst_offset)
853 {
854 static unsigned dfmt[] = {
855 V_008F0C_BUF_DATA_FORMAT_32,
856 V_008F0C_BUF_DATA_FORMAT_32_32,
857 V_008F0C_BUF_DATA_FORMAT_32_32_32,
858 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
859 };
860 assert(num_channels >= 1 && num_channels <= 4);
861
862 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
863 inst_offset, dfmt[num_channels-1],
864 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
865 }
866
/* Emit a buffer load of 1, 2, or 4 dwords.
 *
 * Uses llvm.amdgcn.buffer.load.* on LLVM >= 3.9 and the legacy
 * llvm.SI.buffer.load.dword.* intrinsic otherwise. num_channels is clamped
 * so that 3-channel loads use the 4-channel variant.
 *
 * \param vindex       optional per-thread index into the buffer (may be NULL)
 * \param voffset      optional per-thread byte offset (may be NULL)
 * \param soffset      scalar byte offset (may be NULL on the new path)
 * \param inst_offset  immediate byte offset
 * \param glc, slc     cache policy bits
 */
static LLVMValueRef build_buffer_load(struct si_shader_context *ctx,
				      LLVMValueRef rsrc,
				      int num_channels,
				      LLVMValueRef vindex,
				      LLVMValueRef voffset,
				      LLVMValueRef soffset,
				      unsigned inst_offset,
				      unsigned glc,
				      unsigned slc)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	if (HAVE_LLVM >= 0x309) {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, ""),
			vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0)
		};

		LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
				       ctx->v4f32};
		const char *type_names[] = {"f32", "v2f32", "v4f32"};
		char name[256];

		/* The new intrinsic takes a single combined offset operand
		 * (args[2]); fold voffset and soffset into it. */
		if (voffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], voffset,
					       "");
		}

		if (soffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], soffset,
					       "");
		}

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
			 type_names[func]);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute |
					  LLVMNoUnwindAttribute);
	} else {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v16i8, ""),
			voffset ? voffset : vindex,
			soffset,
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i32, voffset ? 1 : 0, 0), // offen
			LLVMConstInt(ctx->i32, vindex ? 1 : 0, 0), //idxen
			LLVMConstInt(ctx->i32, glc, 0),
			LLVMConstInt(ctx->i32, slc, 0),
			LLVMConstInt(ctx->i32, 0, 0), // TFE
		};

		LLVMTypeRef types[] = {ctx->i32, LLVMVectorType(ctx->i32, 2),
				       ctx->v4i32};
		const char *type_names[] = {"i32", "v2i32", "v4i32"};
		const char *arg_type = "i32";
		char name[256];

		/* When both an index and an offset are given, the legacy
		 * intrinsic takes them packed into a v2i32 address. */
		if (voffset && vindex) {
			LLVMValueRef vaddr[] = {vindex, voffset};

			arg_type = "v2i32";
			args[1] = lp_build_gather_values(gallivm, vaddr, 2);
		}

		snprintf(name, sizeof(name), "llvm.SI.buffer.load.dword.%s.%s",
			 type_names[func], arg_type);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute |
					  LLVMNoUnwindAttribute);
	}
}
944
945 static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
946 enum tgsi_opcode_type type, unsigned swizzle,
947 LLVMValueRef buffer, LLVMValueRef offset,
948 LLVMValueRef base)
949 {
950 struct si_shader_context *ctx = si_shader_context(bld_base);
951 struct gallivm_state *gallivm = bld_base->base.gallivm;
952 LLVMValueRef value, value2;
953 LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
954 LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);
955
956 if (swizzle == ~0) {
957 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
958 0, 1, 0);
959
960 return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
961 }
962
963 if (type != TGSI_TYPE_DOUBLE) {
964 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
965 0, 1, 0);
966
967 value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
968 return LLVMBuildExtractElement(gallivm->builder, value,
969 lp_build_const_int32(gallivm, swizzle), "");
970 }
971
972 value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
973 swizzle * 4, 1, 0);
974
975 value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
976 swizzle * 4 + 4, 1, 0);
977
978 return radeon_llvm_emit_fetch_double(bld_base, value, value2);
979 }
980
981 /**
982 * Load from LDS.
983 *
984 * \param type output value type
985 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
986 * \param dw_addr address in dwords
987 */
988 static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
989 enum tgsi_opcode_type type, unsigned swizzle,
990 LLVMValueRef dw_addr)
991 {
992 struct si_shader_context *ctx = si_shader_context(bld_base);
993 struct gallivm_state *gallivm = bld_base->base.gallivm;
994 LLVMValueRef value;
995
996 if (swizzle == ~0) {
997 LLVMValueRef values[TGSI_NUM_CHANNELS];
998
999 for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
1000 values[chan] = lds_load(bld_base, type, chan, dw_addr);
1001
1002 return lp_build_gather_values(bld_base->base.gallivm, values,
1003 TGSI_NUM_CHANNELS);
1004 }
1005
1006 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
1007 lp_build_const_int32(gallivm, swizzle));
1008
1009 value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
1010 if (type == TGSI_TYPE_DOUBLE) {
1011 LLVMValueRef value2;
1012 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
1013 lp_build_const_int32(gallivm, swizzle + 1));
1014 value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
1015 return radeon_llvm_emit_fetch_double(bld_base, value, value2);
1016 }
1017
1018 return LLVMBuildBitCast(gallivm->builder, value,
1019 tgsi2llvmtype(bld_base, type), "");
1020 }
1021
1022 /**
1023 * Store to LDS.
1024 *
1025 * \param swizzle offset (typically 0..3)
1026 * \param dw_addr address in dwords
1027 * \param value value to store
1028 */
1029 static void lds_store(struct lp_build_tgsi_context *bld_base,
1030 unsigned swizzle, LLVMValueRef dw_addr,
1031 LLVMValueRef value)
1032 {
1033 struct si_shader_context *ctx = si_shader_context(bld_base);
1034 struct gallivm_state *gallivm = bld_base->base.gallivm;
1035
1036 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
1037 lp_build_const_int32(gallivm, swizzle));
1038
1039 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
1040 build_indexed_store(ctx, ctx->lds,
1041 dw_addr, value);
1042 }
1043
1044 static LLVMValueRef fetch_input_tcs(
1045 struct lp_build_tgsi_context *bld_base,
1046 const struct tgsi_full_src_register *reg,
1047 enum tgsi_opcode_type type, unsigned swizzle)
1048 {
1049 struct si_shader_context *ctx = si_shader_context(bld_base);
1050 LLVMValueRef dw_addr, stride;
1051
1052 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
1053 dw_addr = get_tcs_in_current_patch_offset(ctx);
1054 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1055
1056 return lds_load(bld_base, type, swizzle, dw_addr);
1057 }
1058
1059 static LLVMValueRef fetch_output_tcs(
1060 struct lp_build_tgsi_context *bld_base,
1061 const struct tgsi_full_src_register *reg,
1062 enum tgsi_opcode_type type, unsigned swizzle)
1063 {
1064 struct si_shader_context *ctx = si_shader_context(bld_base);
1065 LLVMValueRef dw_addr, stride;
1066
1067 if (reg->Register.Dimension) {
1068 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
1069 dw_addr = get_tcs_out_current_patch_offset(ctx);
1070 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1071 } else {
1072 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1073 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1074 }
1075
1076 return lds_load(bld_base, type, swizzle, dw_addr);
1077 }
1078
1079 static LLVMValueRef fetch_input_tes(
1080 struct lp_build_tgsi_context *bld_base,
1081 const struct tgsi_full_src_register *reg,
1082 enum tgsi_opcode_type type, unsigned swizzle)
1083 {
1084 struct si_shader_context *ctx = si_shader_context(bld_base);
1085 LLVMValueRef dw_addr, stride;
1086
1087 if (reg->Register.Dimension) {
1088 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
1089 dw_addr = get_tcs_out_current_patch_offset(ctx);
1090 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1091 } else {
1092 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1093 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1094 }
1095
1096 return lds_load(bld_base, type, swizzle, dw_addr);
1097 }
1098
1099 static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
1100 const struct tgsi_full_instruction *inst,
1101 const struct tgsi_opcode_info *info,
1102 LLVMValueRef dst[4])
1103 {
1104 struct si_shader_context *ctx = si_shader_context(bld_base);
1105 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
1106 unsigned chan_index;
1107 LLVMValueRef dw_addr, stride;
1108
1109 /* Only handle per-patch and per-vertex outputs here.
1110 * Vectors will be lowered to scalars and this function will be called again.
1111 */
1112 if (reg->Register.File != TGSI_FILE_OUTPUT ||
1113 (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
1114 radeon_llvm_emit_store(bld_base, inst, info, dst);
1115 return;
1116 }
1117
1118 if (reg->Register.Dimension) {
1119 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
1120 dw_addr = get_tcs_out_current_patch_offset(ctx);
1121 dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
1122 } else {
1123 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1124 dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
1125 }
1126
1127 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
1128 LLVMValueRef value = dst[chan_index];
1129
1130 if (inst->Instruction.Saturate)
1131 value = radeon_llvm_saturate(bld_base, value);
1132
1133 lds_store(bld_base, chan_index, dw_addr, value);
1134 }
1135 }
1136
/* Fetch a geometry shader input.
 *
 * GS per-vertex inputs are read from the ESGS ring buffer; the base
 * offset of each input vertex is passed in VGPRs (SI_PARAM_VTX*_OFFSET).
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	/* The primitive ID is not stored in the ring buffer. */
	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	if (!reg->Register.Dimension)
		return NULL;

	/* A vec4 fetch is lowered into four scalar fetches. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter: vertices 0-1 arrive in
	 * SI_PARAM_VTX0_OFFSET.., vertices 2-5 in SI_PARAM_VTX2_OFFSET.. */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	/* The VGPR holds a dword offset; scale to bytes. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->radeon_bld.main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	/* NOTE(review): the 256 factor presumably matches the ESGS ring
	 * element stride used by the ES store path — confirm against the
	 * ring setup code. */
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one; /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one; /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
	if (type == TGSI_TYPE_DOUBLE) {
		/* A double needs a second dword: re-issue the load at the
		 * next component's offset and merge the two halves. */
		LLVMValueRef value2;
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
		return radeon_llvm_emit_fetch_double(bld_base,
						     value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1215
1216 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1217 {
1218 switch (interpolate) {
1219 case TGSI_INTERPOLATE_CONSTANT:
1220 return 0;
1221
1222 case TGSI_INTERPOLATE_LINEAR:
1223 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1224 return SI_PARAM_LINEAR_SAMPLE;
1225 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1226 return SI_PARAM_LINEAR_CENTROID;
1227 else
1228 return SI_PARAM_LINEAR_CENTER;
1229 break;
1230 case TGSI_INTERPOLATE_COLOR:
1231 case TGSI_INTERPOLATE_PERSPECTIVE:
1232 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1233 return SI_PARAM_PERSP_SAMPLE;
1234 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1235 return SI_PARAM_PERSP_CENTROID;
1236 else
1237 return SI_PARAM_PERSP_CENTER;
1238 break;
1239 default:
1240 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1241 return -1;
1242 }
1243 }
1244
1245 /* This shouldn't be used by explicit INTERP opcodes. */
1246 static unsigned select_interp_param(struct si_shader_context *ctx,
1247 unsigned param)
1248 {
1249 if (!ctx->shader->key.ps.prolog.force_persample_interp ||
1250 !ctx->is_monolithic)
1251 return param;
1252
1253 /* If the shader doesn't use center/centroid, just return the parameter.
1254 *
1255 * If the shader only uses one set of (i,j), "si_emit_spi_ps_input" can
1256 * switch between center/centroid and sample without shader changes.
1257 */
1258 switch (param) {
1259 case SI_PARAM_PERSP_CENTROID:
1260 case SI_PARAM_PERSP_CENTER:
1261 return SI_PARAM_PERSP_SAMPLE;
1262
1263 case SI_PARAM_LINEAR_CENTROID:
1264 case SI_PARAM_LINEAR_CENTER:
1265 return SI_PARAM_LINEAR_SAMPLE;
1266
1267 default:
1268 return param;
1269 }
1270 }
1271
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j); NULL selects
 *				flat/constant fetching via fs.constant
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and the
		 * back color and select per-fragment based on the face. */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		/* FRONT_FACE is non-zero for front-facing fragments. */
		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog only uses the first component; yzw are (0, 0, 1). */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					ctx->f32, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Generic case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
1388
/* Declare a fragment shader input: either take preloaded color values
 * from VGPRs set up by the PS prolog, or emit interpolation with the
 * appropriate (i, j) barycentrics. */
static void declare_input_fs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = radeon_bld->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (!ctx->is_monolithic &&
	    decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		/* colors_read holds 4 bits per color; COLOR1's VGPRs come
		 * after however many COLOR0 components were read. */
		unsigned mask = colors_read >> (i * 4);
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		/* Only read components present in the mask consume a VGPR;
		 * the rest become undef. */
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
			mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		/* Non-constant interpolation: fetch the (i, j) pair, possibly
		 * remapped to the per-sample variant. */
		interp_param_idx = select_interp_param(ctx,
						       interp_param_idx);
		interp_param = LLVMGetParam(main_fn, interp_param_idx);
	}

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)]);
}
1439
1440 static LLVMValueRef get_sample_id(struct radeon_llvm_context *radeon_bld)
1441 {
1442 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1443 SI_PARAM_ANCILLARY, 8, 4);
1444 }
1445
1446 /**
1447 * Set range metadata on an instruction. This can only be used on load and
1448 * call instructions. If you know an instruction can only produce the values
1449 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1450 * \p lo is the minimum value inclusive.
1451 * \p hi is the maximum value exclusive.
1452 */
1453 static void set_range_metadata(LLVMValueRef value, unsigned lo, unsigned hi)
1454 {
1455 const char *range_md_string = "range";
1456 LLVMValueRef range_md, md_args[2];
1457 LLVMTypeRef type = LLVMTypeOf(value);
1458 LLVMContextRef context = LLVMGetTypeContext(type);
1459 unsigned md_range_id = LLVMGetMDKindIDInContext(context,
1460 range_md_string, strlen(range_md_string));
1461
1462 md_args[0] = LLVMConstInt(type, lo, false);
1463 md_args[1] = LLVMConstInt(type, hi, false);
1464 range_md = LLVMMDNodeInContext(context, md_args, 2);
1465 LLVMSetMetadata(value, md_range_id, range_md);
1466 }
1467
/* Return the invocation's index within its wave (0..63).
 *
 * Older LLVM exposes the llvm.SI.tid intrinsic; LLVM >= 3.8 composes it
 * from mbcnt.lo/mbcnt.hi (population count of the lane mask below the
 * current lane).
 */
static LLVMValueRef get_thread_id(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMValueRef tid;

	if (HAVE_LLVM < 0x0308) {
		tid = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid",
				ctx->i32,   NULL, 0, LLVMReadNoneAttribute);
	} else {
		LLVMValueRef tid_args[2];
		/* Count set bits of 0xffffffff below this lane (low half),
		 * starting from 0. */
		tid_args[0] = lp_build_const_int32(gallivm, 0xffffffff);
		tid_args[1] = lp_build_const_int32(gallivm, 0);
		tid_args[1] = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.lo", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);

		/* Add the count from the high half of the lane mask. */
		tid = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.hi", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);
	}
	/* Tell LLVM the result is always in [0, 64). */
	set_range_metadata(tid, 0, 64);
	return tid;
}
1491
1492 /**
1493 * Load a dword from a constant buffer.
1494 */
1495 static LLVMValueRef buffer_load_const(LLVMBuilderRef builder, LLVMValueRef resource,
1496 LLVMValueRef offset, LLVMTypeRef return_type)
1497 {
1498 LLVMValueRef args[2] = {resource, offset};
1499
1500 return lp_build_intrinsic(builder, "llvm.SI.load.const", return_type, args, 2,
1501 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1502 }
1503
1504 static LLVMValueRef load_sample_position(struct radeon_llvm_context *radeon_bld, LLVMValueRef sample_id)
1505 {
1506 struct si_shader_context *ctx =
1507 si_shader_context(&radeon_bld->soa.bld_base);
1508 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1509 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1510 LLVMBuilderRef builder = gallivm->builder;
1511 LLVMValueRef desc = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
1512 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_PS_CONST_SAMPLE_POSITIONS);
1513 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1514
1515 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1516 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1517 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1518
1519 LLVMValueRef pos[4] = {
1520 buffer_load_const(builder, resource, offset0, ctx->f32),
1521 buffer_load_const(builder, resource, offset1, ctx->f32),
1522 lp_build_const_float(gallivm, 0),
1523 lp_build_const_float(gallivm, 0)
1524 };
1525
1526 return lp_build_gather_values(gallivm, pos, 4);
1527 }
1528
/* Declare a TGSI system value and store the resulting LLVM value in
 * radeon_bld->system_values[index]. Most values come straight from
 * shader input SGPRs/VGPRs; a few are computed or loaded. */
static void declare_system_value(
	struct radeon_llvm_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID = vertex-id VGPR + BASE_VERTEX SGPR. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* Only TCS and GS have invocation IDs. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; .w is passed as 1/w and inverted here. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sub-pixel sample position = fractional part of the
		 * fragment position at the sample. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count, packed in TCS_OUT_LAYOUT bits 31:26. */
		value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess levels are stored as per-patch data in LDS. */
		LLVMValueRef dw_addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = LLVMBuildAdd(gallivm->builder, dw_addr,
				       lp_build_const_int32(gallivm, param * 4), "");

		value = lds_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
				 ~0, dw_addr);
		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from an internal constant buffer:
		 * outer levels at dwords 0..3, inner levels at dwords 4..7. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = lp_build_const_int32(gallivm, SI_HS_CONST_DEFAULT_TESS_LEVELS);
		buf = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
		buf = build_indexed_load_const(ctx, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(gallivm->builder, buf,
						   lp_build_const_int32(gallivm, (offset + i) * 4),
						   ctx->f32);
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_GRID_SIZE);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		/* The block size is a compile-time constant taken from the
		 * CS_FIXED_BLOCK_* properties. */
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;
		unsigned sizes[3] = {
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
		};

		for (i = 0; i < 3; ++i)
			values[i] = lp_build_const_int32(gallivm, sizes[i]);

		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_ID);
		break;

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_THREAD_ID);
		break;

#if HAVE_LLVM >= 0x0309
	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* Helper invocations are the lanes that are NOT "live"
		 * (ps.live is true for real pixels), hence the negation. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;
#endif

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1727
1728 static void declare_compute_memory(struct radeon_llvm_context *radeon_bld,
1729 const struct tgsi_full_declaration *decl)
1730 {
1731 struct si_shader_context *ctx =
1732 si_shader_context(&radeon_bld->soa.bld_base);
1733 struct si_shader_selector *sel = ctx->shader->selector;
1734 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1735
1736 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1737 LLVMValueRef var;
1738
1739 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1740 assert(decl->Range.First == decl->Range.Last);
1741 assert(!ctx->shared_memory);
1742
1743 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1744 LLVMArrayType(ctx->i8, sel->local_size),
1745 "compute_lds",
1746 LOCAL_ADDR_SPACE);
1747 LLVMSetAlignment(var, 4);
1748
1749 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1750 }
1751
1752 static LLVMValueRef fetch_constant(
1753 struct lp_build_tgsi_context *bld_base,
1754 const struct tgsi_full_src_register *reg,
1755 enum tgsi_opcode_type type,
1756 unsigned swizzle)
1757 {
1758 struct si_shader_context *ctx = si_shader_context(bld_base);
1759 struct lp_build_context *base = &bld_base->base;
1760 const struct tgsi_ind_register *ireg = &reg->Indirect;
1761 unsigned buf, idx;
1762
1763 LLVMValueRef addr, bufp;
1764 LLVMValueRef result;
1765
1766 if (swizzle == LP_CHAN_ALL) {
1767 unsigned chan;
1768 LLVMValueRef values[4];
1769 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
1770 values[chan] = fetch_constant(bld_base, reg, type, chan);
1771
1772 return lp_build_gather_values(bld_base->base.gallivm, values, 4);
1773 }
1774
1775 buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
1776 idx = reg->Register.Index * 4 + swizzle;
1777
1778 if (!reg->Register.Indirect && !reg->Dimension.Indirect) {
1779 if (type != TGSI_TYPE_DOUBLE)
1780 return bitcast(bld_base, type, ctx->constants[buf][idx]);
1781 else {
1782 return radeon_llvm_emit_fetch_double(bld_base,
1783 ctx->constants[buf][idx],
1784 ctx->constants[buf][idx + 1]);
1785 }
1786 }
1787
1788 if (reg->Register.Dimension && reg->Dimension.Indirect) {
1789 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
1790 LLVMValueRef index;
1791 index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
1792 reg->Dimension.Index,
1793 SI_NUM_CONST_BUFFERS);
1794 bufp = build_indexed_load_const(ctx, ptr, index);
1795 } else
1796 bufp = ctx->const_buffers[buf];
1797
1798 addr = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
1799 addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
1800 addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
1801 addr = lp_build_add(&bld_base->uint_bld, addr,
1802 lp_build_const_int32(base->gallivm, idx * 4));
1803
1804 result = buffer_load_const(base->gallivm->builder, bufp,
1805 addr, ctx->f32);
1806
1807 if (type != TGSI_TYPE_DOUBLE)
1808 result = bitcast(bld_base, type, result);
1809 else {
1810 LLVMValueRef addr2, result2;
1811 addr2 = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle + 1];
1812 addr2 = LLVMBuildLoad(base->gallivm->builder, addr2, "load addr reg2");
1813 addr2 = lp_build_mul_imm(&bld_base->uint_bld, addr2, 16);
1814 addr2 = lp_build_add(&bld_base->uint_bld, addr2,
1815 lp_build_const_int32(base->gallivm, idx * 4));
1816
1817 result2 = buffer_load_const(base->gallivm->builder, ctx->const_buffers[buf],
1818 addr2, ctx->f32);
1819
1820 result = radeon_llvm_emit_fetch_double(bld_base,
1821 result, result2);
1822 }
1823 return result;
1824 }
1825
1826 /* Upper 16 bits must be zero. */
1827 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1828 LLVMValueRef val[2])
1829 {
1830 return LLVMBuildOr(gallivm->builder, val[0],
1831 LLVMBuildShl(gallivm->builder, val[1],
1832 lp_build_const_int32(gallivm, 16),
1833 ""), "");
1834 }
1835
1836 /* Upper 16 bits are ignored and will be dropped. */
1837 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1838 LLVMValueRef val[2])
1839 {
1840 LLVMValueRef v[2] = {
1841 LLVMBuildAnd(gallivm->builder, val[0],
1842 lp_build_const_int32(gallivm, 0xffff), ""),
1843 val[1],
1844 };
1845 return si_llvm_pack_two_int16(gallivm, v);
1846 }
1847
1848 /* Initialize arguments for the shader export intrinsic */
1849 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1850 LLVMValueRef *values,
1851 unsigned target,
1852 LLVMValueRef *args)
1853 {
1854 struct si_shader_context *ctx = si_shader_context(bld_base);
1855 struct lp_build_context *uint =
1856 &ctx->radeon_bld.soa.bld_base.uint_bld;
1857 struct lp_build_context *base = &bld_base->base;
1858 struct gallivm_state *gallivm = base->gallivm;
1859 LLVMBuilderRef builder = base->gallivm->builder;
1860 LLVMValueRef val[4];
1861 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1862 unsigned chan;
1863 bool is_int8;
1864
1865 /* Default is 0xf. Adjusted below depending on the format. */
1866 args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1867
1868 /* Specify whether the EXEC mask represents the valid mask */
1869 args[1] = uint->zero;
1870
1871 /* Specify whether this is the last export */
1872 args[2] = uint->zero;
1873
1874 /* Specify the target we are exporting */
1875 args[3] = lp_build_const_int32(base->gallivm, target);
1876
1877 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1878 const union si_shader_key *key = &ctx->shader->key;
1879 unsigned col_formats = key->ps.epilog.spi_shader_col_format;
1880 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1881
1882 assert(cbuf >= 0 && cbuf < 8);
1883 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1884 is_int8 = (key->ps.epilog.color_is_int8 >> cbuf) & 0x1;
1885 }
1886
1887 args[4] = uint->zero; /* COMPR flag */
1888 args[5] = base->undef;
1889 args[6] = base->undef;
1890 args[7] = base->undef;
1891 args[8] = base->undef;
1892
1893 switch (spi_shader_col_format) {
1894 case V_028714_SPI_SHADER_ZERO:
1895 args[0] = uint->zero; /* writemask */
1896 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
1897 break;
1898
1899 case V_028714_SPI_SHADER_32_R:
1900 args[0] = uint->one; /* writemask */
1901 args[5] = values[0];
1902 break;
1903
1904 case V_028714_SPI_SHADER_32_GR:
1905 args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
1906 args[5] = values[0];
1907 args[6] = values[1];
1908 break;
1909
1910 case V_028714_SPI_SHADER_32_AR:
1911 args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
1912 args[5] = values[0];
1913 args[8] = values[3];
1914 break;
1915
1916 case V_028714_SPI_SHADER_FP16_ABGR:
1917 args[4] = uint->one; /* COMPR flag */
1918
1919 for (chan = 0; chan < 2; chan++) {
1920 LLVMValueRef pack_args[2] = {
1921 values[2 * chan],
1922 values[2 * chan + 1]
1923 };
1924 LLVMValueRef packed;
1925
1926 packed = lp_build_intrinsic(base->gallivm->builder,
1927 "llvm.SI.packf16",
1928 ctx->i32, pack_args, 2,
1929 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1930 args[chan + 5] =
1931 LLVMBuildBitCast(base->gallivm->builder,
1932 packed, ctx->f32, "");
1933 }
1934 break;
1935
1936 case V_028714_SPI_SHADER_UNORM16_ABGR:
1937 for (chan = 0; chan < 4; chan++) {
1938 val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
1939 val[chan] = LLVMBuildFMul(builder, val[chan],
1940 lp_build_const_float(gallivm, 65535), "");
1941 val[chan] = LLVMBuildFAdd(builder, val[chan],
1942 lp_build_const_float(gallivm, 0.5), "");
1943 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1944 ctx->i32, "");
1945 }
1946
1947 args[4] = uint->one; /* COMPR flag */
1948 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1949 si_llvm_pack_two_int16(gallivm, val));
1950 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1951 si_llvm_pack_two_int16(gallivm, val+2));
1952 break;
1953
1954 case V_028714_SPI_SHADER_SNORM16_ABGR:
1955 for (chan = 0; chan < 4; chan++) {
1956 /* Clamp between [-1, 1]. */
1957 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1958 values[chan],
1959 lp_build_const_float(gallivm, 1));
1960 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1961 val[chan],
1962 lp_build_const_float(gallivm, -1));
1963 /* Convert to a signed integer in [-32767, 32767]. */
1964 val[chan] = LLVMBuildFMul(builder, val[chan],
1965 lp_build_const_float(gallivm, 32767), "");
1966 /* If positive, add 0.5, else add -0.5. */
1967 val[chan] = LLVMBuildFAdd(builder, val[chan],
1968 LLVMBuildSelect(builder,
1969 LLVMBuildFCmp(builder, LLVMRealOGE,
1970 val[chan], base->zero, ""),
1971 lp_build_const_float(gallivm, 0.5),
1972 lp_build_const_float(gallivm, -0.5), ""), "");
1973 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1974 }
1975
1976 args[4] = uint->one; /* COMPR flag */
1977 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1978 si_llvm_pack_two_int32_as_int16(gallivm, val));
1979 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1980 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
1981 break;
1982
1983 case V_028714_SPI_SHADER_UINT16_ABGR: {
1984 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
1985 255 : 65535);
1986 /* Clamp. */
1987 for (chan = 0; chan < 4; chan++) {
1988 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1989 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1990 val[chan], max);
1991 }
1992
1993 args[4] = uint->one; /* COMPR flag */
1994 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1995 si_llvm_pack_two_int16(gallivm, val));
1996 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1997 si_llvm_pack_two_int16(gallivm, val+2));
1998 break;
1999 }
2000
2001 case V_028714_SPI_SHADER_SINT16_ABGR: {
2002 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2003 127 : 32767);
2004 LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
2005 -128 : -32768);
2006 /* Clamp. */
2007 for (chan = 0; chan < 4; chan++) {
2008 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2009 val[chan] = lp_build_emit_llvm_binary(bld_base,
2010 TGSI_OPCODE_IMIN,
2011 val[chan], max);
2012 val[chan] = lp_build_emit_llvm_binary(bld_base,
2013 TGSI_OPCODE_IMAX,
2014 val[chan], min);
2015 }
2016
2017 args[4] = uint->one; /* COMPR flag */
2018 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2019 si_llvm_pack_two_int32_as_int16(gallivm, val));
2020 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2021 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2022 break;
2023 }
2024
2025 case V_028714_SPI_SHADER_32_ABGR:
2026 memcpy(&args[5], values, sizeof(values[0]) * 4);
2027 break;
2028 }
2029 }
2030
2031 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2032 LLVMValueRef alpha)
2033 {
2034 struct si_shader_context *ctx = si_shader_context(bld_base);
2035 struct gallivm_state *gallivm = bld_base->base.gallivm;
2036
2037 if (ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2038 LLVMValueRef alpha_ref = LLVMGetParam(ctx->radeon_bld.main_fn,
2039 SI_PARAM_ALPHA_REF);
2040
2041 LLVMValueRef alpha_pass =
2042 lp_build_cmp(&bld_base->base,
2043 ctx->shader->key.ps.epilog.alpha_func,
2044 alpha, alpha_ref);
2045 LLVMValueRef arg =
2046 lp_build_select(&bld_base->base,
2047 alpha_pass,
2048 lp_build_const_float(gallivm, 1.0f),
2049 lp_build_const_float(gallivm, -1.0f));
2050
2051 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
2052 ctx->voidt, &arg, 1, 0);
2053 } else {
2054 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
2055 ctx->voidt, NULL, 0, 0);
2056 }
2057 }
2058
2059 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2060 LLVMValueRef alpha,
2061 unsigned samplemask_param)
2062 {
2063 struct si_shader_context *ctx = si_shader_context(bld_base);
2064 struct gallivm_state *gallivm = bld_base->base.gallivm;
2065 LLVMValueRef coverage;
2066
2067 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2068 coverage = LLVMGetParam(ctx->radeon_bld.main_fn,
2069 samplemask_param);
2070 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2071
2072 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2073 ctx->i32,
2074 &coverage, 1, LLVMReadNoneAttribute);
2075
2076 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2077 ctx->f32, "");
2078
2079 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2080 lp_build_const_float(gallivm,
2081 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2082
2083 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2084 }
2085
/* Compute up to 8 clip distances from a CLIPVERTEX output and fill the
 * two position exports V_008DFC_SQ_EXP_POS + 2 and + 3 (4 distances per
 * export).  The user clip planes are read from the
 * SI_VS_CONST_CLIP_PLANES constant buffer in the RW_BUFFERS array.
 *
 * pos:      per-target export argument arrays; pos[2] and pos[3] are written
 * out_elts: the 4 components of the CLIPVERTEX output
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm,
							   SI_VS_CONST_CLIP_PLANES);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start all four distances of this export at 0; the dot
		 * products below accumulate into them. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of component const_chan of
				 * plane (reg_index * 4 + chan): 4 floats
				 * per plane, 4 bytes per float. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(base->gallivm->builder, const_resource,
							     args[1], ctx->f32);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Standard export arguments: all 4 channels to POS + 2 + i.
		 * NOTE: args[1] was used as scratch above and is reset here. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
2132
2133 static void si_dump_streamout(struct pipe_stream_output_info *so)
2134 {
2135 unsigned i;
2136
2137 if (so->num_outputs)
2138 fprintf(stderr, "STREAMOUT\n");
2139
2140 for (i = 0; i < so->num_outputs; i++) {
2141 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2142 so->output[i].start_component;
2143 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2144 i, so->output[i].output_buffer,
2145 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2146 so->output[i].register_index,
2147 mask & 1 ? "x" : "",
2148 mask & 2 ? "y" : "",
2149 mask & 4 ? "z" : "",
2150 mask & 8 ? "w" : "");
2151 }
2152 }
2153
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Emits guarded stores of the enabled outputs to the streamout buffers
 * (ctx->so_buffers): only the first so_vtx_count threads of the wave
 * write, and each output is written only when its stream matches the
 * stream_id SGPR.
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = get_thread_id(ctx);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Currently selected stream: bits [25:24] of the config SGPR. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->radeon_bld.main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef so_offset = LLVMGetParam(ctx->radeon_bld.main_fn,
							      ctx->param_streamout_offset[i]);
			/* The SGPR holds the offset in dwords; convert to bytes. */
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Skip outputs the shader does not actually produce. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Only store when the output's stream is selected. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, ctx->so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
2270
2271
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits streamout stores (if enabled), then one export per output:
 * position-class outputs (POSITION, CLIPDIST, CLIPVERTEX and the misc
 * vector with PSIZE/EDGEFLAG/LAYER/VIEWPORT_INDEX) are gathered in
 * pos_args and emitted last; everything else becomes a PARAM export.
 * Also records nr_param_exports / nr_pos_exports / the per-output
 * param offsets in shader->info.
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
		&ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	if (outputs && ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(ctx, outputs, noutput);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].sid;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Collected into the misc vector (pos_args[1]) below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Goes to the misc vector AND a generic param export. */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			/* Fills pos_args[2] and pos_args[3] directly. */
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		/* Position exports are buffered and emitted together at the
		 * end so the DONE bit can be set on the last one. */
		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export", ctx->voidt,
					   args, 9, 0);
		}

		/* Clip distances are additionally exported as params. */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->selector->info.writes_psize |
						      (shader->selector->info.writes_edgeflag << 1) |
						      (shader->selector->info.writes_layer << 2) |
						      (shader->selector->info.writes_viewport_index << 3));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1][5] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  ctx->f32, "");
		}

		if (shader->selector->info.writes_layer)
			pos_args[1][7] = layer_value;

		if (shader->selector->info.writes_viewport_index)
			pos_args[1][8] = viewport_index_value;
	}

	/* Count the filled position export slots. */
	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, pos_args[i], 9, 0);
	}
}
2446
/* Copy the TCS inputs selected by the shader key's inputs_to_copy bitmask
 * from LDS (where the LS wrote them) to the off-chip tessellation ring
 * buffer (SI_HS_RING_TESS_OFFCHIP), from which a later stage reads them.
 * NOTE(review): presumably covers inputs the TCS passes through
 * unmodified — confirm against the callers of the TCS epilog. */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef invocation_id, rw_buffers, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* Invocation id = bits [12:8] of SI_PARAM_REL_IDS. */
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);

	/* Descriptor of the off-chip tess ring buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
	                lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	buffer_offset = LLVMGetParam(ctx->radeon_bld.main_fn, ctx->param_oc_lds);

	/* LDS dword address of this invocation's vertex inside the current
	 * input patch; the per-vertex stride is bits [20:13] of
	 * SI_PARAM_TCS_IN_LAYOUT. */
	lds_vertex_stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
	                                 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	inputs = ctx->shader->key.tcs.epilog.inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		/* Each input slot occupies 4 dwords in LDS. */
		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
		                lp_build_const_int32(gallivm, 4 * i),
		                 "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
		                              invocation_id,
		                              lp_build_const_int32(gallivm, i));

		/* Load the whole vec4 from LDS (~0 = all channels)... */
		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
		                              lds_ptr);

		/* ...and store it to the off-chip buffer. */
		build_tbuffer_store_dwords(ctx, buffer, value, 4, buffer_addr,
		                           buffer_offset, 0);
	}
}
2488
/* Write the current patch's tessellation factors (outer and inner levels)
 * from LDS to the tess factor ring buffer (SI_HS_RING_TESS_FACTOR).
 * Executed by invocation 0 only, since the factors are per-patch. */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx;

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* Buffer layout is: outer levels first, then inner levels. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	/* Quads need a second store for the 2 dwords beyond the vec4. */
	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->radeon_bld.main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	/* Store the outputs. */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 0);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 16);
	lp_build_endif(&if_ctx);
}
2581
/* This only writes the tessellation factor levels.
 *
 * In the non-monolithic case the factors are not written here; instead
 * the values the separate epilog part needs (RW_BUFFERS, tess factor
 * soffset, and the three VGPRs) are packed into the function's return
 * value. */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (!ctx->is_monolithic) {
		/* Return epilog parameters from this function. */
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef ret = ctx->return_value;
		LLVMValueRef rw_buffers, rw0, rw1, tf_soffset;
		unsigned vgpr;

		/* RW_BUFFERS pointer: split the 64-bit pointer into two
		 * i32s so it can be returned in SGPR slots 0 and 1. */
		rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
					  SI_PARAM_RW_BUFFERS);
		rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
		rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
		rw0 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.zero, "");
		rw1 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.one, "");
		ret = LLVMBuildInsertValue(builder, ret, rw0, 0, "");
		ret = LLVMBuildInsertValue(builder, ret, rw1, 1, "");

		/* Tess factor buffer soffset is after user SGPRs. */
		tf_soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					  SI_PARAM_TESS_FACTOR_OFFSET);
		ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
					   SI_TCS_NUM_USER_SGPR + 1, "");

		/* VGPRs: returned as floats, so bitcast first. */
		rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
		invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
		tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

		vgpr = SI_TCS_NUM_USER_SGPR + 2;
		ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
		ctx->return_value = ret;
		return;
	}

	si_copy_tcs_inputs(bld_base);
	si_write_tess_factors(bld_base, rel_patch_id, invocation_id, tf_lds_offset);
}
2633
2634 static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
2635 {
2636 struct si_shader_context *ctx = si_shader_context(bld_base);
2637 struct si_shader *shader = ctx->shader;
2638 struct tgsi_shader_info *info = &shader->selector->info;
2639 struct gallivm_state *gallivm = bld_base->base.gallivm;
2640 unsigned i, chan;
2641 LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
2642 ctx->param_rel_auto_id);
2643 LLVMValueRef vertex_dw_stride =
2644 unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
2645 LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
2646 vertex_dw_stride, "");
2647
2648 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2649 * its inputs from it. */
2650 for (i = 0; i < info->num_outputs; i++) {
2651 LLVMValueRef *out_ptr = ctx->radeon_bld.soa.outputs[i];
2652 unsigned name = info->output_semantic_name[i];
2653 unsigned index = info->output_semantic_index[i];
2654 int param = si_shader_io_get_unique_index(name, index);
2655 LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
2656 lp_build_const_int32(gallivm, param * 4), "");
2657
2658 for (chan = 0; chan < 4; chan++) {
2659 lds_store(bld_base, chan, dw_addr,
2660 LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
2661 }
2662 }
2663 }
2664
/* ES epilog: store all outputs (except VIEWPORT_INDEX and LAYER) to the
 * ESGS ring buffer, one dword per component, where the GS reads them. */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	/* Per-wave base offset into the ESGS ring. */
	LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					    ctx->param_es2gs_offset);
	unsigned chan;
	int i;

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->radeon_bld.soa.outputs[i];
		int param_index;

		/* These outputs are not passed through the ESGS ring. */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
							    info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* One dword per component at byte offset
			 * (param * 4 + chan) * 4, 32-bit UINT format. */
			build_tbuffer_store(ctx,
					    ctx->esgs_ring,
					    out_val, 1,
					    LLVMGetUndef(ctx->i32), soffset,
					    (4 * param_index + chan) * 4,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    0, 0, 1, 1, 0);
		}
	}
}
2703
2704 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2705 {
2706 struct si_shader_context *ctx = si_shader_context(bld_base);
2707 struct gallivm_state *gallivm = bld_base->base.gallivm;
2708 LLVMValueRef args[2];
2709
2710 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2711 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
2712 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2713 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
2714 }
2715
/* VS (hardware vertex shader) epilog: optionally clamp vertex colors,
 * gather all output values, handle the PrimitiveID export (exported
 * directly when monolithic, returned to the epilog part otherwise),
 * then emit the final exports via si_llvm_export_vs. */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->is_gs_copy_shader);

	/* +1 leaves room for the optional PrimitiveID output. */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->radeon_bld.main_fn,
						    SI_PARAM_VS_STATE_BITS);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, gallivm, cond);
			}

			/* Saturate the color output in place. */
			for (j = 0; j < 4; j++) {
				addr = ctx->radeon_bld.soa.outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = radeon_llvm_saturate(bld_base, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Gather the final output values for si_llvm_export_vs. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].name = info->output_semantic_name[i];
		outputs[i].sid = info->output_semantic_index[i];

		for (j = 0; j < 4; j++)
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->radeon_bld.soa.outputs[i][j],
					      "");
	}

	if (ctx->is_monolithic) {
		/* Export PrimitiveID when PS needs it. */
		if (si_vs_exports_prim_id(ctx->shader)) {
			outputs[i].name = TGSI_SEMANTIC_PRIMID;
			outputs[i].sid = 0;
			outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
						       get_primitive_id(bld_base, 0));
			outputs[i].values[1] = bld_base->base.undef;
			outputs[i].values[2] = bld_base->base.undef;
			outputs[i].values[3] = bld_base->base.undef;
			i++;
		}
	} else {
		/* Return the primitive ID from the LLVM function. */
		ctx->return_value =
			LLVMBuildInsertValue(gallivm->builder,
					     ctx->return_value,
					     bitcast(bld_base, TGSI_TYPE_FLOAT,
						     get_primitive_id(bld_base, 0)),
					     VS_EPILOG_PRIMID_LOC, "");
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
2802
/* Export depth, stencil and/or sample mask to the MRTZ target as the
 * final (DONE) export.  At least one of the three must be non-NULL;
 * NULL values leave their channel undefined and masked out. */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	LLVMValueRef args[9];
	unsigned mask = 0;

	assert(depth || stencil || samplemask);

	args[1] = uint->one; /* whether the EXEC mask is valid */
	args[2] = uint->one; /* DONE bit */

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

	args[4] = uint->zero; /* COMPR flag */
	args[5] = base->undef; /* R, depth */
	args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args[7] = base->undef; /* B, sample mask */
	args[8] = base->undef; /* A, alpha to mask */

	if (depth) {
		args[5] = depth;
		mask |= 0x1;
	}

	if (stencil) {
		args[6] = stencil;
		mask |= 0x2;
	}

	if (samplemask) {
		args[7] = samplemask;
		mask |= 0x4;
	}

	/* SI (except OLAND) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND)
		mask |= 0x1;

	/* Specify which components to enable */
	args[0] = lp_build_const_int32(base->gallivm, mask);

	lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
			   ctx->voidt, args, 9, 0);
}
2854
/**
 * Export one fragment-shader color output, applying the epilog key's
 * post-processing (clamp, alpha-to-one, alpha test, smoothing) first.
 *
 * \param color             the 4 output channels; may be modified in place
 * \param index             color buffer index (MRT number)
 * \param samplemask_param  function parameter index of the sample coverage mask
 * \param is_last           whether this is the final export of the shader
 *                          (sets the EXEC-valid and DONE bits)
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color to [0,1] if requested by the shader key. */
	if (ctx->shader->key.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = radeon_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test: only performed on color output 0. */
	if (index == 0 &&
	    ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing: scale alpha by the sample coverage. */
	if (ctx->shader->key.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true:
	 * broadcast this color to all enabled color buffers. */
	if (ctx->shader->key.ps.epilog.last_cbuf > 0) {
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			/* args[c][0] == zero means the export writes nothing. */
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
					   ctx->voidt, args[c], 9, 0);
		}
	} else {
		LLVMValueRef args[9];

		/* Export to the single MRT selected by \p index. */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, args, 9, 0);
	}
}
2923
2924 static void si_export_null(struct lp_build_tgsi_context *bld_base)
2925 {
2926 struct si_shader_context *ctx = si_shader_context(bld_base);
2927 struct lp_build_context *base = &bld_base->base;
2928 struct lp_build_context *uint = &bld_base->uint_bld;
2929 LLVMValueRef args[9];
2930
2931 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
2932 args[1] = uint->one; /* whether the EXEC mask is valid */
2933 args[2] = uint->one; /* DONE bit */
2934 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
2935 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
2936 args[5] = uint->undef; /* R */
2937 args[6] = uint->undef; /* G */
2938 args[7] = uint->undef; /* B */
2939 args[8] = uint->undef; /* A */
2940
2941 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
2942 ctx->voidt, args, 9, 0);
2943 }
2944
/**
 * Monolithic pixel shader epilogue: read the TGSI outputs and emit the
 * color and MRTZ exports directly (no separate epilog part).
 */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.epilog.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.epilog.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.epilog.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* 4 format bits per color buffer; non-zero means enabled. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	/* Read each output and emit the corresponding export. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->radeon_bld.soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    SI_PARAM_SAMPLE_COVERAGE,
					    last_color_export == i);
			break;
		default:
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* MRTZ is emitted last, after all color exports. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask);
}
3033
3034 /**
3035 * Return PS outputs in this order:
3036 *
3037 * v[0:3] = color0.xyzw
3038 * v[4:7] = color1.xyzw
3039 * ...
3040 * vN+0 = Depth
3041 * vN+1 = Stencil
3042 * vN+2 = SampleMask
3043 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3044 *
3045 * The alpha-ref SGPR is returned via its original location.
3046 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->radeon_bld.soa.outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z channel of POSITION is used (depth). */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			/* Stencil is in the Y channel. */
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->radeon_bld.main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: colors are packed first, only for color indices
	 * that were actually written (see the header comment above). */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->radeon_bld.main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3127
3128 /**
3129 * Given a v8i32 resource descriptor for a buffer, extract the size of the
3130 * buffer in number of elements and return it as an i32.
3131 */
3132 static LLVMValueRef get_buffer_size(
3133 struct lp_build_tgsi_context *bld_base,
3134 LLVMValueRef descriptor)
3135 {
3136 struct si_shader_context *ctx = si_shader_context(bld_base);
3137 struct gallivm_state *gallivm = bld_base->base.gallivm;
3138 LLVMBuilderRef builder = gallivm->builder;
3139 LLVMValueRef size =
3140 LLVMBuildExtractElement(builder, descriptor,
3141 lp_build_const_int32(gallivm, 6), "");
3142
3143 if (ctx->screen->b.chip_class >= VI) {
3144 /* On VI, the descriptor contains the size in bytes,
3145 * but TXQ must return the size in elements.
3146 * The stride is always non-zero for resources using TXQ.
3147 */
3148 LLVMValueRef stride =
3149 LLVMBuildExtractElement(builder, descriptor,
3150 lp_build_const_int32(gallivm, 5), "");
3151 stride = LLVMBuildLShr(builder, stride,
3152 lp_build_const_int32(gallivm, 16), "");
3153 stride = LLVMBuildAnd(builder, stride,
3154 lp_build_const_int32(gallivm, 0x3FFF), "");
3155
3156 size = LLVMBuildUDiv(builder, size, stride, "");
3157 }
3158
3159 return size;
3160 }
3161
3162 /**
3163 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
3164 * intrinsic names).
3165 */
3166 static void build_int_type_name(
3167 LLVMTypeRef type,
3168 char *buf, unsigned bufsize)
3169 {
3170 assert(bufsize >= 6);
3171
3172 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
3173 snprintf(buf, bufsize, "v%ui32",
3174 LLVMGetVectorSize(type));
3175 else
3176 strcpy(buf, "i32");
3177 }
3178
3179 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
3180 struct lp_build_tgsi_context *bld_base,
3181 struct lp_build_emit_data *emit_data);
3182
3183 /* Prevent optimizations (at least of memory accesses) across the current
3184 * point in the program by emitting empty inline assembly that is marked as
3185 * having side effects.
3186 */
3187 static void emit_optimization_barrier(struct si_shader_context *ctx)
3188 {
3189 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
3190 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
3191 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
3192 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
3193 }
3194
3195 static void emit_waitcnt(struct si_shader_context *ctx)
3196 {
3197 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3198 LLVMBuilderRef builder = gallivm->builder;
3199 LLVMValueRef args[1] = {
3200 lp_build_const_int32(gallivm, 0xf70)
3201 };
3202 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3203 ctx->voidt, args, 1, LLVMNoUnwindAttribute);
3204 }
3205
/* TGSI MEMBAR: implemented as a wait for outstanding memory operations. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	emit_waitcnt(si_shader_context(bld_base));
}
3215
3216 static LLVMValueRef
3217 shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
3218 const struct tgsi_full_src_register *reg)
3219 {
3220 LLVMValueRef ind_index;
3221 LLVMValueRef rsrc_ptr;
3222
3223 if (!reg->Register.Indirect)
3224 return ctx->shader_buffers[reg->Register.Index];
3225
3226 ind_index = get_bounded_indirect_index(ctx, &reg->Indirect,
3227 reg->Register.Index,
3228 SI_NUM_SHADER_BUFFERS);
3229
3230 rsrc_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_SHADER_BUFFERS);
3231 return build_indexed_load_const(ctx, rsrc_ptr, ind_index);
3232 }
3233
3234 static bool tgsi_is_array_sampler(unsigned target)
3235 {
3236 return target == TGSI_TEXTURE_1D_ARRAY ||
3237 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
3238 target == TGSI_TEXTURE_2D_ARRAY ||
3239 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
3240 target == TGSI_TEXTURE_CUBE_ARRAY ||
3241 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
3242 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3243 }
3244
3245 static bool tgsi_is_array_image(unsigned target)
3246 {
3247 return target == TGSI_TEXTURE_3D ||
3248 target == TGSI_TEXTURE_CUBE ||
3249 target == TGSI_TEXTURE_1D_ARRAY ||
3250 target == TGSI_TEXTURE_2D_ARRAY ||
3251 target == TGSI_TEXTURE_CUBE_ARRAY ||
3252 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3253 }
3254
3255 /**
3256 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
3257 *
 * At least on Tonga, executing image stores on images with DCC enabled and
 * non-trivial DCC metadata can eventually lead to lockups. This can occur when an
3260 * application binds an image as read-only but then uses a shader that writes
3261 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
3262 * program termination) in this case, but it doesn't cost much to be a bit
3263 * nicer: disabling DCC in the shader still leads to undefined results but
3264 * avoids the lockup.
3265 */
3266 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
3267 LLVMValueRef rsrc)
3268 {
3269 if (ctx->screen->b.chip_class <= CIK) {
3270 return rsrc;
3271 } else {
3272 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
3273 LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
3274 LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
3275 LLVMValueRef tmp;
3276
3277 tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
3278 tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
3279 return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
3280 }
3281 }
3282
3283 /**
3284 * Load the resource descriptor for \p image.
3285 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool dcc_off,	/* force the DCC enable bit off (for stores) */
	LLVMValueRef *rsrc)	/* out: the 256-bit image descriptor */
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		/* Fast path: use preloaded resources */
		*rsrc = ctx->images[image->Register.Index];
	} else {
		/* Indexing and manual load */
		LLVMValueRef ind_index;
		LLVMValueRef rsrc_ptr;
		LLVMValueRef tmp;

		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		ind_index = get_bounded_indirect_index(ctx, &image->Indirect,
						       image->Register.Index,
						       SI_NUM_IMAGES);

		rsrc_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
		tmp = build_indexed_load_const(ctx, rsrc_ptr, ind_index);
		/* See force_dcc_off() for why stores need this. */
		if (dcc_off)
			tmp = force_dcc_off(ctx, tmp);
		*rsrc = tmp;
	}
}
3326
3327 static LLVMValueRef image_fetch_coords(
3328 struct lp_build_tgsi_context *bld_base,
3329 const struct tgsi_full_instruction *inst,
3330 unsigned src)
3331 {
3332 struct gallivm_state *gallivm = bld_base->base.gallivm;
3333 LLVMBuilderRef builder = gallivm->builder;
3334 unsigned target = inst->Memory.Texture;
3335 unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
3336 LLVMValueRef coords[4];
3337 LLVMValueRef tmp;
3338 int chan;
3339
3340 for (chan = 0; chan < num_coords; ++chan) {
3341 tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
3342 tmp = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
3343 coords[chan] = tmp;
3344 }
3345
3346 if (num_coords == 1)
3347 return coords[0];
3348
3349 if (num_coords == 3) {
3350 /* LLVM has difficulties lowering 3-element vectors. */
3351 coords[3] = bld_base->uint_bld.undef;
3352 num_coords = 4;
3353 }
3354
3355 return lp_build_gather_values(gallivm, coords, num_coords);
3356 }
3357
3358 /**
3359 * Append the extra mode bits that are used by image load and store.
3360 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic)	/* atomics take no glc argument */
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = i1false; /* r128 */
	emit_data->args[emit_data->arg_count++] =
		tgsi_is_array_image(target) ? i1true : i1false; /* da */
	if (!atomic) {
		/* glc: bypass the L1 cache for coherent/volatile accesses. */
		emit_data->args[emit_data->arg_count++] =
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
3381
3382 /**
3383 * Given a 256 bit resource, extract the top half (which stores the buffer
3384 * resource in the case of textures and images).
3385 */
3386 static LLVMValueRef extract_rsrc_top_half(
3387 struct si_shader_context *ctx,
3388 LLVMValueRef rsrc)
3389 {
3390 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3391 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
3392 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
3393
3394 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, v2i128, "");
3395 rsrc = LLVMBuildExtractElement(gallivm->builder, rsrc, bld_base->uint_bld.one, "");
3396 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, "");
3397
3398 return rsrc;
3399 }
3400
3401 /**
3402 * Append the resource and indexing arguments for buffer intrinsics.
3403 *
3404 * \param rsrc the v4i32 buffer resource
3405 * \param index index into the buffer (stride-based)
3406 * \param offset byte offset into the buffer
3407 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic)	/* atomics take no glc argument */
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		/* glc: bypass the L1 cache for coherent/volatile accesses. */
		emit_data->args[emit_data->arg_count++] =
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
3430
/**
 * Gather the intrinsic arguments for a TGSI LOAD from a shader buffer
 * or image. Loads from TGSI_FILE_MEMORY are handled entirely in
 * load_emit_memory() and need no arguments here.
 */
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use the buffer-load path with the
			 * coordinate acting as the element index. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3475
/**
 * Emit the buffer-load intrinsic for a TGSI LOAD from a shader buffer,
 * choosing the narrowest intrinsic that covers the destination writemask.
 */
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	const char *intrinsic_name;
	LLVMTypeRef dst_type;

	switch (count) {
	case 1:
		intrinsic_name = "llvm.amdgcn.buffer.load.f32";
		dst_type = ctx->f32;
		break;
	case 2:
		intrinsic_name = "llvm.amdgcn.buffer.load.v2f32";
		dst_type = LLVMVectorType(ctx->f32, 2);
		break;
	default: /* 3 and 4 components both use the v4f32 load */
		intrinsic_name = "llvm.amdgcn.buffer.load.v4f32";
		dst_type = ctx->v4f32;
		count = 4;
	}

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, intrinsic_name, dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
}
3507
3508 static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
3509 const struct tgsi_full_instruction *inst,
3510 LLVMTypeRef type, int arg)
3511 {
3512 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3513 LLVMBuilderRef builder = gallivm->builder;
3514 LLVMValueRef offset, ptr;
3515 int addr_space;
3516
3517 offset = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, arg, 0);
3518 offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");
3519
3520 ptr = ctx->shared_memory;
3521 ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
3522 addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
3523 ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");
3524
3525 return ptr;
3526 }
3527
3528 static void load_emit_memory(
3529 struct si_shader_context *ctx,
3530 struct lp_build_emit_data *emit_data)
3531 {
3532 const struct tgsi_full_instruction *inst = emit_data->inst;
3533 struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
3534 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3535 LLVMBuilderRef builder = gallivm->builder;
3536 unsigned writemask = inst->Dst[0].Register.WriteMask;
3537 LLVMValueRef channels[4], ptr, derived_ptr, index;
3538 int chan;
3539
3540 ptr = get_memory_ptr(ctx, inst, base->elem_type, 1);
3541
3542 for (chan = 0; chan < 4; ++chan) {
3543 if (!(writemask & (1 << chan))) {
3544 channels[chan] = LLVMGetUndef(base->elem_type);
3545 continue;
3546 }
3547
3548 index = lp_build_const_int32(gallivm, chan);
3549 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3550 channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
3551 }
3552 emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
3553 }
3554
/**
 * Emit a TGSI LOAD, dispatching on the source register file:
 * shared memory, shader buffer, buffer image, or regular image.
 */
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[32];
	char coords_type[8];

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile loads wait for all previous memory operations first. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
	} else {
		/* The image-load intrinsic name is suffixed with the
		 * coordinate type, e.g. "llvm.amdgcn.image.load.v4i32". */
		build_int_type_name(LLVMTypeOf(emit_data->args[0]),
				    coords_type, sizeof(coords_type));

		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.load.%s", coords_type);

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
	}
}
3600
/**
 * Gather the intrinsic arguments for a TGSI STORE to a shader buffer
 * or image. The value to store (Src[1], gathered into a 4-vector) is
 * always the first argument.
 */
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	/* The destination register names the buffer/image resource;
	 * view it as a source register for the fetch helpers. */
	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		/* Src[0].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		coords = image_fetch_coords(bld_base, inst, 0);

		if (target == TGSI_TEXTURE_BUFFER) {
			image_fetch_rsrc(bld_base, &memory, false, &rsrc);

			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[1] = coords;
			/* dcc_off=true: see force_dcc_off() for the rationale. */
			image_fetch_rsrc(bld_base, &memory, true, &emit_data->args[2]);
			emit_data->args[3] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3659
/**
 * Emit buffer-store intrinsics for a TGSI STORE to a shader buffer,
 * splitting the destination writemask into runs of consecutive
 * components and emitting one store per run.
 */
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	struct lp_build_context *uint_bld = &ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			/* Put the third component back so the next loop
			 * iteration emits a 1-element store for it. */
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			/* Build a v2f32 from components start and start+1. */
			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				uint_bld->zero, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start + 1), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, uint_bld->one, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		/* Advance the byte offset to the first stored component. */
		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				lp_build_const_int32(gallivm, start * 4), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;

		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMNoUnwindAttribute);
	}
}
3732
3733 static void store_emit_memory(
3734 struct si_shader_context *ctx,
3735 struct lp_build_emit_data *emit_data)
3736 {
3737 const struct tgsi_full_instruction *inst = emit_data->inst;
3738 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3739 struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
3740 LLVMBuilderRef builder = gallivm->builder;
3741 unsigned writemask = inst->Dst[0].Register.WriteMask;
3742 LLVMValueRef ptr, derived_ptr, data, index;
3743 int chan;
3744
3745 ptr = get_memory_ptr(ctx, inst, base->elem_type, 0);
3746
3747 for (chan = 0; chan < 4; ++chan) {
3748 if (!(writemask & (1 << chan))) {
3749 continue;
3750 }
3751 data = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, 1, chan);
3752 index = lp_build_const_int32(gallivm, chan);
3753 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3754 LLVMBuildStore(builder, data, derived_ptr);
3755 }
3756 }
3757
/* Emit a TGSI STORE instruction.
 *
 * Dispatches on the destination file: memory (LDS-style pointer stores),
 * buffer (raw buffer store intrinsic), or image (typed/format store
 * intrinsics).
 */
static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[32];
	char coords_type[8];

	/* NOTE(review): the memory path returns before the volatile
	 * waitcnt below — presumably intentional since it is not a
	 * VMEM access, but worth confirming. */
	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile accesses must not be reordered past outstanding
	 * memory operations. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args, emit_data->arg_count,
			LLVMNoUnwindAttribute);
	} else {
		/* The image store intrinsic is overloaded on the coordinate
		 * vector type (args[1]), e.g. ".v4i32". */
		build_int_type_name(LLVMTypeOf(emit_data->args[1]),
				    coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.store.%s", coords_type);

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMNoUnwindAttribute);
	}
}
3802
/* Gather the intrinsic arguments for a TGSI atomic opcode.
 *
 * Fills emit_data->args with the data operand(s) followed by the
 * buffer/image addressing arguments, and sets dst_type to the scalar
 * element type.
 */
static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = bld_base->base.elem_type;

	/* Atomics operate on integer bit patterns; bitcast the operands. */
	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src 1 holds the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, true);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0],
				 target != TGSI_TEXTURE_BUFFER, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use the buffer atomic path with the
			 * upper half of the image descriptor. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, true);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true);
		}
	}
}
3862
3863 static void atomic_emit_memory(struct si_shader_context *ctx,
3864 struct lp_build_emit_data *emit_data) {
3865 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3866 LLVMBuilderRef builder = gallivm->builder;
3867 const struct tgsi_full_instruction * inst = emit_data->inst;
3868 LLVMValueRef ptr, result, arg;
3869
3870 ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
3871
3872 arg = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, 2, 0);
3873 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
3874
3875 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
3876 LLVMValueRef new_data;
3877 new_data = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base,
3878 inst, 3, 0);
3879
3880 new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");
3881
3882 #if HAVE_LLVM >= 0x309
3883 result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
3884 LLVMAtomicOrderingSequentiallyConsistent,
3885 LLVMAtomicOrderingSequentiallyConsistent,
3886 false);
3887 #endif
3888
3889 result = LLVMBuildExtractValue(builder, result, 0, "");
3890 } else {
3891 LLVMAtomicRMWBinOp op;
3892
3893 switch(inst->Instruction.Opcode) {
3894 case TGSI_OPCODE_ATOMUADD:
3895 op = LLVMAtomicRMWBinOpAdd;
3896 break;
3897 case TGSI_OPCODE_ATOMXCHG:
3898 op = LLVMAtomicRMWBinOpXchg;
3899 break;
3900 case TGSI_OPCODE_ATOMAND:
3901 op = LLVMAtomicRMWBinOpAnd;
3902 break;
3903 case TGSI_OPCODE_ATOMOR:
3904 op = LLVMAtomicRMWBinOpOr;
3905 break;
3906 case TGSI_OPCODE_ATOMXOR:
3907 op = LLVMAtomicRMWBinOpXor;
3908 break;
3909 case TGSI_OPCODE_ATOMUMIN:
3910 op = LLVMAtomicRMWBinOpUMin;
3911 break;
3912 case TGSI_OPCODE_ATOMUMAX:
3913 op = LLVMAtomicRMWBinOpUMax;
3914 break;
3915 case TGSI_OPCODE_ATOMIMIN:
3916 op = LLVMAtomicRMWBinOpMin;
3917 break;
3918 case TGSI_OPCODE_ATOMIMAX:
3919 op = LLVMAtomicRMWBinOpMax;
3920 break;
3921 default:
3922 unreachable("unknown atomic opcode");
3923 }
3924
3925 result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
3926 LLVMAtomicOrderingSequentiallyConsistent,
3927 false);
3928 }
3929 emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
3930 }
3931
/* Emit a TGSI atomic instruction.
 *
 * Memory-file atomics become native LLVM atomics (atomic_emit_memory);
 * buffer and image atomics are lowered to the corresponding
 * llvm.amdgcn.{buffer,image}.atomic.* intrinsics.
 */
static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		/* action->intr_name carries the operation name,
		 * e.g. "add", "cmpswap". */
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		char coords_type[8];

		/* Image atomics are additionally overloaded on the
		 * coordinate vector type (args[1]). */
		build_int_type_name(LLVMTypeOf(emit_data->args[1]),
				    coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	/* The intrinsic returns the previous value as an integer;
	 * bitcast to the float base type expected by TGSI. */
	tmp = lp_build_intrinsic(
		builder, intrinsic_name, bld_base->uint_bld.elem_type,
		emit_data->args, emit_data->arg_count,
		LLVMNoUnwindAttribute);
	emit_data->output[emit_data->chan] =
		LLVMBuildBitCast(builder, tmp, bld_base->base.elem_type, "");
}
3970
/* Gather the arguments for RESQ (resource query).
 *
 * For buffers only the resource descriptor is needed; for non-buffer
 * images the full llvm.SI.getresinfo.i32 argument list is built with
 * mip level 0.
 */
static void resq_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		emit_data->args[0] = bld_base->uint_bld.zero; /* mip level */
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[1]);
		emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
		emit_data->args[3] = bld_base->uint_bld.zero; /* unorm */
		emit_data->args[4] = bld_base->uint_bld.zero; /* r128 */
		emit_data->args[5] = tgsi_is_array_image(inst->Memory.Texture) ?
			bld_base->uint_bld.one : bld_base->uint_bld.zero; /* da */
		emit_data->args[6] = bld_base->uint_bld.zero; /* glc */
		emit_data->args[7] = bld_base->uint_bld.zero; /* slc */
		emit_data->args[8] = bld_base->uint_bld.zero; /* tfe */
		emit_data->args[9] = bld_base->uint_bld.zero; /* lwe */
		emit_data->arg_count = 10;
	}
}
4003
/* Emit RESQ: return the size of a buffer or image resource.
 *
 * Buffers read NUM_RECORDS (dword 2) straight out of the descriptor;
 * buffer images use get_buffer_size; other images query getresinfo and
 * post-process cube arrays.
 */
static void resq_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef out;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		/* Element 2 of the descriptor holds the buffer size.
		 * NOTE(review): assumed to be NUM_RECORDS — confirm against
		 * the buffer descriptor layout. */
		out = LLVMBuildExtractElement(builder, emit_data->args[0],
					      lp_build_const_int32(gallivm, 2), "");
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		out = lp_build_intrinsic(
			builder, "llvm.SI.getresinfo.i32", emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

		/* Divide the number of layers by 6 to get the number of cubes. */
		if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY) {
			LLVMValueRef imm2 = lp_build_const_int32(gallivm, 2);
			LLVMValueRef imm6 = lp_build_const_int32(gallivm, 6);

			LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
			z = LLVMBuildBitCast(builder, z, bld_base->uint_bld.elem_type, "");
			z = LLVMBuildSDiv(builder, z, imm6, "");
			z = LLVMBuildBitCast(builder, z, bld_base->base.elem_type, "");
			out = LLVMBuildInsertElement(builder, out, z, imm2, "");
		}
	}

	emit_data->output[emit_data->chan] = out;
}
4040
/* Fill emit_data with the standard argument list for the llvm.SI.image.*
 * / llvm.SI.getresinfo-style intrinsics:
 *   coords vector, resource, [sampler], dmask, unorm, r128, da, glc,
 *   slc, tfe, lwe.
 *
 * \param param  the coordinate/address words
 * \param count  number of valid entries in \p param
 * \param dmask  which result components to fetch
 */
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned opcode, unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	unsigned num_args;
	unsigned is_rect = target == TGSI_TEXTURE_RECT;

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	/* Texture coordinates. */
	if (count > 1)
		emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
	else
		emit_data->args[0] = param[0];

	/* Resource. */
	emit_data->args[1] = res_ptr;
	num_args = 2;

	/* TXF and TXQ return integer data and take no sampler. */
	if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
		emit_data->dst_type = ctx->v4i32;
	else {
		emit_data->dst_type = ctx->v4f32;

		emit_data->args[num_args++] = samp_ptr;
	}

	emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm,
					tgsi_is_array_sampler(target)); /* da */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */

	emit_data->arg_count = num_args;
}
4086
4087 static const struct lp_build_tgsi_action tex_action;
4088
/* Which descriptor to load from a combined sampler-view list entry;
 * see get_sampler_desc_custom for the slot layout. */
enum desc_type {
	DESC_IMAGE,	/* image view, dwords [0:7] of the entry */
	DESC_FMASK,	/* FMASK view, dwords [8:15] */
	DESC_SAMPLER	/* sampler state, dwords [12:15] */
};
4094
4095 static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
4096 {
4097 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
4098 CONST_ADDR_SPACE);
4099 }
4100
4101 /**
4102 * Load an image view, fmask view. or sampler state descriptor.
4103 */
4104 static LLVMValueRef get_sampler_desc_custom(struct si_shader_context *ctx,
4105 LLVMValueRef list, LLVMValueRef index,
4106 enum desc_type type)
4107 {
4108 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
4109 LLVMBuilderRef builder = gallivm->builder;
4110
4111 switch (type) {
4112 case DESC_IMAGE:
4113 /* The image is at [0:7]. */
4114 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4115 break;
4116 case DESC_FMASK:
4117 /* The FMASK is at [8:15]. */
4118 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4119 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
4120 break;
4121 case DESC_SAMPLER:
4122 /* The sampler state is at [12:15]. */
4123 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
4124 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
4125 list = LLVMBuildPointerCast(builder, list,
4126 const_array(ctx->v4i32, 0), "");
4127 break;
4128 }
4129
4130 return build_indexed_load_const(ctx, list, index);
4131 }
4132
4133 static LLVMValueRef get_sampler_desc(struct si_shader_context *ctx,
4134 LLVMValueRef index, enum desc_type type)
4135 {
4136 LLVMValueRef list = LLVMGetParam(ctx->radeon_bld.main_fn,
4137 SI_PARAM_SAMPLERS);
4138
4139 return get_sampler_desc_custom(ctx, list, index, type);
4140 }
4141
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * SI-CI:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * VI:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 *
 * \param res   image view descriptor (img7 = element 7)
 * \param samp  sampler state descriptor (samp0 = element 0)
 * \return the (possibly patched) sampler state
 */
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
	LLVMValueRef img7, samp0;

	/* VI and newer handle this in hardware; nothing to patch. */
	if (ctx->screen->b.chip_class >= VI)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					LLVMConstInt(ctx->i32, 0, 0), "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      LLVMConstInt(ctx->i32, 0, 0), "");
}
4170
/* Fetch the resource, sampler-state and FMASK descriptors for a texture
 * instruction.
 *
 * The sampler operand is the last source register. Indirectly indexed
 * samplers load descriptors through get_sampler_desc; directly indexed
 * ones use the values preloaded into ctx->sampler_views/states/fmasks.
 * samp_ptr and fmask_ptr may be NULL if the caller doesn't need them;
 * MSAA targets get an FMASK but no sampler state, and vice versa.
 */
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	unsigned sampler_index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (emit_data->inst->Src[sampler_src].Register.Indirect) {
		const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];
		LLVMValueRef ind_index;

		/* Clamp the index so out-of-bounds indirect accesses can't
		 * read past the descriptor array. */
		ind_index = get_bounded_indirect_index(ctx,
						       &reg->Indirect,
						       reg->Register.Index,
						       SI_NUM_SAMPLERS);

		*res_ptr = get_sampler_desc(ctx, ind_index, DESC_IMAGE);

		if (target == TGSI_TEXTURE_2D_MSAA ||
		    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
			if (samp_ptr)
				*samp_ptr = NULL;
			if (fmask_ptr)
				*fmask_ptr = get_sampler_desc(ctx, ind_index, DESC_FMASK);
		} else {
			if (samp_ptr) {
				*samp_ptr = get_sampler_desc(ctx, ind_index, DESC_SAMPLER);
				*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
			}
			if (fmask_ptr)
				*fmask_ptr = NULL;
		}
	} else {
		*res_ptr = ctx->sampler_views[sampler_index];
		if (samp_ptr)
			*samp_ptr = ctx->sampler_states[sampler_index];
		if (fmask_ptr)
			*fmask_ptr = ctx->fmasks[sampler_index];
	}
}
4218
/* Gather the arguments for TXQ (texture size query).
 *
 * Buffer textures answer the query from the descriptor itself; other
 * targets pass the requested mip level to getresinfo.
 */
static void txq_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef res_ptr;
	LLVMValueRef address;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Read the size from the buffer descriptor directly. */
		LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
		emit_data->args[0] = get_buffer_size(bld_base, res);
		return;
	}

	/* Textures - set the mip level. */
	address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	set_tex_fetch_args(ctx, emit_data, TGSI_OPCODE_TXQ, target, res_ptr,
			   NULL, &address, 1, 0xf);
}
4246
/* Emit TXQ: buffer targets return the precomputed size; everything else
 * calls getresinfo and fixes up cube-array layer counts.
 */
static void txq_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct lp_build_context *base = &bld_base->base;
	unsigned target = emit_data->inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
		base->gallivm->builder, "llvm.SI.getresinfo.i32",
		emit_data->dst_type, emit_data->args, emit_data->arg_count,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Divide the number of layers by 6 to get the number of cubes. */
	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
		LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);

		/* The layer count is in the Z component (element 2). */
		LLVMValueRef v4 = emit_data->output[emit_data->chan];
		LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
		z = LLVMBuildSDiv(builder, z, six, "");

		emit_data->output[emit_data->chan] =
			LLVMBuildInsertElement(builder, v4, z, two, "");
	}
}
4280
/* Gather the address words and descriptors for a texture opcode.
 *
 * Builds the hardware address vector in this order: packed offsets,
 * LOD bias, depth-compare value, derivatives, coordinates, LOD/sample
 * index — then lets set_tex_fetch_args assemble the final intrinsic
 * argument list. Also handles FMASK-based MSAA sample remapping, TXF
 * constant offsets, and the TG4 component-select dmask.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef coords[5], derivs[6];
	LLVMValueRef address[16];
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
	unsigned count = 0;
	unsigned chan;
	unsigned num_deriv_channels = 0;
	bool has_offset = inst->Texture.NumOffsets > 0;
	LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
	unsigned dmask = 0xf;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);

	/* Buffer textures bypass the image path entirely: extract the
	 * second half of the v8i32 descriptor and issue a vertex-style
	 * buffer load. */
	if (target == TGSI_TEXTURE_BUFFER) {
		LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);

		/* Bitcast and truncate v8i32 to v16i8. */
		LLVMValueRef res = res_ptr;
		res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
		res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
		res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");

		emit_data->dst_type = ctx->v4f32;
		emit_data->args[0] = res;
		emit_data->args[1] = bld_base->uint_bld.zero;
		emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
		emit_data->arg_count = 3;
		return;
	}

	/* Fetch and project texture coordinates */
	coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
	for (chan = 0; chan < 3; chan++ ) {
		coords[chan] = lp_build_emit_fetch(bld_base,
						   emit_data->inst, 0,
						   chan);
		if (opcode == TGSI_OPCODE_TXP)
			coords[chan] = lp_build_emit_llvm_binary(bld_base,
								 TGSI_OPCODE_DIV,
								 coords[chan],
								 coords[3]);
	}

	if (opcode == TGSI_OPCODE_TXP)
		coords[3] = bld_base->base.one;

	/* Pack offsets. */
	if (has_offset && opcode != TGSI_OPCODE_TXF) {
		/* The offsets are six-bit signed integers packed like this:
		 * X=[5:0], Y=[13:8], and Z=[21:16].
		 */
		LLVMValueRef offset[3], pack;

		assert(inst->Texture.NumOffsets == 1);

		for (chan = 0; chan < 3; chan++) {
			offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
								     emit_data->inst, 0, chan);
			offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
						    lp_build_const_int32(gallivm, 0x3f), "");
			if (chan)
				offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
							    lp_build_const_int32(gallivm, chan*8), "");
		}

		pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
		address[count++] = pack;
	}

	/* Pack LOD bias value */
	if (opcode == TGSI_OPCODE_TXB)
		address[count++] = coords[3];
	if (opcode == TGSI_OPCODE_TXB2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	/* Pack depth comparison value */
	if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
		/* SHADOWCUBE_ARRAY uses all four coords for the cube face
		 * and layer, so the reference value comes from src1.x. */
		if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
			address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
		} else {
			assert(ref_pos >= 0);
			address[count++] = coords[ref_pos];
		}
	}

	/* Pack user derivatives */
	if (opcode == TGSI_OPCODE_TXD) {
		int param, num_src_deriv_channels;

		switch (target) {
		case TGSI_TEXTURE_3D:
			num_src_deriv_channels = 3;
			num_deriv_channels = 3;
			break;
		case TGSI_TEXTURE_2D:
		case TGSI_TEXTURE_SHADOW2D:
		case TGSI_TEXTURE_RECT:
		case TGSI_TEXTURE_SHADOWRECT:
		case TGSI_TEXTURE_2D_ARRAY:
		case TGSI_TEXTURE_SHADOW2D_ARRAY:
			num_src_deriv_channels = 2;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_CUBE:
		case TGSI_TEXTURE_SHADOWCUBE:
		case TGSI_TEXTURE_CUBE_ARRAY:
		case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
			/* Cube derivatives will be converted to 2D. */
			num_src_deriv_channels = 3;
			num_deriv_channels = 2;
			break;
		case TGSI_TEXTURE_1D:
		case TGSI_TEXTURE_SHADOW1D:
		case TGSI_TEXTURE_1D_ARRAY:
		case TGSI_TEXTURE_SHADOW1D_ARRAY:
			num_src_deriv_channels = 1;
			num_deriv_channels = 1;
			break;
		default:
			unreachable("invalid target");
		}

		/* src1 = ddx, src2 = ddy */
		for (param = 0; param < 2; param++)
			for (chan = 0; chan < num_src_deriv_channels; chan++)
				derivs[param * num_src_deriv_channels + chan] =
					lp_build_emit_fetch(bld_base, inst, param+1, chan);
	}

	if (target == TGSI_TEXTURE_CUBE ||
	    target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
		radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords, derivs);

	if (opcode == TGSI_OPCODE_TXD)
		for (int i = 0; i < num_deriv_channels * 2; i++)
			address[count++] = derivs[i];

	/* Pack texture coordinates */
	address[count++] = coords[0];
	if (num_coords > 1)
		address[count++] = coords[1];
	if (num_coords > 2)
		address[count++] = coords[2];

	/* Pack LOD or sample index */
	if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
		address[count++] = coords[3];
	else if (opcode == TGSI_OPCODE_TXL2)
		address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);

	if (count > 16) {
		assert(!"Cannot handle more than 16 texture address parameters");
		count = 16;
	}

	/* All address words are passed as i32. */
	for (chan = 0; chan < count; chan++ ) {
		address[chan] = LLVMBuildBitCast(gallivm->builder,
						 address[chan], ctx->i32, "");
	}

	/* Adjust the sample index according to FMASK.
	 *
	 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
	 * which is the identity mapping. Each nibble says which physical sample
	 * should be fetched to get that sample.
	 *
	 * For example, 0x11111100 means there are only 2 samples stored and
	 * the second sample covers 3/4 of the pixel. When reading samples 0
	 * and 1, return physical sample 0 (determined by the first two 0s
	 * in FMASK), otherwise return physical sample 1.
	 *
	 * The sample index should be adjusted as follows:
	 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
	 */
	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		struct lp_build_context *uint_bld = &bld_base->uint_bld;
		struct lp_build_emit_data txf_emit_data = *emit_data;
		LLVMValueRef txf_address[4];
		unsigned txf_count = count;
		struct tgsi_full_instruction inst = {};

		memcpy(txf_address, address, sizeof(txf_address));

		if (target == TGSI_TEXTURE_2D_MSAA) {
			txf_address[2] = bld_base->uint_bld.zero;
		}
		txf_address[3] = bld_base->uint_bld.zero;

		/* Read FMASK using TXF. */
		inst.Instruction.Opcode = TGSI_OPCODE_TXF;
		inst.Texture.Texture = target;
		txf_emit_data.inst = &inst;
		txf_emit_data.chan = 0;
		set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
				   target, fmask_ptr, NULL,
				   txf_address, txf_count, 0xf);
		build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);

		/* Initialize some constants. */
		LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
		LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);

		/* Apply the formula. */
		LLVMValueRef fmask =
			LLVMBuildExtractElement(gallivm->builder,
						txf_emit_data.output[0],
						uint_bld->zero, "");

		unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;

		LLVMValueRef sample_index4 =
			LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");

		LLVMValueRef shifted_fmask =
			LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");

		LLVMValueRef final_sample =
			LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");

		/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
		 * resource descriptor is 0 (invalid),
		 */
		LLVMValueRef fmask_desc =
			LLVMBuildBitCast(gallivm->builder, fmask_ptr,
					 ctx->v8i32, "");

		LLVMValueRef fmask_word1 =
			LLVMBuildExtractElement(gallivm->builder, fmask_desc,
						uint_bld->one, "");

		LLVMValueRef word1_is_nonzero =
			LLVMBuildICmp(gallivm->builder, LLVMIntNE,
				      fmask_word1, uint_bld->zero, "");

		/* Replace the MSAA sample index. */
		address[sample_chan] =
			LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
					final_sample, address[sample_chan], "");
	}

	if (opcode == TGSI_OPCODE_TXF) {
		/* add tex offsets */
		if (inst->Texture.NumOffsets) {
			struct lp_build_context *uint_bld = &bld_base->uint_bld;
			struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
			const struct tgsi_texture_offset *off = inst->TexOffsets;

			assert(inst->Texture.NumOffsets == 1);

			/* TXF offsets are immediates; add them to the integer
			 * coordinates directly, per dimension. */
			switch (target) {
			case TGSI_TEXTURE_3D:
				address[2] = lp_build_add(uint_bld, address[2],
						bld->immediates[off->Index][off->SwizzleZ]);
				/* fall through */
			case TGSI_TEXTURE_2D:
			case TGSI_TEXTURE_SHADOW2D:
			case TGSI_TEXTURE_RECT:
			case TGSI_TEXTURE_SHADOWRECT:
			case TGSI_TEXTURE_2D_ARRAY:
			case TGSI_TEXTURE_SHADOW2D_ARRAY:
				address[1] =
					lp_build_add(uint_bld, address[1],
						bld->immediates[off->Index][off->SwizzleY]);
				/* fall through */
			case TGSI_TEXTURE_1D:
			case TGSI_TEXTURE_SHADOW1D:
			case TGSI_TEXTURE_1D_ARRAY:
			case TGSI_TEXTURE_SHADOW1D_ARRAY:
				address[0] =
					lp_build_add(uint_bld, address[0],
						bld->immediates[off->Index][off->SwizzleX]);
				break;
				/* texture offsets do not apply to other texture targets */
			}
		}
	}

	if (opcode == TGSI_OPCODE_TG4) {
		unsigned gather_comp = 0;

		/* DMASK was repurposed for GATHER4. 4 components are always
		 * returned and DMASK works like a swizzle - it selects
		 * the component to fetch. The only valid DMASK values are
		 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
		 * (red,red,red,red) etc.) The ISA document doesn't mention
		 * this.
		 */

		/* Get the component index from src1.x for Gather4. */
		if (!tgsi_is_shadow_target(target)) {
			LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
			LLVMValueRef comp_imm;
			struct tgsi_src_register src1 = inst->Src[1].Register;

			assert(src1.File == TGSI_FILE_IMMEDIATE);

			comp_imm = imms[src1.Index][src1.SwizzleX];
			gather_comp = LLVMConstIntGetZExtValue(comp_imm);
			gather_comp = CLAMP(gather_comp, 0, 3);
		}

		dmask = 1 << gather_comp;
	}

	set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
			   samp_ptr, address, count, dmask);
}
4600
4601 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
4602 struct lp_build_tgsi_context *bld_base,
4603 struct lp_build_emit_data *emit_data)
4604 {
4605 struct si_shader_context *ctx = si_shader_context(bld_base);
4606 struct lp_build_context *base = &bld_base->base;
4607 unsigned opcode = emit_data->inst->Instruction.Opcode;
4608 unsigned target = emit_data->inst->Texture.Texture;
4609 char intr_name[127];
4610 bool has_offset = emit_data->inst->Texture.NumOffsets > 0;
4611 bool is_shadow = tgsi_is_shadow_target(target);
4612 char type[64];
4613 const char *name = "llvm.SI.image.sample";
4614 const char *infix = "";
4615
4616 if (target == TGSI_TEXTURE_BUFFER) {
4617 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4618 base->gallivm->builder,
4619 "llvm.SI.vs.load.input", emit_data->dst_type,
4620 emit_data->args, emit_data->arg_count,
4621 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
4622 return;
4623 }
4624
4625 switch (opcode) {
4626 case TGSI_OPCODE_TXF:
4627 name = target == TGSI_TEXTURE_2D_MSAA ||
4628 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
4629 "llvm.SI.image.load" :
4630 "llvm.SI.image.load.mip";
4631 is_shadow = false;
4632 has_offset = false;
4633 break;
4634 case TGSI_OPCODE_LODQ:
4635 name = "llvm.SI.getlod";
4636 is_shadow = false;
4637 has_offset = false;
4638 break;
4639 case TGSI_OPCODE_TEX:
4640 case TGSI_OPCODE_TEX2:
4641 case TGSI_OPCODE_TXP:
4642 if (ctx->type != PIPE_SHADER_FRAGMENT)
4643 infix = ".lz";
4644 break;
4645 case TGSI_OPCODE_TXB:
4646 case TGSI_OPCODE_TXB2:
4647 assert(ctx->type == PIPE_SHADER_FRAGMENT);
4648 infix = ".b";
4649 break;
4650 case TGSI_OPCODE_TXL:
4651 case TGSI_OPCODE_TXL2:
4652 infix = ".l";
4653 break;
4654 case TGSI_OPCODE_TXD:
4655 infix = ".d";
4656 break;
4657 case TGSI_OPCODE_TG4:
4658 name = "llvm.SI.gather4";
4659 infix = ".lz";
4660 break;
4661 default:
4662 assert(0);
4663 return;
4664 }
4665
4666 /* Add the type and suffixes .c, .o if needed. */
4667 build_int_type_name(LLVMTypeOf(emit_data->args[0]), type, sizeof(type));
4668 sprintf(intr_name, "%s%s%s%s.%s",
4669 name, is_shadow ? ".c" : "", infix,
4670 has_offset ? ".o" : "", type);
4671
4672 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4673 base->gallivm->builder, intr_name, emit_data->dst_type,
4674 emit_data->args, emit_data->arg_count,
4675 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
4676 }
4677
4678 static void si_llvm_emit_txqs(
4679 const struct lp_build_tgsi_action *action,
4680 struct lp_build_tgsi_context *bld_base,
4681 struct lp_build_emit_data *emit_data)
4682 {
4683 struct si_shader_context *ctx = si_shader_context(bld_base);
4684 struct gallivm_state *gallivm = bld_base->base.gallivm;
4685 LLVMBuilderRef builder = gallivm->builder;
4686 LLVMValueRef res, samples;
4687 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4688
4689 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4690
4691
4692 /* Read the samples from the descriptor directly. */
4693 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4694 samples = LLVMBuildExtractElement(
4695 builder, res,
4696 lp_build_const_int32(gallivm, 3), "");
4697 samples = LLVMBuildLShr(builder, samples,
4698 lp_build_const_int32(gallivm, 16), "");
4699 samples = LLVMBuildAnd(builder, samples,
4700 lp_build_const_int32(gallivm, 0xf), "");
4701 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
4702 samples, "");
4703
4704 emit_data->output[emit_data->chan] = samples;
4705 }
4706
4707 /*
4708 * SI implements derivatives using the local data store (LDS)
4709 * All writes to the LDS happen in all executing threads at
4710 * the same time. TID is the Thread ID for the current
4711 * thread and is a value between 0 and 63, representing
4712 * the thread's position in the wavefront.
4713 *
4714 * For the pixel shader threads are grouped into quads of four pixels.
4715 * The TIDs of the pixels of a quad are:
4716 *
4717 * +------+------+
4718 * |4n + 0|4n + 1|
4719 * +------+------+
4720 * |4n + 2|4n + 3|
4721 * +------+------+
4722 *
4723 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
4724 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
4725 * the current pixel's column, and masking with 0xfffffffe yields the TID
4726 * of the left pixel of the current pixel's row.
4727 *
4728 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
4729 * adding 2 yields the TID of the pixel below the top pixel.
4730 */
4731 /* masks for thread ID. */
4732 #define TID_MASK_TOP_LEFT 0xfffffffc
4733 #define TID_MASK_TOP 0xfffffffd
4734 #define TID_MASK_LEFT 0xfffffffe
4735
/* Emit DDX/DDY (coarse and fine) screen-space derivatives.
 *
 * Each thread publishes its value (via LDS, or via ds_bpermute on VI+ with
 * LLVM >= 3.9), then reads the value of a neighboring pixel in its quad and
 * subtracts.  See the TID_MASK_* comment block above for the quad layout.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMValueRef tl_tid, trbl_tid;
	unsigned swizzle[4];
	unsigned c;
	int idx;
	unsigned mask;

	/* LDS slot of this thread: lds[thread_id]. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = get_thread_id(ctx);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* Pick the "base" pixel of the difference: the fine variants only
	 * snap in one axis, the coarse variants snap to the quad's top-left
	 * pixel. */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	tl_tid = LLVMBuildAnd(gallivm->builder, indices[1],
			      lp_build_const_int32(gallivm, mask), "");
	indices[1] = tl_tid;
	load_ptr0 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* for DDX we want the next X pixel, DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	trbl_tid = LLVMBuildAdd(gallivm->builder, indices[1],
				lp_build_const_int32(gallivm, idx), "");
	indices[1] = trbl_tid;
	load_ptr1 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;
		LLVMValueRef val;
		LLVMValueRef args[2];

		/* Reuse the result of an earlier channel with the same
		 * source swizzle instead of recomputing it. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		val = LLVMBuildBitCast(gallivm->builder,
				       lp_build_emit_fetch(bld_base, inst, 0, c),
				       ctx->i32, "");

		if ((HAVE_LLVM >= 0x0309) && ctx->screen->b.family >= CHIP_TONGA) {

			/* ds_bpermute exchanges values within a wave without
			 * touching LDS memory; it takes a byte address, hence
			 * the multiply by 4. */
			args[0] = LLVMBuildMul(gallivm->builder, tl_tid,
					       lp_build_const_int32(gallivm, 4), "");
			args[1] = val;
			tl = lp_build_intrinsic(gallivm->builder,
						"llvm.amdgcn.ds.bpermute", ctx->i32,
						args, 2, LLVMReadNoneAttribute);

			args[0] = LLVMBuildMul(gallivm->builder, trbl_tid,
					       lp_build_const_int32(gallivm, 4), "");
			trbl = lp_build_intrinsic(gallivm->builder,
						  "llvm.amdgcn.ds.bpermute", ctx->i32,
						  args, 2, LLVMReadNoneAttribute);
		} else {
			/* Fallback: every thread stores its value to LDS and
			 * reads its neighbors' slots back. */
			LLVMBuildStore(gallivm->builder, val, store_ptr);
			tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
			trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		}
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
4825
4826 /*
4827 * this takes an I,J coordinate pair,
4828 * and works out the X and Y derivatives.
4829 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
4830 */
4831 static LLVMValueRef si_llvm_emit_ddxy_interp(
4832 struct lp_build_tgsi_context *bld_base,
4833 LLVMValueRef interp_ij)
4834 {
4835 struct si_shader_context *ctx = si_shader_context(bld_base);
4836 struct gallivm_state *gallivm = bld_base->base.gallivm;
4837 LLVMValueRef indices[2];
4838 LLVMValueRef store_ptr, load_ptr_x, load_ptr_y, load_ptr_ddx, load_ptr_ddy, temp, temp2;
4839 LLVMValueRef tl, tr, bl, result[4];
4840 unsigned c;
4841
4842 indices[0] = bld_base->uint_bld.zero;
4843 indices[1] = get_thread_id(ctx);
4844 store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
4845 indices, 2, "");
4846
4847 temp = LLVMBuildAnd(gallivm->builder, indices[1],
4848 lp_build_const_int32(gallivm, TID_MASK_LEFT), "");
4849
4850 temp2 = LLVMBuildAnd(gallivm->builder, indices[1],
4851 lp_build_const_int32(gallivm, TID_MASK_TOP), "");
4852
4853 indices[1] = temp;
4854 load_ptr_x = LLVMBuildGEP(gallivm->builder, ctx->lds,
4855 indices, 2, "");
4856
4857 indices[1] = temp2;
4858 load_ptr_y = LLVMBuildGEP(gallivm->builder, ctx->lds,
4859 indices, 2, "");
4860
4861 indices[1] = LLVMBuildAdd(gallivm->builder, temp,
4862 lp_build_const_int32(gallivm, 1), "");
4863 load_ptr_ddx = LLVMBuildGEP(gallivm->builder, ctx->lds,
4864 indices, 2, "");
4865
4866 indices[1] = LLVMBuildAdd(gallivm->builder, temp2,
4867 lp_build_const_int32(gallivm, 2), "");
4868 load_ptr_ddy = LLVMBuildGEP(gallivm->builder, ctx->lds,
4869 indices, 2, "");
4870
4871 for (c = 0; c < 2; ++c) {
4872 LLVMValueRef store_val;
4873 LLVMValueRef c_ll = lp_build_const_int32(gallivm, c);
4874
4875 store_val = LLVMBuildExtractElement(gallivm->builder,
4876 interp_ij, c_ll, "");
4877 LLVMBuildStore(gallivm->builder,
4878 store_val,
4879 store_ptr);
4880
4881 tl = LLVMBuildLoad(gallivm->builder, load_ptr_x, "");
4882 tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
4883
4884 tr = LLVMBuildLoad(gallivm->builder, load_ptr_ddx, "");
4885 tr = LLVMBuildBitCast(gallivm->builder, tr, ctx->f32, "");
4886
4887 result[c] = LLVMBuildFSub(gallivm->builder, tr, tl, "");
4888
4889 tl = LLVMBuildLoad(gallivm->builder, load_ptr_y, "");
4890 tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
4891
4892 bl = LLVMBuildLoad(gallivm->builder, load_ptr_ddy, "");
4893 bl = LLVMBuildBitCast(gallivm->builder, bl, ctx->f32, "");
4894
4895 result[c + 2] = LLVMBuildFSub(gallivm->builder, bl, tl, "");
4896 }
4897
4898 return lp_build_gather_values(gallivm, result, 4);
4899 }
4900
4901 static void interp_fetch_args(
4902 struct lp_build_tgsi_context *bld_base,
4903 struct lp_build_emit_data *emit_data)
4904 {
4905 struct si_shader_context *ctx = si_shader_context(bld_base);
4906 struct gallivm_state *gallivm = bld_base->base.gallivm;
4907 const struct tgsi_full_instruction *inst = emit_data->inst;
4908
4909 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
4910 /* offset is in second src, first two channels */
4911 emit_data->args[0] = lp_build_emit_fetch(bld_base,
4912 emit_data->inst, 1,
4913 TGSI_CHAN_X);
4914 emit_data->args[1] = lp_build_emit_fetch(bld_base,
4915 emit_data->inst, 1,
4916 TGSI_CHAN_Y);
4917 emit_data->arg_count = 2;
4918 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
4919 LLVMValueRef sample_position;
4920 LLVMValueRef sample_id;
4921 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
4922
4923 /* fetch sample ID, then fetch its sample position,
4924 * and place into first two channels.
4925 */
4926 sample_id = lp_build_emit_fetch(bld_base,
4927 emit_data->inst, 1, TGSI_CHAN_X);
4928 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
4929 ctx->i32, "");
4930 sample_position = load_sample_position(&ctx->radeon_bld, sample_id);
4931
4932 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
4933 sample_position,
4934 lp_build_const_int32(gallivm, 0), "");
4935
4936 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
4937 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
4938 sample_position,
4939 lp_build_const_int32(gallivm, 1), "");
4940 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
4941 emit_data->arg_count = 2;
4942 }
4943 }
4944
/* Emit INTERP_CENTROID / INTERP_OFFSET / INTERP_SAMPLE.
 *
 * Selects the appropriate barycentric (I,J) parameter for the input's
 * interpolation mode; for OFFSET/SAMPLE the center parameter is adjusted
 * with screen-space derivatives, then llvm.SI.fs.interp (or .constant for
 * flat inputs) is used to interpolate the first two channels.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const char *intr_name;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET/SAMPLE start from the center parameter and adjust it;
	 * INTERP_CENTROID uses the centroid parameter directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* Index 0 means "no parameter" (flat/constant interpolation). */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->radeon_bld.main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = lp_build_const_int32(gallivm, input_index);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
			LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");

			ij_out[i] = LLVMBuildBitCast(gallivm->builder,
						     temp2, ctx->i32, "");
		}
		interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
	}

	/* Flat inputs use llvm.SI.fs.constant, which takes no (I,J). */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
	for (chan = 0; chan < 2; chan++) {
		LLVMValueRef args[4];
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = lp_build_const_int32(gallivm, schan);

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;

		emit_data->output[chan] =
			lp_build_intrinsic(gallivm->builder, intr_name,
					   ctx->f32, args, args[3] ? 4 : 3,
					   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
5043
5044 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
5045 struct lp_build_emit_data *emit_data)
5046 {
5047 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
5048 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
5049 unsigned stream;
5050
5051 assert(src0.File == TGSI_FILE_IMMEDIATE);
5052
5053 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
5054 return stream;
5055 }
5056
/* Emit one vertex from the geometry shader.
 *
 * All declared outputs are written to the GSVS ring buffer of the
 * instruction's vertex stream, at an offset derived from the running
 * per-thread vertex counter (ctx->gs_next_vertex), and vertex emission is
 * signalled to the hardware with a sendmsg.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					    SI_PARAM_GS2VS_OFFSET);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	LLVMValueRef args[2];
	unsigned chan;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
				 lp_build_const_int32(gallivm,
						      shader->selector->gs_max_out_vertices), "");
	kill = lp_build_select(&bld_base->base, can_emit,
			       lp_build_const_float(gallivm, 1.0f),
			       lp_build_const_float(gallivm, -1.0f));

	lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
			   ctx->voidt, &kill, 1, 0);

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->radeon_bld.soa.outputs[i];

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			/* Ring layout: each output component occupies a run
			 * of gs_max_out_vertices dwords, indexed by the
			 * vertex counter; the offset is in bytes (*4). */
			LLVMValueRef voffset =
				lp_build_const_int32(gallivm, (i * 4 + chan) *
						     shader->selector->gs_max_out_vertices);

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4);

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			build_tbuffer_store(ctx,
					    ctx->gsvs_ring[stream],
					    out_val, 1,
					    voffset, soffset, 0,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    1, 0, 1, 1, 0);
		}
	}
	/* Advance the per-stream vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      lp_build_const_int32(gallivm, 1));

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
	args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
	lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
			   ctx->voidt, args, 2, LLVMNoUnwindAttribute);
}
5134
5135 /* Cut one primitive from the geometry shader */
5136 static void si_llvm_emit_primitive(
5137 const struct lp_build_tgsi_action *action,
5138 struct lp_build_tgsi_context *bld_base,
5139 struct lp_build_emit_data *emit_data)
5140 {
5141 struct si_shader_context *ctx = si_shader_context(bld_base);
5142 struct gallivm_state *gallivm = bld_base->base.gallivm;
5143 LLVMValueRef args[2];
5144 unsigned stream;
5145
5146 /* Signal primitive cut */
5147 stream = si_llvm_get_stream(bld_base, emit_data);
5148 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
5149 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
5150 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5151 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
5152 }
5153
5154 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
5155 struct lp_build_tgsi_context *bld_base,
5156 struct lp_build_emit_data *emit_data)
5157 {
5158 struct si_shader_context *ctx = si_shader_context(bld_base);
5159 struct gallivm_state *gallivm = bld_base->base.gallivm;
5160
5161 /* The real barrier instruction isn’t needed, because an entire patch
5162 * always fits into a single wave.
5163 */
5164 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
5165 emit_optimization_barrier(ctx);
5166 return;
5167 }
5168
5169 lp_build_intrinsic(gallivm->builder,
5170 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
5171 : "llvm.AMDGPU.barrier.local",
5172 ctx->voidt, NULL, 0, LLVMNoUnwindAttribute);
5173 }
5174
/* TGSI action table entry shared by all texture-sampling opcodes. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
5179
/* TGSI action table entry shared by the INTERP_* opcodes. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
5184
5185 static void si_create_function(struct si_shader_context *ctx,
5186 LLVMTypeRef *returns, unsigned num_returns,
5187 LLVMTypeRef *params, unsigned num_params,
5188 int last_array_pointer, int last_sgpr)
5189 {
5190 int i;
5191
5192 radeon_llvm_create_func(&ctx->radeon_bld, returns, num_returns,
5193 params, num_params);
5194 radeon_llvm_shader_type(ctx->radeon_bld.main_fn, ctx->type);
5195 ctx->return_value = LLVMGetUndef(ctx->radeon_bld.return_type);
5196
5197 for (i = 0; i <= last_sgpr; ++i) {
5198 LLVMValueRef P = LLVMGetParam(ctx->radeon_bld.main_fn, i);
5199
5200 /* We tell llvm that array inputs are passed by value to allow Sinking pass
5201 * to move load. Inputs are constant so this is fine. */
5202 if (i <= last_array_pointer)
5203 LLVMAddAttribute(P, LLVMByValAttribute);
5204 else
5205 LLVMAddAttribute(P, LLVMInRegAttribute);
5206 }
5207 }
5208
5209 static void create_meta_data(struct si_shader_context *ctx)
5210 {
5211 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
5212 LLVMValueRef args[3];
5213
5214 args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
5215 args[1] = 0;
5216 args[2] = lp_build_const_int32(gallivm, 1);
5217
5218 ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
5219
5220 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5221 "amdgpu.uniform", 14);
5222
5223 ctx->empty_md = LLVMMDNodeInContext(gallivm->context, NULL, 0);
5224 }
5225
5226 static void declare_streamout_params(struct si_shader_context *ctx,
5227 struct pipe_stream_output_info *so,
5228 LLVMTypeRef *params, LLVMTypeRef i32,
5229 unsigned *num_params)
5230 {
5231 int i;
5232
5233 /* Streamout SGPRs. */
5234 if (so->num_outputs) {
5235 if (ctx->type != PIPE_SHADER_TESS_EVAL)
5236 params[ctx->param_streamout_config = (*num_params)++] = i32;
5237 else
5238 ctx->param_streamout_config = ctx->param_tess_offchip;
5239
5240 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
5241 }
5242 /* A streamout buffer offset is loaded if the stride is non-zero. */
5243 for (i = 0; i < 4; i++) {
5244 if (!so->stride[i])
5245 continue;
5246
5247 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
5248 }
5249 }
5250
5251 static unsigned llvm_get_type_size(LLVMTypeRef type)
5252 {
5253 LLVMTypeKind kind = LLVMGetTypeKind(type);
5254
5255 switch (kind) {
5256 case LLVMIntegerTypeKind:
5257 return LLVMGetIntTypeWidth(type) / 8;
5258 case LLVMFloatTypeKind:
5259 return 4;
5260 case LLVMPointerTypeKind:
5261 return 8;
5262 case LLVMVectorTypeKind:
5263 return LLVMGetVectorSize(type) *
5264 llvm_get_type_size(LLVMGetElementType(type));
5265 default:
5266 assert(0);
5267 return 0;
5268 }
5269 }
5270
5271 static void declare_tess_lds(struct si_shader_context *ctx)
5272 {
5273 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
5274 LLVMTypeRef i32 = ctx->radeon_bld.soa.bld_base.uint_bld.elem_type;
5275 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
5276
5277 /* The actual size is computed outside of the shader to reduce
5278 * the number of shader variants. */
5279 ctx->lds =
5280 LLVMAddGlobalInAddressSpace(gallivm->module,
5281 LLVMArrayType(i32, lds_size / 4),
5282 "tess_lds",
5283 LOCAL_ADDR_SPACE);
5284 }
5285
/* Build the main shader function's signature for the current stage and
 * declare it.
 *
 * The parameter list starts with the descriptor-array pointers common to
 * all stages, followed by stage-specific SGPRs and then VGPR system
 * values; the exact order is the hardware/ABI contract, so it must match
 * the SI_PARAM_* definitions and the state-setting code.  Non-monolithic
 * shader parts also declare return values that are passed on to the
 * epilog.  Finally, LDS globals are declared if the stage needs them.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_array_pointer, last_sgpr, num_params, num_return_sgprs;
	unsigned num_returns = 0;

	v3i32 = LLVMVectorType(ctx->i32, 3);

	/* Descriptor arrays shared by every stage. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
	params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
	params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
	params[SI_PARAM_SHADER_BUFFERS] = const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);
	last_array_pointer = SI_PARAM_SHADER_BUFFERS;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
		last_array_pointer = SI_PARAM_VERTEX_BUFFERS;
		params[SI_PARAM_BASE_VERTEX] = ctx->i32;
		params[SI_PARAM_START_INSTANCE] = ctx->i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* The VS can run as an ES (before a GS), an LS (before a
		 * TCS), or as a real VS feeding the rasterizer. */
		if (shader->key.vs.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.vs.as_ls) {
			params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
			num_params = SI_PARAM_LS_OUT_LAYOUT+1;
		} else {
			if (ctx->is_gs_copy_shader) {
				last_array_pointer = SI_PARAM_RW_BUFFERS;
				num_params = SI_PARAM_RW_BUFFERS+1;
			} else {
				params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
				num_params = SI_PARAM_VS_STATE_BITS+1;
			}

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[ctx->param_vertex_id = num_params++] = ctx->i32;
		params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
		params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_instance_id = num_params++] = ctx->i32;

		if (!ctx->is_monolithic &&
		    !ctx->is_gs_copy_shader) {
			/* Vertex load indices. */
			ctx->param_vertex_index0 = num_params;

			for (i = 0; i < shader->selector->info.num_inputs; i++)
				params[num_params++] = ctx->i32;

			/* PrimitiveID output. */
			if (!shader->key.vs.as_es && !shader->key.vs.as_ls)
				for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
					returns[num_returns++] = ctx->f32;
		}
		break;

	case PIPE_SHADER_TESS_CTRL:
		params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
		params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
		params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
		last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;

		/* VGPRs */
		params[SI_PARAM_PATCH_ID] = ctx->i32;
		params[SI_PARAM_REL_IDS] = ctx->i32;
		num_params = SI_PARAM_REL_IDS+1;

		if (!ctx->is_monolithic) {
			/* SI_PARAM_TCS_OC_LDS and PARAM_TESS_FACTOR_OFFSET are
			 * placed after the user SGPRs.
			 */
			for (i = 0; i < SI_TCS_NUM_USER_SGPR + 2; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */

			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		num_params = SI_PARAM_TCS_OUT_LAYOUT+1;

		/* The TES runs as an ES before a GS, or as a VS otherwise;
		 * note the SGPR order differs between the two cases. */
		if (shader->key.tes.as_es) {
			params[ctx->param_oc_lds = num_params++] = ctx->i32;
			params[ctx->param_tess_offchip = num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[ctx->param_tess_offchip = num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_oc_lds = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tes_u = num_params++] = ctx->f32;
		params[ctx->param_tes_v = num_params++] = ctx->f32;
		params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tes_patch_id = num_params++] = ctx->i32;

		/* PrimitiveID output. */
		if (!ctx->is_monolithic && !shader->key.tes.as_es)
			for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
				returns[num_returns++] = ctx->f32;
		break;

	case PIPE_SHADER_GEOMETRY:
		params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
		last_sgpr = SI_PARAM_GS_WAVE_ID;

		/* VGPRs */
		params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
		params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
		params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
		num_params = SI_PARAM_GS_INSTANCE_ID+1;
		break;

	case PIPE_SHADER_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		/* Interpolation parameters and other per-pixel VGPR system
		 * values, in hardware order. */
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		if (!ctx->is_monolithic) {
			/* Color inputs from the prolog. */
			if (shader->selector->info.colors_read) {
				unsigned num_color_elements =
					util_bitcount(shader->selector->info.colors_read);

				assert(num_params + num_color_elements <= ARRAY_SIZE(params));
				for (i = 0; i < num_color_elements; i++)
					params[num_params++] = ctx->f32;
			}

			/* Outputs for the epilog. */
			num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
			num_returns =
				num_return_sgprs +
				util_bitcount(shader->selector->info.colors_written) * 4 +
				shader->selector->info.writes_z +
				shader->selector->info.writes_stencil +
				shader->selector->info.writes_samplemask +
				1 /* SampleMaskIn */;

			num_returns = MAX2(num_returns,
					   num_return_sgprs +
					   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

			for (i = 0; i < num_return_sgprs; i++)
				returns[i] = ctx->i32;
			for (; i < num_returns; i++)
				returns[i] = ctx->f32;
		}
		break;

	case PIPE_SHADER_COMPUTE:
		params[SI_PARAM_GRID_SIZE] = v3i32;
		params[SI_PARAM_BLOCK_ID] = v3i32;
		last_sgpr = SI_PARAM_BLOCK_ID;

		params[SI_PARAM_THREAD_ID] = v3i32;
		num_params = SI_PARAM_THREAD_ID + 1;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, returns, num_returns, params,
			   num_params, last_array_pointer, last_sgpr);

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    !ctx->is_monolithic) {
		radeon_llvm_add_attribute(ctx->radeon_bld.main_fn,
					  "InitialPSInputAddr",
					  S_0286D0_PERSP_SAMPLE_ENA(1) |
					  S_0286D0_PERSP_CENTER_ENA(1) |
					  S_0286D0_PERSP_CENTROID_ENA(1) |
					  S_0286D0_LINEAR_SAMPLE_ENA(1) |
					  S_0286D0_LINEAR_CENTER_ENA(1) |
					  S_0286D0_LINEAR_CENTROID_ENA(1) |
					  S_0286D0_FRONT_FACE_ENA(1) |
					  S_0286D0_POS_FIXED_PT_ENA(1));
	} else if (ctx->type == PIPE_SHADER_COMPUTE) {
		const unsigned *properties = shader->selector->info.properties;
		unsigned max_work_group_size =
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];

		assert(max_work_group_size);

		radeon_llvm_add_attribute(ctx->radeon_bld.main_fn,
					  "amdgpu-max-work-group-size",
					  max_work_group_size);
	}

	/* Count input registers for the register-usage statistics;
	 * each parameter takes size/4 dwords. */
	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	/* Unused fragment shader inputs are eliminated by the compiler,
	 * so we don't know yet how many there will be.
	 */
	if (ctx->type != PIPE_SHADER_FRAGMENT)
		for (; i < num_params; ++i)
			shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Derivative and interpolation opcodes may exchange data through a
	 * 64-dword LDS scratch area (see si_llvm_emit_ddxy). */
	if (bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if ((ctx->type == PIPE_SHADER_VERTEX && shader->key.vs.as_ls) ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    ctx->type == PIPE_SHADER_TESS_EVAL)
		declare_tess_lds(ctx);
}
5558
/**
 * Pre-load the descriptor and all constant values of every constant buffer
 * referenced by the shader.  All loads are emitted up front; we rely on
 * LLVM's code sinking to move each load next to its use.
 */
static void preload_constants(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info *info = bld_base->info;
	unsigned buf;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);

	for (buf = 0; buf < SI_NUM_CONST_BUFFERS; buf++) {
		/* const_file_max is the highest constant index used (or -1),
		 * so num_const == 0 means the buffer is unused. */
		unsigned i, num_const = info->const_file_max[buf] + 1;

		if (num_const == 0)
			continue;

		/* Allocate space for the constant values (4 dwords per vec4
		 * constant).
		 * NOTE(review): the CALLOC result is not checked; a failed
		 * allocation would crash in the loop below — confirm the
		 * intended OOM policy. */
		ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));

		/* Load the resource descriptor */
		ctx->const_buffers[buf] =
			build_indexed_load_const(ctx, ptr, lp_build_const_int32(gallivm, buf));

		/* Load the constants, we rely on the code sinking to do the rest */
		for (i = 0; i < num_const * 4; ++i) {
			/* Byte offset is i * 4 since each element is a dword. */
			ctx->constants[buf][i] =
				buffer_load_const(gallivm->builder,
					ctx->const_buffers[buf],
					lp_build_const_int32(gallivm, i * 4),
					ctx->f32);
		}
	}
}
5590
5591 static void preload_shader_buffers(struct si_shader_context *ctx)
5592 {
5593 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
5594 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_SHADER_BUFFERS);
5595 int buf, maxbuf;
5596
5597 maxbuf = MIN2(ctx->shader->selector->info.file_max[TGSI_FILE_BUFFER],
5598 SI_NUM_SHADER_BUFFERS - 1);
5599 for (buf = 0; buf <= maxbuf; ++buf) {
5600 ctx->shader_buffers[buf] =
5601 build_indexed_load_const(
5602 ctx, ptr, lp_build_const_int32(gallivm, buf));
5603 }
5604 }
5605
/**
 * Pre-load the sampler view descriptors (and FMASK or sampler-state
 * descriptors) for every sampler the shader uses.  Loads are emitted up
 * front and sunk next to their uses by LLVM.
 */
static void preload_samplers(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_shader_info *info = bld_base->info;
	unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
	LLVMValueRef offset;

	if (num_samplers == 0)
		return;

	/* Load the resources and samplers, we rely on the code sinking to do the rest */
	for (i = 0; i < num_samplers; ++i) {
		/* Resource */
		offset = lp_build_const_int32(gallivm, i);
		ctx->sampler_views[i] =
			get_sampler_desc(ctx, offset, DESC_IMAGE);

		/* FMASK resource */
		if (info->is_msaa_sampler[i])
			ctx->fmasks[i] =
				get_sampler_desc(ctx, offset, DESC_FMASK);
		else {
			/* Non-MSAA samplers get a sampler state instead,
			 * run through the SI/CI anisotropy fixup. */
			ctx->sampler_states[i] =
				get_sampler_desc(ctx, offset, DESC_SAMPLER);
			ctx->sampler_states[i] =
				sici_fix_sampler_aniso(ctx, ctx->sampler_views[i],
						       ctx->sampler_states[i]);
		}
	}
}
5637
5638 static void preload_images(struct si_shader_context *ctx)
5639 {
5640 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5641 struct tgsi_shader_info *info = &ctx->shader->selector->info;
5642 struct gallivm_state *gallivm = bld_base->base.gallivm;
5643 unsigned num_images = bld_base->info->file_max[TGSI_FILE_IMAGE] + 1;
5644 LLVMValueRef res_ptr;
5645 unsigned i;
5646
5647 if (num_images == 0)
5648 return;
5649
5650 res_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
5651
5652 for (i = 0; i < num_images; ++i) {
5653 /* Rely on LLVM to shrink the load for buffer resources. */
5654 LLVMValueRef rsrc =
5655 build_indexed_load_const(ctx, res_ptr,
5656 lp_build_const_int32(gallivm, i));
5657
5658 if (info->images_writemask & (1 << i) &&
5659 !(info->images_buffers & (1 << i)))
5660 rsrc = force_dcc_off(ctx, rsrc);
5661
5662 ctx->images[i] = rsrc;
5663 }
5664 }
5665
5666 static void preload_streamout_buffers(struct si_shader_context *ctx)
5667 {
5668 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5669 struct gallivm_state *gallivm = bld_base->base.gallivm;
5670 unsigned i;
5671
5672 /* Streamout can only be used if the shader is compiled as VS. */
5673 if (!ctx->shader->selector->so.num_outputs ||
5674 (ctx->type == PIPE_SHADER_VERTEX &&
5675 (ctx->shader->key.vs.as_es ||
5676 ctx->shader->key.vs.as_ls)) ||
5677 (ctx->type == PIPE_SHADER_TESS_EVAL &&
5678 ctx->shader->key.tes.as_es))
5679 return;
5680
5681 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
5682 SI_PARAM_RW_BUFFERS);
5683
5684 /* Load the resources, we rely on the code sinking to do the rest */
5685 for (i = 0; i < 4; ++i) {
5686 if (ctx->shader->selector->so.stride[i]) {
5687 LLVMValueRef offset = lp_build_const_int32(gallivm,
5688 SI_VS_STREAMOUT_BUF0 + i);
5689
5690 ctx->so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
5691 }
5692 }
5693 }
5694
/**
 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
 * for later use.
 */
static void preload_ring_buffers(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm =
		ctx->radeon_bld.soa.bld_base.base.gallivm;

	LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
					    SI_PARAM_RW_BUFFERS);

	/* The ESGS ring is shared between the ES-side writer (VS-as-ES,
	 * TES-as-ES) and the GS-side reader; they use different slots. */
	if ((ctx->type == PIPE_SHADER_VERTEX &&
	     ctx->shader->key.vs.as_es) ||
	    (ctx->type == PIPE_SHADER_TESS_EVAL &&
	     ctx->shader->key.tes.as_es) ||
	    ctx->type == PIPE_SHADER_GEOMETRY) {
		unsigned ring =
			ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
							  : SI_ES_RING_ESGS;
		LLVMValueRef offset = lp_build_const_int32(gallivm, ring);

		ctx->esgs_ring =
			build_indexed_load_const(ctx, buf_ptr, offset);
	}

	/* The GS copy shader reads GSVS through the single VS-side slot. */
	if (ctx->is_gs_copy_shader) {
		LLVMValueRef offset = lp_build_const_int32(gallivm, SI_VS_RING_GSVS);

		ctx->gsvs_ring[0] =
			build_indexed_load_const(ctx, buf_ptr, offset);
	}
	/* The GS itself uses four GSVS slots.
	 * NOTE(review): presumably one per output component/stream layout —
	 * confirm against the descriptor setup in the state code. */
	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		for (i = 0; i < 4; i++) {
			LLVMValueRef offset = lp_build_const_int32(gallivm, SI_GS_RING_GSVS0 + i);

			ctx->gsvs_ring[i] =
				build_indexed_load_const(ctx, buf_ptr, offset);
		}
	}
}
5737
/**
 * Emit code that kills the current fragment when the corresponding bit of
 * the 32x32 polygon stipple pattern is zero.
 *
 * \param param_rw_buffers    descriptor list containing the stipple buffer
 *                            at slot SI_PS_CONST_POLY_STIPPLE
 * \param param_pos_fixed_pt  function parameter index of the fixed-point
 *                            fragment position (x in bits 0..15, y in 16..31)
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct lp_build_tgsi_context *bld_base =
		&ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = lp_build_const_int32(gallivm, SI_PS_CONST_POLY_STIPPLE);
	desc = build_indexed_load_const(ctx, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(builder, desc, offset, ctx->i32);
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	lp_build_intrinsic(builder, "llvm.AMDGPU.kill", ctx->voidt, &bit, 1, 0);
}
5771
5772 void si_shader_binary_read_config(struct radeon_shader_binary *binary,
5773 struct si_shader_config *conf,
5774 unsigned symbol_offset)
5775 {
5776 unsigned i;
5777 const unsigned char *config =
5778 radeon_shader_binary_config_start(binary, symbol_offset);
5779
5780 /* XXX: We may be able to emit some of these values directly rather than
5781 * extracting fields to be emitted later.
5782 */
5783
5784 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
5785 unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
5786 unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
5787 switch (reg) {
5788 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
5789 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
5790 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
5791 case R_00B848_COMPUTE_PGM_RSRC1:
5792 conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
5793 conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
5794 conf->float_mode = G_00B028_FLOAT_MODE(value);
5795 conf->rsrc1 = value;
5796 break;
5797 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
5798 conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
5799 break;
5800 case R_00B84C_COMPUTE_PGM_RSRC2:
5801 conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
5802 conf->rsrc2 = value;
5803 break;
5804 case R_0286CC_SPI_PS_INPUT_ENA:
5805 conf->spi_ps_input_ena = value;
5806 break;
5807 case R_0286D0_SPI_PS_INPUT_ADDR:
5808 conf->spi_ps_input_addr = value;
5809 break;
5810 case R_0286E8_SPI_TMPRING_SIZE:
5811 case R_00B860_COMPUTE_TMPRING_SIZE:
5812 /* WAVESIZE is in units of 256 dwords. */
5813 conf->scratch_bytes_per_wave =
5814 G_00B860_WAVESIZE(value) * 256 * 4 * 1;
5815 break;
5816 default:
5817 {
5818 static bool printed;
5819
5820 if (!printed) {
5821 fprintf(stderr, "Warning: LLVM emitted unknown "
5822 "config register: 0x%x\n", reg);
5823 printed = true;
5824 }
5825 }
5826 break;
5827 }
5828
5829 if (!conf->spi_ps_input_addr)
5830 conf->spi_ps_input_addr = conf->spi_ps_input_ena;
5831 }
5832 }
5833
5834 void si_shader_apply_scratch_relocs(struct si_context *sctx,
5835 struct si_shader *shader,
5836 struct si_shader_config *config,
5837 uint64_t scratch_va)
5838 {
5839 unsigned i;
5840 uint32_t scratch_rsrc_dword0 = scratch_va;
5841 uint32_t scratch_rsrc_dword1 =
5842 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32)
5843 | S_008F04_STRIDE(config->scratch_bytes_per_wave / 64);
5844
5845 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5846 const struct radeon_shader_reloc *reloc =
5847 &shader->binary.relocs[i];
5848 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5849 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5850 &scratch_rsrc_dword0, 4);
5851 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5852 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5853 &scratch_rsrc_dword1, 4);
5854 }
5855 }
5856 }
5857
5858 static unsigned si_get_shader_binary_size(struct si_shader *shader)
5859 {
5860 unsigned size = shader->binary.code_size;
5861
5862 if (shader->prolog)
5863 size += shader->prolog->binary.code_size;
5864 if (shader->epilog)
5865 size += shader->epilog->binary.code_size;
5866 return size;
5867 }
5868
5869 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5870 {
5871 const struct radeon_shader_binary *prolog =
5872 shader->prolog ? &shader->prolog->binary : NULL;
5873 const struct radeon_shader_binary *epilog =
5874 shader->epilog ? &shader->epilog->binary : NULL;
5875 const struct radeon_shader_binary *mainb = &shader->binary;
5876 unsigned bo_size = si_get_shader_binary_size(shader) +
5877 (!epilog ? mainb->rodata_size : 0);
5878 unsigned char *ptr;
5879
5880 assert(!prolog || !prolog->rodata_size);
5881 assert((!prolog && !epilog) || !mainb->rodata_size);
5882 assert(!epilog || !epilog->rodata_size);
5883
5884 r600_resource_reference(&shader->bo, NULL);
5885 shader->bo = si_resource_create_custom(&sscreen->b.b,
5886 PIPE_USAGE_IMMUTABLE,
5887 bo_size);
5888 if (!shader->bo)
5889 return -ENOMEM;
5890
5891 /* Upload. */
5892 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
5893 PIPE_TRANSFER_READ_WRITE);
5894
5895 if (prolog) {
5896 util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
5897 ptr += prolog->code_size;
5898 }
5899
5900 util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
5901 ptr += mainb->code_size;
5902
5903 if (epilog)
5904 util_memcpy_cpu_to_le32(ptr, epilog->code, epilog->code_size);
5905 else if (mainb->rodata_size > 0)
5906 util_memcpy_cpu_to_le32(ptr, mainb->rodata, mainb->rodata_size);
5907
5908 sscreen->b.ws->buffer_unmap(shader->bo->buf);
5909 return 0;
5910 }
5911
/**
 * Write the disassembly of one shader part to \p file and, when a debug
 * callback is set, also send it line by line via pipe_debug_message.
 * Falls back to a raw hex dump when no disassembly string is available.
 *
 * \param name  label for the part ("prolog", "main", "epilog")
 */
static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
				       struct pipe_debug_callback *debug,
				       const char *name, FILE *file)
{
	char *line, *p;
	unsigned i, count;

	if (binary->disasm_string) {
		fprintf(file, "Shader %s disassembly:\n", name);
		fprintf(file, "%s", binary->disasm_string);

		if (debug && debug->debug_message) {
			/* Very long debug messages are cut off, so send the
			 * disassembly one line at a time. This causes more
			 * overhead, but on the plus side it simplifies
			 * parsing of resulting logs.
			 */
			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly Begin");

			line = binary->disasm_string;
			while (*line) {
				p = util_strchrnul(line, '\n');
				count = p - line;

				/* Skip empty lines. */
				if (count) {
					pipe_debug_message(debug, SHADER_INFO,
							   "%.*s", count, line);
				}

				if (!*p)
					break;
				line = p + 1;
			}

			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly End");
		}
	} else {
		/* Dump one little-endian dword per line.
		 * NOTE(review): assumes code_size is a multiple of 4 (GPU
		 * code is dword-sized); a trailing partial dword would read
		 * past the buffer — confirm. */
		fprintf(file, "Shader %s binary:\n", name);
		for (i = 0; i < binary->code_size; i += 4) {
			fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
				binary->code[i + 3], binary->code[i + 2],
				binary->code[i + 1], binary->code[i]);
		}
	}
}
5959
/**
 * Print shader resource statistics (register counts, LDS, scratch) and an
 * estimate of the maximum number of waves per SIMD, to \p file and to the
 * debug callback.
 *
 * \param num_inputs  number of PS inputs, used for the LDS estimate
 * \param processor   PIPE_SHADER_* stage
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 struct si_shader_config *conf,
				 unsigned num_inputs,
				 unsigned code_size,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file)
{
	/* LDS allocation granularity: 512 bytes on CIK+, 256 on SI. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves = 10;	/* hardware maximum per SIMD */

	/* Compute LDS usage for PS. */
	if (processor == PIPE_SHADER_FRAGMENT) {
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
	}

	/* Compute the per-SIMD wave counts.
	 * NOTE(review): 512 vs. 800 reflects the per-SIMD SGPR budget on
	 * SI/CI vs. VI — confirm against the ISA docs. */
	if (conf->num_sgprs) {
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* 256 VGPRs per SIMD. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (file != stderr ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n",
			conf->num_sgprs, conf->num_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves);
}
6034
6035 static const char *si_get_shader_name(struct si_shader *shader,
6036 unsigned processor)
6037 {
6038 switch (processor) {
6039 case PIPE_SHADER_VERTEX:
6040 if (shader->key.vs.as_es)
6041 return "Vertex Shader as ES";
6042 else if (shader->key.vs.as_ls)
6043 return "Vertex Shader as LS";
6044 else
6045 return "Vertex Shader as VS";
6046 case PIPE_SHADER_TESS_CTRL:
6047 return "Tessellation Control Shader";
6048 case PIPE_SHADER_TESS_EVAL:
6049 if (shader->key.tes.as_es)
6050 return "Tessellation Evaluation Shader as ES";
6051 else
6052 return "Tessellation Evaluation Shader as VS";
6053 case PIPE_SHADER_GEOMETRY:
6054 if (shader->gs_copy_shader == NULL)
6055 return "GS Copy Shader as VS";
6056 else
6057 return "Geometry Shader";
6058 case PIPE_SHADER_FRAGMENT:
6059 return "Pixel Shader";
6060 case PIPE_SHADER_COMPUTE:
6061 return "Compute Shader";
6062 default:
6063 return "Unknown Shader";
6064 }
6065 }
6066
6067 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
6068 struct pipe_debug_callback *debug, unsigned processor,
6069 FILE *file)
6070 {
6071 if (file != stderr ||
6072 (r600_can_dump_shader(&sscreen->b, processor) &&
6073 !(sscreen->b.debug_flags & DBG_NO_ASM))) {
6074 fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
6075
6076 if (shader->prolog)
6077 si_shader_dump_disassembly(&shader->prolog->binary,
6078 debug, "prolog", file);
6079
6080 si_shader_dump_disassembly(&shader->binary, debug, "main", file);
6081
6082 if (shader->epilog)
6083 si_shader_dump_disassembly(&shader->epilog->binary,
6084 debug, "epilog", file);
6085 fprintf(file, "\n");
6086 }
6087
6088 si_shader_dump_stats(sscreen, &shader->config,
6089 shader->selector ? shader->selector->info.num_inputs : 0,
6090 si_get_shader_binary_size(shader), debug, processor,
6091 file);
6092 }
6093
6094 int si_compile_llvm(struct si_screen *sscreen,
6095 struct radeon_shader_binary *binary,
6096 struct si_shader_config *conf,
6097 LLVMTargetMachineRef tm,
6098 LLVMModuleRef mod,
6099 struct pipe_debug_callback *debug,
6100 unsigned processor,
6101 const char *name)
6102 {
6103 int r = 0;
6104 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
6105
6106 if (r600_can_dump_shader(&sscreen->b, processor)) {
6107 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
6108
6109 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
6110 fprintf(stderr, "%s LLVM IR:\n\n", name);
6111 LLVMDumpModule(mod);
6112 fprintf(stderr, "\n");
6113 }
6114 }
6115
6116 if (!si_replace_shader(count, binary)) {
6117 r = radeon_llvm_compile(mod, binary,
6118 r600_get_llvm_processor_name(sscreen->b.family), tm,
6119 debug);
6120 if (r)
6121 return r;
6122 }
6123
6124 si_shader_binary_read_config(binary, conf, 0);
6125
6126 /* Enable 64-bit and 16-bit denormals, because there is no performance
6127 * cost.
6128 *
6129 * If denormals are enabled, all floating-point output modifiers are
6130 * ignored.
6131 *
6132 * Don't enable denormals for 32-bit floats, because:
6133 * - Floating-point output modifiers would be ignored by the hw.
6134 * - Some opcodes don't support denormals, such as v_mad_f32. We would
6135 * have to stop using those.
6136 * - SI & CI would be very slow.
6137 */
6138 conf->float_mode |= V_00B028_FP_64_DENORMS;
6139
6140 FREE(binary->config);
6141 FREE(binary->global_symbol_offsets);
6142 binary->config = NULL;
6143 binary->global_symbol_offsets = NULL;
6144
6145 /* Some shaders can't have rodata because their binaries can be
6146 * concatenated.
6147 */
6148 if (binary->rodata_size &&
6149 (processor == PIPE_SHADER_VERTEX ||
6150 processor == PIPE_SHADER_TESS_CTRL ||
6151 processor == PIPE_SHADER_TESS_EVAL ||
6152 processor == PIPE_SHADER_FRAGMENT)) {
6153 fprintf(stderr, "radeonsi: The shader can't have rodata.");
6154 return -EINVAL;
6155 }
6156
6157 return r;
6158 }
6159
6160 /* Generate code for the hardware VS shader stage to go with a geometry shader */
6161 static int si_generate_gs_copy_shader(struct si_screen *sscreen,
6162 struct si_shader_context *ctx,
6163 struct si_shader *gs,
6164 struct pipe_debug_callback *debug)
6165 {
6166 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
6167 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
6168 struct lp_build_context *uint = &bld_base->uint_bld;
6169 struct si_shader_output_values *outputs;
6170 struct tgsi_shader_info *gsinfo = &gs->selector->info;
6171 LLVMValueRef args[9];
6172 int i, r;
6173
6174 outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));
6175
6176 si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm);
6177 ctx->type = PIPE_SHADER_VERTEX;
6178 ctx->is_gs_copy_shader = true;
6179
6180 create_meta_data(ctx);
6181 create_function(ctx);
6182 preload_streamout_buffers(ctx);
6183 preload_ring_buffers(ctx);
6184
6185 args[0] = ctx->gsvs_ring[0];
6186 args[1] = lp_build_mul_imm(uint,
6187 LLVMGetParam(ctx->radeon_bld.main_fn,
6188 ctx->param_vertex_id),
6189 4);
6190 args[3] = uint->zero;
6191 args[4] = uint->one; /* OFFEN */
6192 args[5] = uint->zero; /* IDXEN */
6193 args[6] = uint->one; /* GLC */
6194 args[7] = uint->one; /* SLC */
6195 args[8] = uint->zero; /* TFE */
6196
6197 /* Fetch vertex data from GSVS ring */
6198 for (i = 0; i < gsinfo->num_outputs; ++i) {
6199 unsigned chan;
6200
6201 outputs[i].name = gsinfo->output_semantic_name[i];
6202 outputs[i].sid = gsinfo->output_semantic_index[i];
6203
6204 for (chan = 0; chan < 4; chan++) {
6205 args[2] = lp_build_const_int32(gallivm,
6206 (i * 4 + chan) *
6207 gs->selector->gs_max_out_vertices * 16 * 4);
6208
6209 outputs[i].values[chan] =
6210 LLVMBuildBitCast(gallivm->builder,
6211 lp_build_intrinsic(gallivm->builder,
6212 "llvm.SI.buffer.load.dword.i32.i32",
6213 ctx->i32, args, 9,
6214 LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
6215 ctx->f32, "");
6216 }
6217 }
6218
6219 si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
6220
6221 LLVMBuildRet(gallivm->builder, ctx->return_value);
6222
6223 /* Dump LLVM IR before any optimization passes */
6224 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6225 r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6226 LLVMDumpModule(bld_base->base.gallivm->module);
6227
6228 radeon_llvm_finalize_module(&ctx->radeon_bld);
6229
6230 r = si_compile_llvm(sscreen, &ctx->shader->binary,
6231 &ctx->shader->config, ctx->tm,
6232 bld_base->base.gallivm->module,
6233 debug, PIPE_SHADER_GEOMETRY,
6234 "GS Copy Shader");
6235 if (!r) {
6236 if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6237 fprintf(stderr, "GS Copy Shader:\n");
6238 si_shader_dump(sscreen, ctx->shader, debug,
6239 PIPE_SHADER_GEOMETRY, stderr);
6240 r = si_shader_binary_upload(sscreen, ctx->shader);
6241 }
6242
6243 radeon_llvm_dispose(&ctx->radeon_bld);
6244
6245 FREE(outputs);
6246 return r;
6247 }
6248
/**
 * Print the contents of a shader key to \p f for debugging dumps.
 *
 * \param shader  PIPE_SHADER_* stage selecting which union member is valid
 * \param key     the key to dump
 */
void si_dump_shader_key(unsigned shader, union si_shader_key *key, FILE *f)
{
	int i;

	fprintf(f, "SHADER KEY\n");

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		fprintf(f, "  instance_divisors = {");
		for (i = 0; i < ARRAY_SIZE(key->vs.prolog.instance_divisors); i++)
			fprintf(f, !i ? "%u" : ", %u",
				key->vs.prolog.instance_divisors[i]);
		fprintf(f, "}\n");
		fprintf(f, "  as_es = %u\n", key->vs.as_es);
		fprintf(f, "  as_ls = %u\n", key->vs.as_ls);
		fprintf(f, "  export_prim_id = %u\n", key->vs.epilog.export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		fprintf(f, "  prim_mode = %u\n", key->tcs.epilog.prim_mode);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->tes.as_es);
		fprintf(f, "  export_prim_id = %u\n", key->tes.epilog.export_prim_id);
		break;

	/* GS and CS have no key fields worth dumping. */
	case PIPE_SHADER_GEOMETRY:
	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  prolog.color_two_side = %u\n", key->ps.prolog.color_two_side);
		fprintf(f, "  prolog.poly_stipple = %u\n", key->ps.prolog.poly_stipple);
		fprintf(f, "  prolog.force_persample_interp = %u\n", key->ps.prolog.force_persample_interp);
		fprintf(f, "  epilog.spi_shader_col_format = 0x%x\n", key->ps.epilog.spi_shader_col_format);
		fprintf(f, "  epilog.color_is_int8 = 0x%X\n", key->ps.epilog.color_is_int8);
		fprintf(f, "  epilog.last_cbuf = %u\n", key->ps.epilog.last_cbuf);
		fprintf(f, "  epilog.alpha_func = %u\n", key->ps.epilog.alpha_func);
		fprintf(f, "  epilog.alpha_to_one = %u\n", key->ps.epilog.alpha_to_one);
		fprintf(f, "  epilog.poly_line_smoothing = %u\n", key->ps.epilog.poly_line_smoothing);
		fprintf(f, "  epilog.clamp_color = %u\n", key->ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}
}
6297
/**
 * Initialize a si_shader_context: zero it, set up the LLVM context and
 * cached types, and wire all TGSI opcode actions to their emit functions.
 *
 * \param shader  may be NULL (e.g. for fixed-function parts); the TGSI info
 *                is only hooked up when a selector is present
 */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	memset(ctx, 0, sizeof(*ctx));
	radeon_llvm_context_init(&ctx->radeon_bld, "amdgcn--");
	ctx->tm = tm;
	ctx->screen = sscreen;
	if (shader && shader->selector)
		ctx->type = shader->selector->info.processor;
	else
		ctx->type = -1;	/* no TGSI; e.g. shader parts */
	ctx->shader = shader;

	/* Cache commonly used LLVM types. */
	ctx->voidt = LLVMVoidTypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i32 = LLVMInt32TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i64 = LLVMInt64TypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->i128 = LLVMIntTypeInContext(ctx->radeon_bld.gallivm.context, 128);
	ctx->f32 = LLVMFloatTypeInContext(ctx->radeon_bld.gallivm.context);
	ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	bld_base = &ctx->radeon_bld.soa.bld_base;
	if (shader && shader->selector)
		bld_base->info = &shader->selector->info;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes (fragment shaders). */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	/* Texture opcodes. */
	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	/* Image/buffer load, store and query opcodes. */
	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	/* Atomics: all share fetch_args/emit, only the intrinsic name differs. */
	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	/* Derivative opcodes (fragment shaders, via LDS or DPP). */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;

	/* MIN/MAX map to the IEEE minnum/maxnum intrinsics. */
	bld_base->op_actions[TGSI_OPCODE_MAX].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MAX].intr_name = "llvm.maxnum.f32";
	bld_base->op_actions[TGSI_OPCODE_MIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MIN].intr_name = "llvm.minnum.f32";
}
6399
6400 int si_compile_tgsi_shader(struct si_screen *sscreen,
6401 LLVMTargetMachineRef tm,
6402 struct si_shader *shader,
6403 bool is_monolithic,
6404 struct pipe_debug_callback *debug)
6405 {
6406 struct si_shader_selector *sel = shader->selector;
6407 struct si_shader_context ctx;
6408 struct lp_build_tgsi_context *bld_base;
6409 LLVMModuleRef mod;
6410 int r = 0;
6411
6412 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
6413 * conversion fails. */
6414 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
6415 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
6416 si_dump_shader_key(sel->type, &shader->key, stderr);
6417 tgsi_dump(sel->tokens, 0);
6418 si_dump_streamout(&sel->so);
6419 }
6420
6421 si_init_shader_ctx(&ctx, sscreen, shader, tm);
6422 ctx.is_monolithic = is_monolithic;
6423
6424 shader->info.uses_instanceid = sel->info.uses_instanceid;
6425
6426 bld_base = &ctx.radeon_bld.soa.bld_base;
6427 ctx.radeon_bld.load_system_value = declare_system_value;
6428
6429 switch (ctx.type) {
6430 case PIPE_SHADER_VERTEX:
6431 ctx.radeon_bld.load_input = declare_input_vs;
6432 if (shader->key.vs.as_ls)
6433 bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
6434 else if (shader->key.vs.as_es)
6435 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
6436 else
6437 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
6438 break;
6439 case PIPE_SHADER_TESS_CTRL:
6440 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
6441 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
6442 bld_base->emit_store = store_output_tcs;
6443 bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
6444 break;
6445 case PIPE_SHADER_TESS_EVAL:
6446 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
6447 if (shader->key.tes.as_es)
6448 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
6449 else
6450 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
6451 break;
6452 case PIPE_SHADER_GEOMETRY:
6453 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
6454 bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
6455 break;
6456 case PIPE_SHADER_FRAGMENT:
6457 ctx.radeon_bld.load_input = declare_input_fs;
6458 if (is_monolithic)
6459 bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
6460 else
6461 bld_base->emit_epilogue = si_llvm_return_fs_outputs;
6462 break;
6463 case PIPE_SHADER_COMPUTE:
6464 ctx.radeon_bld.declare_memory_region = declare_compute_memory;
6465 break;
6466 default:
6467 assert(!"Unsupported shader type");
6468 return -1;
6469 }
6470
6471 create_meta_data(&ctx);
6472 create_function(&ctx);
6473 preload_constants(&ctx);
6474 preload_shader_buffers(&ctx);
6475 preload_samplers(&ctx);
6476 preload_images(&ctx);
6477 preload_streamout_buffers(&ctx);
6478 preload_ring_buffers(&ctx);
6479
6480 if (ctx.is_monolithic && sel->type == PIPE_SHADER_FRAGMENT &&
6481 shader->key.ps.prolog.poly_stipple) {
6482 LLVMValueRef list = LLVMGetParam(ctx.radeon_bld.main_fn,
6483 SI_PARAM_RW_BUFFERS);
6484 si_llvm_emit_polygon_stipple(&ctx, list,
6485 SI_PARAM_POS_FIXED_PT);
6486 }
6487
6488 if (ctx.type == PIPE_SHADER_GEOMETRY) {
6489 int i;
6490 for (i = 0; i < 4; i++) {
6491 ctx.gs_next_vertex[i] =
6492 lp_build_alloca(bld_base->base.gallivm,
6493 ctx.i32, "");
6494 }
6495 }
6496
6497 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
6498 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
6499 goto out;
6500 }
6501
6502 LLVMBuildRet(bld_base->base.gallivm->builder, ctx.return_value);
6503 mod = bld_base->base.gallivm->module;
6504
6505 /* Dump LLVM IR before any optimization passes */
6506 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6507 r600_can_dump_shader(&sscreen->b, ctx.type))
6508 LLVMDumpModule(mod);
6509
6510 radeon_llvm_finalize_module(&ctx.radeon_bld);
6511
6512 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
6513 mod, debug, ctx.type, "TGSI shader");
6514 if (r) {
6515 fprintf(stderr, "LLVM failed to compile shader\n");
6516 goto out;
6517 }
6518
6519 radeon_llvm_dispose(&ctx.radeon_bld);
6520
6521 /* Add the scratch offset to input SGPRs. */
6522 if (shader->config.scratch_bytes_per_wave)
6523 shader->info.num_input_sgprs += 1; /* scratch byte offset */
6524
6525 /* Calculate the number of fragment input VGPRs. */
6526 if (ctx.type == PIPE_SHADER_FRAGMENT) {
6527 shader->info.num_input_vgprs = 0;
6528 shader->info.face_vgpr_index = -1;
6529
6530 if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6531 shader->info.num_input_vgprs += 2;
6532 if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
6533 shader->info.num_input_vgprs += 2;
6534 if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
6535 shader->info.num_input_vgprs += 2;
6536 if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
6537 shader->info.num_input_vgprs += 3;
6538 if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6539 shader->info.num_input_vgprs += 2;
6540 if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
6541 shader->info.num_input_vgprs += 2;
6542 if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
6543 shader->info.num_input_vgprs += 2;
6544 if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
6545 shader->info.num_input_vgprs += 1;
6546 if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
6547 shader->info.num_input_vgprs += 1;
6548 if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
6549 shader->info.num_input_vgprs += 1;
6550 if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
6551 shader->info.num_input_vgprs += 1;
6552 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
6553 shader->info.num_input_vgprs += 1;
6554 if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
6555 shader->info.face_vgpr_index = shader->info.num_input_vgprs;
6556 shader->info.num_input_vgprs += 1;
6557 }
6558 if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
6559 shader->info.num_input_vgprs += 1;
6560 if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
6561 shader->info.num_input_vgprs += 1;
6562 if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
6563 shader->info.num_input_vgprs += 1;
6564 }
6565
6566 if (ctx.type == PIPE_SHADER_GEOMETRY) {
6567 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
6568 shader->gs_copy_shader->selector = shader->selector;
6569 ctx.shader = shader->gs_copy_shader;
6570 if ((r = si_generate_gs_copy_shader(sscreen, &ctx,
6571 shader, debug))) {
6572 free(shader->gs_copy_shader);
6573 shader->gs_copy_shader = NULL;
6574 goto out;
6575 }
6576 }
6577
6578 out:
6579 for (int i = 0; i < SI_NUM_CONST_BUFFERS; i++)
6580 FREE(ctx.constants[i]);
6581 return r;
6582 }
6583
6584 /**
6585 * Create, compile and return a shader part (prolog or epilog).
6586 *
6587 * \param sscreen screen
6588 * \param list list of shader parts of the same category
6589 * \param key shader part key
6590 * \param tm LLVM target machine
6591 * \param debug debug callback
6592 * \param compile the callback responsible for compilation
6593 * \return non-NULL on success
6594 */
6595 static struct si_shader_part *
6596 si_get_shader_part(struct si_screen *sscreen,
6597 struct si_shader_part **list,
6598 union si_shader_part_key *key,
6599 LLVMTargetMachineRef tm,
6600 struct pipe_debug_callback *debug,
6601 bool (*compile)(struct si_screen *,
6602 LLVMTargetMachineRef,
6603 struct pipe_debug_callback *,
6604 struct si_shader_part *))
6605 {
6606 struct si_shader_part *result;
6607
6608 pipe_mutex_lock(sscreen->shader_parts_mutex);
6609
6610 /* Find existing. */
6611 for (result = *list; result; result = result->next) {
6612 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6613 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6614 return result;
6615 }
6616 }
6617
6618 /* Compile a new one. */
6619 result = CALLOC_STRUCT(si_shader_part);
6620 result->key = *key;
6621 if (!compile(sscreen, tm, debug, result)) {
6622 FREE(result);
6623 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6624 return NULL;
6625 }
6626
6627 result->next = *list;
6628 *list = result;
6629 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6630 return result;
6631 }
6632
6633 /**
6634 * Create a vertex shader prolog.
6635 *
6636 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6637 * All inputs are returned unmodified. The vertex load indices are
6638 * stored after them, which will used by the API VS for fetching inputs.
6639 *
6640 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6641 * input_v0,
6642 * input_v1,
6643 * input_v2,
6644 * input_v3,
6645 * (VertexID + BaseVertex),
6646 * (InstanceID + StartInstance),
6647 * (InstanceID / 2 + StartInstance)
6648 */
/* Build and compile the VS prolog described by out->key (see the comment
 * block above for the output layout). Returns true on success. */
static bool si_compile_vs_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Note: this only takes the address of a ctx member; the struct is
	 * filled in by si_init_shader_ctx() below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	LLVMTypeRef *params, *returns;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_VERTEX;
	/* VertexID is the first VGPR after the SGPRs, InstanceID the fourth. */
	ctx.param_vertex_id = key->vs_prolog.num_input_sgprs;
	ctx.param_instance_id = key->vs_prolog.num_input_sgprs + 3;

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	params = alloca((key->vs_prolog.num_input_sgprs + 4) *
			sizeof(LLVMTypeRef));
	returns = alloca((key->vs_prolog.num_input_sgprs + 4 +
			  key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	num_params = 0;
	num_returns = 0;

	/* Declare input and output SGPRs. */
	num_params = 0;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.i32;
	}
	last_sgpr = num_params - 1;

	/* 4 preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < 4; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, returns, num_returns, params,
			   num_params, -1, last_sgpr);
	func = ctx.radeon_bld.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}
	for (i = num_params - 4; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		/* VGPR outputs were declared as f32 above; bitcast the i32
		 * inputs to match. */
		p = LLVMBuildBitCast(gallivm->builder, p, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		unsigned divisor = key->vs_prolog.states.instance_divisors[i];
		LLVMValueRef index;

		if (divisor) {
			/* InstanceID / Divisor + StartInstance */
			index = get_instance_index_for_fetch(&ctx.radeon_bld,
							     SI_SGPR_START_INSTANCE,
							     divisor);
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(gallivm->builder,
					     LLVMGetParam(func, ctx.param_vertex_id),
					     LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
		}

		/* Indices are appended after the 4 VGPRs as f32 returns. */
		index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
					   num_params++, "");
	}

	/* Compile. */
	LLVMBuildRet(gallivm->builder, ret);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Prolog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
6748
6749 /**
6750 * Compile the vertex shader epilog. This is also used by the tessellation
6751 * evaluation shader compiled as VS.
6752 *
6753 * The input is PrimitiveID.
6754 *
6755 * If PrimitiveID is required by the pixel shader, export it.
6756 * Otherwise, do nothing.
6757 */
/* Build and compile the VS epilog described by out->key (see the comment
 * block above). Returns true on success. */
static bool si_compile_vs_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader_context ctx;
	/* Only member addresses are taken here; ctx is initialized below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	LLVMTypeRef params[5];
	int num_params, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, NULL, tm);
	ctx.type = PIPE_SHADER_VERTEX;

	/* Declare input VGPRs. Only declared up to and including
	 * VS_EPILOG_PRIMID_LOC when PrimitiveID export is needed. */
	num_params = key->vs_epilog.states.export_prim_id ?
			   (VS_EPILOG_PRIMID_LOC + 1) : 0;
	assert(num_params <= ARRAY_SIZE(params));

	for (i = 0; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params,
			   -1, -1);

	/* Emit exports. */
	if (key->vs_epilog.states.export_prim_id) {
		struct lp_build_context *base = &bld_base->base;
		struct lp_build_context *uint = &bld_base->uint_bld;
		LLVMValueRef args[9];

		args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
		args[1] = uint->zero; /* whether the EXEC mask is valid */
		args[2] = uint->zero; /* DONE bit */
		/* Export to the parameter slot the PS reads PrimitiveID from. */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_PARAM +
					       key->vs_epilog.prim_id_param_offset);
		args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
		args[5] = LLVMGetParam(ctx.radeon_bld.main_fn,
				       VS_EPILOG_PRIMID_LOC); /* X */
		args[6] = uint->undef; /* Y */
		args[7] = uint->undef; /* Z */
		args[8] = uint->undef; /* W */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9, 0);
	}

	/* Compile. */
	LLVMBuildRet(gallivm->builder, ctx.return_value);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
6821
6822 /**
6823 * Create & compile a vertex shader epilog. This a helper used by VS and TES.
6824 */
6825 static bool si_get_vs_epilog(struct si_screen *sscreen,
6826 LLVMTargetMachineRef tm,
6827 struct si_shader *shader,
6828 struct pipe_debug_callback *debug,
6829 struct si_vs_epilog_bits *states)
6830 {
6831 union si_shader_part_key epilog_key;
6832
6833 memset(&epilog_key, 0, sizeof(epilog_key));
6834 epilog_key.vs_epilog.states = *states;
6835
6836 /* Set up the PrimitiveID output. */
6837 if (shader->key.vs.epilog.export_prim_id) {
6838 unsigned index = shader->selector->info.num_outputs;
6839 unsigned offset = shader->info.nr_param_exports++;
6840
6841 epilog_key.vs_epilog.prim_id_param_offset = offset;
6842 assert(index < ARRAY_SIZE(shader->info.vs_output_param_offset));
6843 shader->info.vs_output_param_offset[index] = offset;
6844 }
6845
6846 shader->epilog = si_get_shader_part(sscreen, &sscreen->vs_epilogs,
6847 &epilog_key, tm, debug,
6848 si_compile_vs_epilog);
6849 return shader->epilog != NULL;
6850 }
6851
6852 /**
6853 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6854 */
6855 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
6856 LLVMTargetMachineRef tm,
6857 struct si_shader *shader,
6858 struct pipe_debug_callback *debug)
6859 {
6860 struct tgsi_shader_info *info = &shader->selector->info;
6861 union si_shader_part_key prolog_key;
6862 unsigned i;
6863
6864 /* Get the prolog. */
6865 memset(&prolog_key, 0, sizeof(prolog_key));
6866 prolog_key.vs_prolog.states = shader->key.vs.prolog;
6867 prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
6868 prolog_key.vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
6869
6870 /* The prolog is a no-op if there are no inputs. */
6871 if (info->num_inputs) {
6872 shader->prolog =
6873 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6874 &prolog_key, tm, debug,
6875 si_compile_vs_prolog);
6876 if (!shader->prolog)
6877 return false;
6878 }
6879
6880 /* Get the epilog. */
6881 if (!shader->key.vs.as_es && !shader->key.vs.as_ls &&
6882 !si_get_vs_epilog(sscreen, tm, shader, debug,
6883 &shader->key.vs.epilog))
6884 return false;
6885
6886 /* Set the instanceID flag. */
6887 for (i = 0; i < info->num_inputs; i++)
6888 if (prolog_key.vs_prolog.states.instance_divisors[i])
6889 shader->info.uses_instanceid = true;
6890
6891 return true;
6892 }
6893
6894 /**
6895 * Select and compile (or reuse) TES parts (epilog).
6896 */
6897 static bool si_shader_select_tes_parts(struct si_screen *sscreen,
6898 LLVMTargetMachineRef tm,
6899 struct si_shader *shader,
6900 struct pipe_debug_callback *debug)
6901 {
6902 if (shader->key.tes.as_es)
6903 return true;
6904
6905 /* TES compiled as VS. */
6906 return si_get_vs_epilog(sscreen, tm, shader, debug,
6907 &shader->key.tes.epilog);
6908 }
6909
6910 /**
6911 * Compile the TCS epilog. This writes tesselation factors to memory based on
6912 * the output primitive type of the tesselator (determined by TES).
6913 */
/* Build and compile the TCS epilog described by out->key (see the comment
 * block above). Returns true on success. */
static bool si_compile_tcs_epilog(struct si_screen *sscreen,
				  LLVMTargetMachineRef tm,
				  struct pipe_debug_callback *debug,
				  struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Only member addresses are taken here; ctx is initialized below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	LLVMTypeRef params[16];
	LLVMValueRef func;
	int last_array_pointer, last_sgpr, num_params;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_TESS_CTRL;
	shader.key.tcs.epilog = key->tcs_epilog.states;

	/* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used.
	 * The full TCS SGPR layout is declared anyway so the parameter
	 * indices match the main shader. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx.v16i8, SI_NUM_RW_BUFFERS);
	last_array_pointer = SI_PARAM_RW_BUFFERS;
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_OUT_OFFSETS] = ctx.i32;
	params[SI_PARAM_TCS_OUT_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_IN_LAYOUT] = ctx.i32;
	params[ctx.param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx.i32;
	params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx.i32;
	last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
	num_params = last_sgpr + 1;

	params[num_params++] = ctx.i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx.i32; /* invocation ID within the patch */
	params[num_params++] = ctx.i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params,
			   last_array_pointer, last_sgpr);
	declare_tess_lds(&ctx);
	func = ctx.radeon_bld.main_fn;

	/* Write the tess factors using the three VGPRs declared above. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	/* Compile. */
	LLVMBuildRet(gallivm->builder, ctx.return_value);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Tessellation Control Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
6976
6977 /**
6978 * Select and compile (or reuse) TCS parts (epilog).
6979 */
6980 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6981 LLVMTargetMachineRef tm,
6982 struct si_shader *shader,
6983 struct pipe_debug_callback *debug)
6984 {
6985 union si_shader_part_key epilog_key;
6986
6987 /* Get the epilog. */
6988 memset(&epilog_key, 0, sizeof(epilog_key));
6989 epilog_key.tcs_epilog.states = shader->key.tcs.epilog;
6990
6991 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6992 &epilog_key, tm, debug,
6993 si_compile_tcs_epilog);
6994 return shader->epilog != NULL;
6995 }
6996
6997 /**
6998 * Compile the pixel shader prolog. This handles:
6999 * - two-side color selection and interpolation
7000 * - overriding interpolation parameters for the API PS
7001 * - polygon stippling
7002 *
7003 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
7004 * overriden by other states. (e.g. per-sample interpolation)
7005 * Interpolated colors are stored after the preloaded VGPRs.
7006 */
/* Build and compile the PS prolog described by out->key (see the comment
 * block above). Returns true on success. */
static bool si_compile_ps_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Only a member address is taken here; ctx is initialized below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_FRAGMENT;
	shader.key.ps.prolog = key->ps_prolog.states;

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx.i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx.f32;

	/* Declare outputs (same as inputs + add colors if needed) */
	/* Note: the return types deliberately alias the params array — the
	 * returns are all the inputs plus the trailing color channels. */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, params, num_returns, params,
			   num_params, -1, last_sgpr);
	func = ctx.radeon_bld.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		/* Assemble the two 32-bit halves into a 64-bit pointer to
		 * the RW-buffer descriptor array. */
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx.i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					  const_array(ctx.v16i8, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(&ctx, list, pos);
	}

	/* Interpolate colors. Two iterations: COLOR0 and COLOR1. */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Gather the (i, j) barycentrics into a v2i32. */
			interp[0] = LLVMGetParam(func, interp_vgpr);
			interp[1] = LLVMGetParam(func, interp_vgpr + 1);
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
			interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
						     ctx.v2i32, "");
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx.i32, "");
		}

		interp_fs_input(&ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append interpolated channels after the pass-through returns. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Force per-sample interpolation: overwrite the CENTER/CENTROID
	 * barycentric pairs with the SAMPLE ones in the return value. */
	if (key->ps_prolog.states.force_persample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2], linear_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Compile. */
	LLVMBuildRet(gallivm->builder, ret);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Prolog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7163
7164 /**
7165 * Compile the pixel shader epilog. This handles everything that must be
7166 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
7167 */
/* Build and compile the PS epilog described by out->key (see the comment
 * block above). Returns true on success. */
static bool si_compile_ps_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Only member addresses are taken here; ctx is initialized below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	/* Max: SGPRs + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_array_pointer, last_sgpr, num_params, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_FRAGMENT;
	shader.key.ps.epilog = key->ps_epilog.states;

	/* Declare input SGPRs. */
	params[SI_PARAM_RW_BUFFERS] = ctx.i64;
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_ALPHA_REF] = ctx.f32;
	last_array_pointer = -1;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 channels per written color, plus the
	 * optional depth/stencil/samplemask values. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Guarantee at least enough VGPRs up to the samplemask location. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params,
			   last_array_pointer, last_sgpr);
	/* Disable elimination of unused inputs. */
	radeon_llvm_add_attribute(ctx.radeon_bld.main_fn,
				  "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so it can carry the DONE bit;
	 * skipped entirely when Z/stencil/samplemask exports come last. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written color from its 4 consecutive input VGPRs. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask);
	else if (last_color_export == -1)
		/* Nothing was exported at all; emit a null export. */
		si_export_null(bld_base);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7281
7282 /**
7283 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
7284 */
/* Select or create the prolog and epilog shader parts for a pixel shader
 * and patch up SPI_PS_INPUT_ENA so it covers everything the combined
 * prolog + main + epilog shader actually reads.
 *
 * \param sscreen  screen holding the caches of compiled shader parts
 * \param tm       LLVM target machine used if a part must be compiled
 * \param shader   the shader variant being assembled; its prolog/epilog
 *                 pointers and config.spi_ps_input_ena are written here
 * \param debug    debug callback forwarded to the part compiler
 * \return false if a required shader part could not be built.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;
	unsigned i;

	/* Get the prolog. */
	memset(&prolog_key, 0, sizeof(prolog_key));
	prolog_key.ps_prolog.states = shader->key.ps.prolog;
	prolog_key.ps_prolog.colors_read = info->colors_read;
	prolog_key.ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	prolog_key.ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;

	/* Describe how the prolog must interpolate the color inputs. */
	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			prolog_key.ps_prolog.num_interp_inputs = info->num_inputs;
			prolog_key.ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			/* Front/back selection needs the front-face input. */
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* Up to two color inputs (COLOR0 / COLOR1). */
		for (i = 0; i < 2; i++) {
			unsigned location = info->input_interpolate_loc[color[i]];

			/* Skip a color whose components are all unread. */
			if (!(info->colors_read & (0xf << i*4)))
				continue;

			prolog_key.ps_prolog.color_attr_index[i] = color[i];

			/* Force per-sample interpolation for the colors here. */
			if (shader->key.ps.prolog.force_persample_interp)
				location = TGSI_INTERPOLATE_LOC_SAMPLE;

			/* Map (interpolation mode, location) to the first VGPR of
			 * the barycentric pair the prolog should use, and enable
			 * the matching SPI input.  -1 means flat/constant: no
			 * barycentrics are needed at all.
			 */
			switch (info->input_interpolate[color[i]]) {
			case TGSI_INTERPOLATE_CONSTANT:
				prolog_key.ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 6;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 8;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 10;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}

	/* The prolog is a no-op if these aren't set. */
	if (prolog_key.ps_prolog.colors_read ||
	    prolog_key.ps_prolog.states.force_persample_interp ||
	    prolog_key.ps_prolog.states.poly_stipple) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   &prolog_key, tm, debug,
					   si_compile_ps_prolog);
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog.  The epilog is always required: it performs the
	 * final color/Z/stencil/samplemask exports.
	 */
	memset(&epilog_key, 0, sizeof(epilog_key));
	epilog_key.ps_epilog.colors_written = info->colors_written;
	epilog_key.ps_epilog.writes_z = info->writes_z;
	epilog_key.ps_epilog.writes_stencil = info->writes_stencil;
	epilog_key.ps_epilog.writes_samplemask = info->writes_samplemask;
	epilog_key.ps_epilog.states = shader->key.ps.epilog;

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   &epilog_key, tm, debug,
				   si_compile_ps_epilog);
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed:
	 * replace requested center/centroid barycentrics with the
	 * per-sample ones of the same mode (perspective or linear).
	 */
	if (shader->key.ps.prolog.force_persample_interp) {
		if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
		    G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena)) {
			shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
			shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
			shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
		}
		if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
		    G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena)) {
			shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
			shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
			shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
		}
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7448
7449 static void si_fix_num_sgprs(struct si_shader *shader)
7450 {
7451 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7452
7453 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7454 }
7455
/* Create a ready-to-use shader variant: either compile it monolithically
 * or reuse the selector's precompiled main part and attach prolog/epilog
 * parts, then fix up register counts, dump it, and upload the binary.
 *
 * \param sscreen  screen (part caches, winsys)
 * \param tm       LLVM target machine for any required compilation
 * \param shader   the variant to create; selector and key must be set
 * \param debug    debug callback for compile diagnostics
 * \return 0 on success, a negative error code or the compiler's error
 *         code on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader *mainp = shader->selector->main_shader_part;
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 */
	if (!mainp ||
	    (shader->selector->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es != mainp->key.vs.as_es ||
	      shader->key.vs.as_ls != mainp->key.vs.as_ls)) ||
	    (shader->selector->type == PIPE_SHADER_TESS_EVAL &&
	     shader->key.tes.as_es != mainp->key.tes.as_es) ||
	    (shader->selector->type == PIPE_SHADER_TESS_CTRL &&
	     shader->key.tcs.epilog.inputs_to_copy) ||
	    shader->selector->type == PIPE_SHADER_COMPUTE) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over. */
		shader->is_binary_shared = true; /* binary owned by mainp; don't free it */
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (!si_shader_select_tes_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts to cover the prolog/epilog too. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_num_sgprs(shader);
	si_shader_dump(sscreen, shader, debug, shader->selector->info.processor,
		       stderr);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
7561
7562 void si_shader_destroy(struct si_shader *shader)
7563 {
7564 if (shader->gs_copy_shader) {
7565 si_shader_destroy(shader->gs_copy_shader);
7566 FREE(shader->gs_copy_shader);
7567 }
7568
7569 if (shader->scratch_bo)
7570 r600_resource_reference(&shader->scratch_bo, NULL);
7571
7572 r600_resource_reference(&shader->bo, NULL);
7573
7574 if (!shader->is_binary_shared)
7575 radeon_shader_binary_clean(&shader->binary);
7576 }