be6fae75ab062ad511c9948ad0f7eff456857089
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_bitarit.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "gallivm/lp_bld_misc.h"
37 #include "radeon/r600_cs.h"
38 #include "radeon/radeon_llvm.h"
39 #include "radeon/radeon_elf_util.h"
40 #include "radeon/radeon_llvm_emit.h"
41 #include "util/u_memory.h"
42 #include "util/u_string.h"
43 #include "tgsi/tgsi_parse.h"
44 #include "tgsi/tgsi_build.h"
45 #include "tgsi/tgsi_util.h"
46 #include "tgsi/tgsi_dump.h"
47
48 #include "si_pipe.h"
49 #include "si_shader.h"
50 #include "sid.h"
51
52 #include <errno.h>
53
/* Names of the ELF symbols standing for the first two dwords of the scratch
 * buffer resource descriptor; presumably patched with real values when the
 * shader binary is uploaded — confirm against the binary upload code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
59
/* One shader output slot: the four channel values plus the TGSI semantic
 * (name + sid) identifying which output it is. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* one LLVM value per channel (x, y, z, w) */
	unsigned name;		/* TGSI_SEMANTIC_* */
	unsigned sid;		/* semantic index */
};
66
/* Per-compilation state for translating one TGSI shader to LLVM IR.
 * Embeds radeon_llvm_context as its first member, which is what makes the
 * si_shader_context() downcast from lp_build_tgsi_context work. */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld; /* must stay first (cast target) */
	struct si_shader *shader;
	struct si_screen *screen;

	unsigned type; /* PIPE_SHADER_* specifies the type of shader. */
	bool is_gs_copy_shader;

	/* Whether to generate the optimized shader variant compiled as a whole
	 * (without a prolog and epilog)
	 */
	bool is_monolithic;

	/* Indices of function parameters (as passed to LLVMGetParam);
	 * -1 style sentinels are not visible here — set during declaration. */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_rel_auto_id;
	int param_vs_prim_id;
	int param_instance_id;
	int param_vertex_index0;
	int param_tes_u;
	int param_tes_v;
	int param_tes_rel_patch_id;
	int param_tes_patch_id;
	int param_es2gs_offset;
	int param_oc_lds;

	/* Sets a bit if the dynamic HS control word was 0x80000000. The bit is
	 * 0x800000 for VS, 0x1 for ES.
	 */
	int param_tess_offchip;

	LLVMTargetMachineRef tm;

	/* Metadata kind IDs used to annotate loads for the optimizer. */
	unsigned invariant_load_md_kind;
	unsigned range_md_kind;
	unsigned uniform_md_kind;
	LLVMValueRef empty_md;

	/* Cached descriptor/resource values loaded once per shader. */
	LLVMValueRef const_buffers[SI_NUM_CONST_BUFFERS];
	LLVMValueRef lds;
	LLVMValueRef shader_buffers[SI_NUM_SHADER_BUFFERS];
	LLVMValueRef sampler_views[SI_NUM_SAMPLERS];
	LLVMValueRef sampler_states[SI_NUM_SAMPLERS];
	LLVMValueRef fmasks[SI_NUM_SAMPLERS];
	LLVMValueRef images[SI_NUM_IMAGES];
	LLVMValueRef so_buffers[4];
	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef return_value;

	/* Frequently used LLVM types, cached to avoid repeated lookups. */
	LLVMTypeRef voidt;
	LLVMTypeRef i1;
	LLVMTypeRef i8;
	LLVMTypeRef i32;
	LLVMTypeRef i64;
	LLVMTypeRef i128;
	LLVMTypeRef f32;
	LLVMTypeRef v16i8;
	LLVMTypeRef v2i32;
	LLVMTypeRef v4i32;
	LLVMTypeRef v4f32;
	LLVMTypeRef v8i32;

	LLVMValueRef shared_memory;
};
136
/* Downcast the TGSI build context to the enclosing si_shader_context.
 * Assumes the lp_build_tgsi_context sits at offset 0 of the struct via the
 * embedded radeon_llvm_context — confirm in radeon_llvm.h if in doubt. */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	void *opaque = bld_base;

	return (struct si_shader_context *)opaque;
}
142
143 static void si_init_shader_ctx(struct si_shader_context *ctx,
144 struct si_screen *sscreen,
145 struct si_shader *shader,
146 LLVMTargetMachineRef tm);
147
148 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
149 struct lp_build_tgsi_context *bld_base,
150 struct lp_build_emit_data *emit_data);
151
152 static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
153 FILE *f);
154
155 /* Ideally pass the sample mask input to the PS epilog as v13, which
156 * is its usual location, so that the shader doesn't have to add v_mov.
157 */
158 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 13
159
160 /* The VS location of the PrimitiveID input is the same in the epilog,
161 * so that the main shader part doesn't have to move it.
162 */
163 #define VS_EPILOG_PRIMID_LOC 2
164
165 #define PERSPECTIVE_BASE 0
166 #define LINEAR_BASE 9
167
168 #define SAMPLE_OFFSET 0
169 #define CENTER_OFFSET 2
170 #define CENTROID_OFSET 4
171
172 #define USE_SGPR_MAX_SUFFIX_LEN 5
173 #define CONST_ADDR_SPACE 2
174 #define LOCAL_ADDR_SPACE 3
175 #define USER_SGPR_ADDR_SPACE 8
176
177
178 #define SENDMSG_GS 2
179 #define SENDMSG_GS_DONE 3
180
181 #define SENDMSG_GS_OP_NOP (0 << 4)
182 #define SENDMSG_GS_OP_CUT (1 << 4)
183 #define SENDMSG_GS_OP_EMIT (2 << 4)
184 #define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
185
186 /**
187 * Returns a unique index for a semantic name and index. The index must be
188 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
189 * calculated.
190 */
191 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
192 {
193 switch (semantic_name) {
194 case TGSI_SEMANTIC_POSITION:
195 return 0;
196 case TGSI_SEMANTIC_PSIZE:
197 return 1;
198 case TGSI_SEMANTIC_CLIPDIST:
199 assert(index <= 1);
200 return 2 + index;
201 case TGSI_SEMANTIC_GENERIC:
202 if (index <= 63-4)
203 return 4 + index;
204 else
205 /* same explanation as in the default statement,
206 * the only user hitting this is st/nine.
207 */
208 return 0;
209
210 /* patch indices are completely separate and thus start from 0 */
211 case TGSI_SEMANTIC_TESSOUTER:
212 return 0;
213 case TGSI_SEMANTIC_TESSINNER:
214 return 1;
215 case TGSI_SEMANTIC_PATCH:
216 return 2 + index;
217
218 default:
219 /* Don't fail here. The result of this function is only used
220 * for LS, TCS, TES, and GS, where legacy GL semantics can't
221 * occur, but this function is called for all vertex shaders
222 * before it's known whether LS will be compiled or not.
223 */
224 return 0;
225 }
226 }
227
228 /**
229 * Get the value of a shader input parameter and extract a bitfield.
230 */
231 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
232 unsigned param, unsigned rshift,
233 unsigned bitwidth)
234 {
235 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
236 LLVMValueRef value = LLVMGetParam(ctx->radeon_bld.main_fn,
237 param);
238
239 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
240 value = bitcast(&ctx->radeon_bld.soa.bld_base,
241 TGSI_TYPE_UNSIGNED, value);
242
243 if (rshift)
244 value = LLVMBuildLShr(gallivm->builder, value,
245 lp_build_const_int32(gallivm, rshift), "");
246
247 if (rshift + bitwidth < 32) {
248 unsigned mask = (1 << bitwidth) - 1;
249 value = LLVMBuildAnd(gallivm->builder, value,
250 lp_build_const_int32(gallivm, mask), "");
251 }
252
253 return value;
254 }
255
256 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
257 {
258 switch (ctx->type) {
259 case PIPE_SHADER_TESS_CTRL:
260 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
261
262 case PIPE_SHADER_TESS_EVAL:
263 return LLVMGetParam(ctx->radeon_bld.main_fn,
264 ctx->param_tes_rel_patch_id);
265
266 default:
267 assert(0);
268 return NULL;
269 }
270 }
271
272 /* Tessellation shaders pass outputs to the next shader using LDS.
273 *
274 * LS outputs = TCS inputs
275 * TCS outputs = TES inputs
276 *
277 * The LDS layout is:
278 * - TCS inputs for patch 0
279 * - TCS inputs for patch 1
280 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
281 * - ...
282 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
283 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
284 * - TCS outputs for patch 1
285 * - Per-patch TCS outputs for patch 1
286 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
287 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
288 * - ...
289 *
290 * All three shaders VS(LS), TCS, TES share the same LDS space.
291 */
292
293 static LLVMValueRef
294 get_tcs_in_patch_stride(struct si_shader_context *ctx)
295 {
296 if (ctx->type == PIPE_SHADER_VERTEX)
297 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
298 else if (ctx->type == PIPE_SHADER_TESS_CTRL)
299 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
300 else {
301 assert(0);
302 return NULL;
303 }
304 }
305
306 static LLVMValueRef
307 get_tcs_out_patch_stride(struct si_shader_context *ctx)
308 {
309 return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
310 }
311
312 static LLVMValueRef
313 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
314 {
315 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
316 unpack_param(ctx,
317 SI_PARAM_TCS_OUT_OFFSETS,
318 0, 16),
319 4);
320 }
321
322 static LLVMValueRef
323 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
324 {
325 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
326 unpack_param(ctx,
327 SI_PARAM_TCS_OUT_OFFSETS,
328 16, 16),
329 4);
330 }
331
332 static LLVMValueRef
333 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
334 {
335 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
336 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
337 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
338
339 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
340 }
341
342 static LLVMValueRef
343 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
344 {
345 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
346 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
347 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
348 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
349
350 return LLVMBuildAdd(gallivm->builder, patch0_offset,
351 LLVMBuildMul(gallivm->builder, patch_stride,
352 rel_patch_id, ""),
353 "");
354 }
355
356 static LLVMValueRef
357 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
358 {
359 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
360 LLVMValueRef patch0_patch_data_offset =
361 get_tcs_out_patch0_patch_data_offset(ctx);
362 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
363 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
364
365 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
366 LLVMBuildMul(gallivm->builder, patch_stride,
367 rel_patch_id, ""),
368 "");
369 }
370
371 static void build_indexed_store(struct si_shader_context *ctx,
372 LLVMValueRef base_ptr, LLVMValueRef index,
373 LLVMValueRef value)
374 {
375 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
376 struct gallivm_state *gallivm = bld_base->base.gallivm;
377 LLVMValueRef indices[2], pointer;
378
379 indices[0] = bld_base->uint_bld.zero;
380 indices[1] = index;
381
382 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
383 LLVMBuildStore(gallivm->builder, value, pointer);
384 }
385
386 /**
387 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
388 * It's equivalent to doing a load from &base_ptr[index].
389 *
390 * \param base_ptr Where the array starts.
391 * \param index The element index into the array.
392 * \param uniform Whether the base_ptr and index can be assumed to be
393 * dynamically uniform
394 */
395 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
396 LLVMValueRef base_ptr, LLVMValueRef index,
397 bool uniform)
398 {
399 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
400 struct gallivm_state *gallivm = bld_base->base.gallivm;
401 LLVMValueRef indices[2], pointer;
402
403 indices[0] = bld_base->uint_bld.zero;
404 indices[1] = index;
405
406 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
407 if (uniform)
408 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
409 return LLVMBuildLoad(gallivm->builder, pointer, "");
410 }
411
412 /**
413 * Do a load from &base_ptr[index], but also add a flag that it's loading
414 * a constant from a dynamically uniform index.
415 */
416 static LLVMValueRef build_indexed_load_const(
417 struct si_shader_context *ctx,
418 LLVMValueRef base_ptr, LLVMValueRef index)
419 {
420 LLVMValueRef result = build_indexed_load(ctx, base_ptr, index, true);
421 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
422 return result;
423 }
424
425 static LLVMValueRef get_instance_index_for_fetch(
426 struct radeon_llvm_context *radeon_bld,
427 unsigned param_start_instance, unsigned divisor)
428 {
429 struct si_shader_context *ctx =
430 si_shader_context(&radeon_bld->soa.bld_base);
431 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
432
433 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
434 ctx->param_instance_id);
435
436 /* The division must be done before START_INSTANCE is added. */
437 if (divisor > 1)
438 result = LLVMBuildUDiv(gallivm->builder, result,
439 lp_build_const_int32(gallivm, divisor), "");
440
441 return LLVMBuildAdd(gallivm->builder, result,
442 LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
443 }
444
/**
 * Declare one vertex shader input: fetch the attribute through
 * llvm.SI.vs.load.input and expose its four components via
 * radeon_bld.inputs[].
 *
 * How the buffer index is obtained depends on the compile mode:
 * - non-monolithic: the prolog passes it in as a dedicated parameter
 *   (param_vertex_index0 + input_index)
 * - monolithic + instance divisor: derived from InstanceID/StartInstance
 * - monolithic otherwise: BaseVertex + VertexID
 */
static void declare_input_vs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	unsigned divisor =
		ctx->shader->key.vs.prolog.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list (vertex buffer descriptor array). */
	t_list_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	if (!ctx->is_monolithic) {
		/* The prolog has already computed the index for us. */
		buffer_index = LLVMGetParam(radeon_bld->main_fn,
					    ctx->param_vertex_index0 +
					    input_index);
	} else if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->info.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&ctx->radeon_bld,
							    SI_PARAM_START_INSTANCE,
							    divisor);
	} else {
		/* Load the buffer index for vertices. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
				   LLVMReadNoneAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(gallivm->builder,
						input, llvm_chan, "");
	}
}
513
514 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
515 unsigned swizzle)
516 {
517 struct si_shader_context *ctx = si_shader_context(bld_base);
518
519 if (swizzle > 0)
520 return bld_base->uint_bld.zero;
521
522 switch (ctx->type) {
523 case PIPE_SHADER_VERTEX:
524 return LLVMGetParam(ctx->radeon_bld.main_fn,
525 ctx->param_vs_prim_id);
526 case PIPE_SHADER_TESS_CTRL:
527 return LLVMGetParam(ctx->radeon_bld.main_fn,
528 SI_PARAM_PATCH_ID);
529 case PIPE_SHADER_TESS_EVAL:
530 return LLVMGetParam(ctx->radeon_bld.main_fn,
531 ctx->param_tes_patch_id);
532 case PIPE_SHADER_GEOMETRY:
533 return LLVMGetParam(ctx->radeon_bld.main_fn,
534 SI_PARAM_PRIMITIVE_ID);
535 default:
536 assert(0);
537 return bld_base->uint_bld.zero;
538 }
539 }
540
541 /**
542 * Return the value of tgsi_ind_register for indexing.
543 * This is the indirect index with the constant offset added to it.
544 */
545 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
546 const struct tgsi_ind_register *ind,
547 int rel_index)
548 {
549 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
550 LLVMValueRef result;
551
552 result = ctx->radeon_bld.soa.addr[ind->Index][ind->Swizzle];
553 result = LLVMBuildLoad(gallivm->builder, result, "");
554 result = LLVMBuildAdd(gallivm->builder, result,
555 lp_build_const_int32(gallivm, rel_index), "");
556 return result;
557 }
558
559 /**
560 * Like get_indirect_index, but restricts the return value to a (possibly
561 * undefined) value inside [0..num).
562 */
563 static LLVMValueRef get_bounded_indirect_index(struct si_shader_context *ctx,
564 const struct tgsi_ind_register *ind,
565 int rel_index, unsigned num)
566 {
567 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
568
569 /* LLVM 3.8: If indirect resource indexing is used:
570 * - SI & CIK hang
571 * - VI crashes
572 */
573 if (HAVE_LLVM <= 0x0308)
574 return LLVMGetUndef(ctx->i32);
575
576 return radeon_llvm_bound_index(&ctx->radeon_bld, result, num);
577 }
578
579
580 /**
581 * Calculate a dword address given an input or output register and a stride.
582 */
583 static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
584 const struct tgsi_full_dst_register *dst,
585 const struct tgsi_full_src_register *src,
586 LLVMValueRef vertex_dw_stride,
587 LLVMValueRef base_addr)
588 {
589 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
590 struct tgsi_shader_info *info = &ctx->shader->selector->info;
591 ubyte *name, *index, *array_first;
592 int first, param;
593 struct tgsi_full_dst_register reg;
594
595 /* Set the register description. The address computation is the same
596 * for sources and destinations. */
597 if (src) {
598 reg.Register.File = src->Register.File;
599 reg.Register.Index = src->Register.Index;
600 reg.Register.Indirect = src->Register.Indirect;
601 reg.Register.Dimension = src->Register.Dimension;
602 reg.Indirect = src->Indirect;
603 reg.Dimension = src->Dimension;
604 reg.DimIndirect = src->DimIndirect;
605 } else
606 reg = *dst;
607
608 /* If the register is 2-dimensional (e.g. an array of vertices
609 * in a primitive), calculate the base address of the vertex. */
610 if (reg.Register.Dimension) {
611 LLVMValueRef index;
612
613 if (reg.Dimension.Indirect)
614 index = get_indirect_index(ctx, &reg.DimIndirect,
615 reg.Dimension.Index);
616 else
617 index = lp_build_const_int32(gallivm, reg.Dimension.Index);
618
619 base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
620 LLVMBuildMul(gallivm->builder, index,
621 vertex_dw_stride, ""), "");
622 }
623
624 /* Get information about the register. */
625 if (reg.Register.File == TGSI_FILE_INPUT) {
626 name = info->input_semantic_name;
627 index = info->input_semantic_index;
628 array_first = info->input_array_first;
629 } else if (reg.Register.File == TGSI_FILE_OUTPUT) {
630 name = info->output_semantic_name;
631 index = info->output_semantic_index;
632 array_first = info->output_array_first;
633 } else {
634 assert(0);
635 return NULL;
636 }
637
638 if (reg.Register.Indirect) {
639 /* Add the relative address of the element. */
640 LLVMValueRef ind_index;
641
642 if (reg.Indirect.ArrayID)
643 first = array_first[reg.Indirect.ArrayID];
644 else
645 first = reg.Register.Index;
646
647 ind_index = get_indirect_index(ctx, &reg.Indirect,
648 reg.Register.Index - first);
649
650 base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
651 LLVMBuildMul(gallivm->builder, ind_index,
652 lp_build_const_int32(gallivm, 4), ""), "");
653
654 param = si_shader_io_get_unique_index(name[first], index[first]);
655 } else {
656 param = si_shader_io_get_unique_index(name[reg.Register.Index],
657 index[reg.Register.Index]);
658 }
659
660 /* Add the base address of the element. */
661 return LLVMBuildAdd(gallivm->builder, base_addr,
662 lp_build_const_int32(gallivm, param * 4), "");
663 }
664
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 * ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 * ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 * ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 * ...
 *
 * Note that every attribute has 4 components.
 */
/**
 * Compute a byte address into the layout above.
 *
 * \param vertex_index  per-vertex attribute index within the patch, or NULL
 *                      to address a per-patch attribute
 * \param param_index   attribute slot index (from si_shader_io_get_unique_index)
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Patch geometry is packed into SI_PARAM_TCS_OFFCHIP_LAYOUT:
	 * bits [0..8] = number of patches, bits [9..14] = vertices/patch. */
	vertices_per_patch = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 6);
	num_patches = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 0, 9);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
	                              num_patches, "");

	/* 16 bytes = 4 components * 4 bytes per attribute. */
	constant16 = lp_build_const_int32(gallivm, 16);
	if (vertex_index) {
		/* Element index = RelPatchID * verts/patch + vertex_index;
		 * consecutive attributes are total_vertices elements apart. */
		base_addr = LLVMBuildMul(gallivm->builder, get_rel_patch_id(ctx),
		                         vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch attributes: one element per patch. */
		base_addr = get_rel_patch_id(ctx);
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
	                         LLVMBuildMul(gallivm->builder, param_index,
	                                      param_stride, ""), "");

	/* Convert element index to a byte address. */
	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes;
		 * that offset is packed into bits [16..31] of the layout. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 16, 16);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
		                         patch_data_offset, "");
	}
	return base_addr;
}
725
/**
 * Resolve a TGSI register (source or destination) to an offchip
 * TCS->TES buffer address via get_tcs_tes_buffer_address.
 *
 * \param dst  destination register (used when \p src is NULL)
 * \param src  source register; takes precedence over \p dst
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
				struct si_shader_context *ctx,
				const struct tgsi_full_dst_register *dst,
				const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* A 2-D register addresses a specific vertex within the patch. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
			                                  reg.Dimension.Index);
		else
			vertex_index = lp_build_const_int32(gallivm,
			                                    reg.Dimension.Index);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Indirect addressing is relative to the array start. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
		                                 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = lp_build_const_int32(gallivm, 0);
	}

	/* Translate the semantic into the unique attribute slot. */
	param_index_base = si_shader_io_get_unique_index(name[param_base],
	                                                 index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
	                           lp_build_const_int32(gallivm, param_index_base),
	                           "");

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
788
/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4).
 *
 * The remaining parameters map directly to the fields of the hardware
 * tbuffer_store instruction: vaddr/soffset/inst_offset form the address,
 * dfmt/nfmt select the data and numeric format, and offen/idxen/glc/slc/tfe
 * are the instruction's modifier bits.
 */
static void build_tbuffer_store(struct si_shader_context *ctx,
				LLVMValueRef rsrc,
				LLVMValueRef vdata,
				unsigned num_channels,
				LLVMValueRef vaddr,
				LLVMValueRef soffset,
				unsigned inst_offset,
				unsigned dfmt,
				unsigned nfmt,
				unsigned offen,
				unsigned idxen,
				unsigned glc,
				unsigned slc,
				unsigned tfe)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(ctx->i32, num_channels, 0),
		vaddr,
		soffset,
		LLVMConstInt(ctx->i32, inst_offset, 0),
		LLVMConstInt(ctx->i32, dfmt, 0),
		LLVMConstInt(ctx->i32, nfmt, 0),
		LLVMConstInt(ctx->i32, offen, 0),
		LLVMConstInt(ctx->i32, idxen, 0),
		LLVMConstInt(ctx->i32, glc, 0),
		LLVMConstInt(ctx->i32, slc, 0),
		LLVMConstInt(ctx->i32, tfe, 0)
	};

	/* The instruction offset field has 12 bits */
	assert(offen || inst_offset < (1 << 12));

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	/* num_channels 3 and 4 both use the v4i32 variant. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
			   args, ARRAY_SIZE(args), 0);
}
836
837 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
838 LLVMValueRef rsrc,
839 LLVMValueRef vdata,
840 unsigned num_channels,
841 LLVMValueRef vaddr,
842 LLVMValueRef soffset,
843 unsigned inst_offset)
844 {
845 static unsigned dfmt[] = {
846 V_008F0C_BUF_DATA_FORMAT_32,
847 V_008F0C_BUF_DATA_FORMAT_32_32,
848 V_008F0C_BUF_DATA_FORMAT_32_32_32,
849 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
850 };
851 assert(num_channels >= 1 && num_channels <= 4);
852
853 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
854 inst_offset, dfmt[num_channels-1],
855 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
856 }
857
/**
 * Load 1-4 dwords from a buffer resource.
 *
 * Uses llvm.amdgcn.buffer.load.* on LLVM >= 3.9 (which only takes a
 * single combined offset), and the legacy llvm.SI.buffer.load.dword.*
 * intrinsic on older LLVM.
 *
 * \param num_channels  1-4; 3 is rounded up to the v4 variant
 * \param vindex   optional index into the buffer (NULL if unused)
 * \param voffset  optional offset in bytes (NULL if unused)
 * \param soffset  scalar offset in bytes (may be NULL on the new path only)
 * \param inst_offset  constant offset folded into the address
 * \param glc, slc  cache behavior bits
 */
static LLVMValueRef build_buffer_load(struct si_shader_context *ctx,
				      LLVMValueRef rsrc,
				      int num_channels,
				      LLVMValueRef vindex,
				      LLVMValueRef voffset,
				      LLVMValueRef soffset,
				      unsigned inst_offset,
				      unsigned glc,
				      unsigned slc)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	/* Select the i32/v2i32/v4i32 (or f32 family) overload; 3 channels
	 * use the 4-channel variant. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	if (HAVE_LLVM >= 0x309) {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, ""),
			vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0)
		};

		LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
				       ctx->v4f32};
		const char *type_names[] = {"f32", "v2f32", "v4f32"};
		char name[256];

		/* The new intrinsic takes one combined offset, so fold
		 * voffset and soffset into args[2]. */
		if (voffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], voffset,
					       "");
		}

		if (soffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], soffset,
					       "");
		}

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
			 type_names[func]);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	} else {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v16i8, ""),
			voffset ? voffset : vindex, /* may be replaced below */
			soffset,
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i32, voffset ? 1 : 0, 0), // offen
			LLVMConstInt(ctx->i32, vindex ? 1 : 0, 0), //idxen
			LLVMConstInt(ctx->i32, glc, 0),
			LLVMConstInt(ctx->i32, slc, 0),
			LLVMConstInt(ctx->i32, 0, 0), // TFE
		};

		LLVMTypeRef types[] = {ctx->i32, LLVMVectorType(ctx->i32, 2),
				       ctx->v4i32};
		const char *type_names[] = {"i32", "v2i32", "v4i32"};
		const char *arg_type = "i32";
		char name[256];

		/* With both an index and an offset, the address argument
		 * becomes a v2i32 of {vindex, voffset}. */
		if (voffset && vindex) {
			LLVMValueRef vaddr[] = {vindex, voffset};

			arg_type = "v2i32";
			args[1] = lp_build_gather_values(gallivm, vaddr, 2);
		}

		snprintf(name, sizeof(name), "llvm.SI.buffer.load.dword.%s.%s",
			 type_names[func], arg_type);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	}
}
933
/**
 * Load a value of the given TGSI type from a buffer.
 *
 * \param swizzle  component 0..3 to extract, or ~0 to return the whole vec4
 * \param buffer   buffer resource descriptor
 * \param offset   scalar byte offset
 * \param base     base byte offset (passed as soffset)
 *
 * 64-bit types are assembled from two adjacent 32-bit loads.
 */
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
				enum tgsi_opcode_type type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef value, value2;
	LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
	LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);

	/* Whole-vector load: return all four components at once. */
	if (swizzle == ~0) {
		value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
					  0, 1, 0);

		return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
	}

	/* 32-bit scalar: load the vec4 and extract the requested channel. */
	if (!tgsi_type_is_64bit(type)) {
		value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
					  0, 1, 0);

		value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
		return LLVMBuildExtractElement(gallivm->builder, value,
					       lp_build_const_int32(gallivm, swizzle), "");
	}

	/* 64-bit scalar: two dword loads at swizzle*4 and swizzle*4+4. */
	value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
				  swizzle * 4, 1, 0);

	value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
				   swizzle * 4 + 4, 1, 0);

	return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
969
/**
 * Load from LDS.
 *
 * \param type output value type
 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef value;

	/* vec4 request: recurse once per channel and gather the results. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       lp_build_const_int32(gallivm, swizzle));

	value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
	/* 64-bit values occupy two consecutive dwords. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       lp_build_const_int32(gallivm, swizzle + 1));
		value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
		return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
1010
1011 /**
1012 * Store to LDS.
1013 *
1014 * \param swizzle offset (typically 0..3)
1015 * \param dw_addr address in dwords
1016 * \param value value to store
1017 */
1018 static void lds_store(struct lp_build_tgsi_context *bld_base,
1019 unsigned swizzle, LLVMValueRef dw_addr,
1020 LLVMValueRef value)
1021 {
1022 struct si_shader_context *ctx = si_shader_context(bld_base);
1023 struct gallivm_state *gallivm = bld_base->base.gallivm;
1024
1025 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
1026 lp_build_const_int32(gallivm, swizzle));
1027
1028 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
1029 build_indexed_store(ctx, ctx->lds,
1030 dw_addr, value);
1031 }
1032
1033 static LLVMValueRef fetch_input_tcs(
1034 struct lp_build_tgsi_context *bld_base,
1035 const struct tgsi_full_src_register *reg,
1036 enum tgsi_opcode_type type, unsigned swizzle)
1037 {
1038 struct si_shader_context *ctx = si_shader_context(bld_base);
1039 LLVMValueRef dw_addr, stride;
1040
1041 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
1042 dw_addr = get_tcs_in_current_patch_offset(ctx);
1043 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1044
1045 return lds_load(bld_base, type, swizzle, dw_addr);
1046 }
1047
1048 static LLVMValueRef fetch_output_tcs(
1049 struct lp_build_tgsi_context *bld_base,
1050 const struct tgsi_full_src_register *reg,
1051 enum tgsi_opcode_type type, unsigned swizzle)
1052 {
1053 struct si_shader_context *ctx = si_shader_context(bld_base);
1054 LLVMValueRef dw_addr, stride;
1055
1056 if (reg->Register.Dimension) {
1057 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
1058 dw_addr = get_tcs_out_current_patch_offset(ctx);
1059 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
1060 } else {
1061 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1062 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
1063 }
1064
1065 return lds_load(bld_base, type, swizzle, dw_addr);
1066 }
1067
1068 static LLVMValueRef fetch_input_tes(
1069 struct lp_build_tgsi_context *bld_base,
1070 const struct tgsi_full_src_register *reg,
1071 enum tgsi_opcode_type type, unsigned swizzle)
1072 {
1073 struct si_shader_context *ctx = si_shader_context(bld_base);
1074 struct gallivm_state *gallivm = bld_base->base.gallivm;
1075 LLVMValueRef rw_buffers, buffer, base, addr;
1076
1077 rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
1078 SI_PARAM_RW_BUFFERS);
1079 buffer = build_indexed_load_const(ctx, rw_buffers,
1080 lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
1081
1082 base = LLVMGetParam(ctx->radeon_bld.main_fn, ctx->param_oc_lds);
1083 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1084
1085 return buffer_load(bld_base, type, swizzle, buffer, base, addr);
1086 }
1087
/* Store a TCS output.
 *
 * Scalar outputs are written both to LDS (where later TCS reads fetch them
 * via fetch_output_tcs) and to the off-chip tess ring buffer (where TES
 * reads them). Vector stores and non-OUTPUT files are delegated to the
 * generic store, which lowers vectors to scalars and re-enters here.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef rw_buffers, buffer, base, buf_addr;
	LLVMValueRef values[4];

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		radeon_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	/* Compute the LDS dword address: per-vertex outputs use the vertex
	 * stride, per-patch outputs use the patch-data area.
	 */
	if (reg->Register.Dimension) {
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
	}

	/* Descriptor and offsets for the off-chip tess ring buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	base = LLVMGetParam(ctx->radeon_bld.main_fn, ctx->param_oc_lds);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = radeon_llvm_saturate(bld_base, value);

		/* Always mirror the value into LDS for later TCS reads. */
		lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: store each enabled channel separately.
		 * The full-mask case is handled with one vec4 store below.
		 */
		if (inst->Dst[0].Register.WriteMask != 0xF) {
			build_tbuffer_store_dwords(ctx, buffer, value, 1,
						   buf_addr, base,
						   4 * chan_index);
		}
	}

	/* Full writemask: one combined 4-dword store to the ring buffer. */
	if (inst->Dst[0].Register.WriteMask == 0xF) {
		LLVMValueRef value = lp_build_gather_values(bld_base->base.gallivm,
							    values, 4);
		build_tbuffer_store_dwords(ctx, buffer, value, 4, buf_addr,
					   base, 0);
	}
}
1153
/* Fetch a GS input from the ESGS ring buffer.
 *
 * Inputs are addressed by (vertex offset, unique I/O slot, channel); PRIMID
 * is special-cased because it comes from an SGPR, not the ring.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	/* GS inputs must be two-dimensional (indexed by vertex). */
	if (!reg->Register.Dimension)
		return NULL;

	/* swizzle == ~0 requests a whole vec4: fetch per channel and gather. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter. Vertices 0-1 and 2-5 live in
	 * two separate groups of function parameters.
	 */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	/* The parameter is in dwords; convert to a byte offset. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->radeon_bld.main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one; /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one; /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute);
	if (tgsi_type_is_64bit(type)) {
		/* 64-bit values occupy two consecutive dwords: re-issue the
		 * load at the next channel's offset and combine the halves.
		 */
		LLVMValueRef value2;
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute);
		return radeon_llvm_emit_fetch_64bit(bld_base, type,
						    value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1232
1233 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1234 {
1235 switch (interpolate) {
1236 case TGSI_INTERPOLATE_CONSTANT:
1237 return 0;
1238
1239 case TGSI_INTERPOLATE_LINEAR:
1240 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1241 return SI_PARAM_LINEAR_SAMPLE;
1242 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1243 return SI_PARAM_LINEAR_CENTROID;
1244 else
1245 return SI_PARAM_LINEAR_CENTER;
1246 break;
1247 case TGSI_INTERPOLATE_COLOR:
1248 case TGSI_INTERPOLATE_PERSPECTIVE:
1249 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1250 return SI_PARAM_PERSP_SAMPLE;
1251 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1252 return SI_PARAM_PERSP_CENTROID;
1253 else
1254 return SI_PARAM_PERSP_CENTER;
1255 break;
1256 default:
1257 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1258 return -1;
1259 }
1260 }
1261
1262 /* This shouldn't be used by explicit INTERP opcodes. */
1263 static unsigned select_interp_param(struct si_shader_context *ctx,
1264 unsigned param)
1265 {
1266 if (!ctx->is_monolithic)
1267 return param;
1268
1269 if (ctx->shader->key.ps.prolog.force_persp_sample_interp) {
1270 switch (param) {
1271 case SI_PARAM_PERSP_CENTROID:
1272 case SI_PARAM_PERSP_CENTER:
1273 return SI_PARAM_PERSP_SAMPLE;
1274 }
1275 }
1276 if (ctx->shader->key.ps.prolog.force_linear_sample_interp) {
1277 switch (param) {
1278 case SI_PARAM_LINEAR_CENTROID:
1279 case SI_PARAM_LINEAR_CENTER:
1280 return SI_PARAM_LINEAR_SAMPLE;
1281 }
1282 }
1283 if (ctx->shader->key.ps.prolog.force_persp_center_interp) {
1284 switch (param) {
1285 case SI_PARAM_PERSP_CENTROID:
1286 case SI_PARAM_PERSP_SAMPLE:
1287 return SI_PARAM_PERSP_CENTER;
1288 }
1289 }
1290 if (ctx->shader->key.ps.prolog.force_linear_center_interp) {
1291 switch (param) {
1292 case SI_PARAM_LINEAR_CENTROID:
1293 case SI_PARAM_LINEAR_SAMPLE:
1294 return SI_PARAM_LINEAR_CENTER;
1295 }
1296 }
1297
1298 return param;
1299 }
1300
1301 /**
1302 * Interpolate a fragment shader input.
1303 *
1304 * @param ctx context
1305 * @param input_index index of the input in hardware
1306 * @param semantic_name TGSI_SEMANTIC_*
1307 * @param semantic_index semantic index
1308 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
1309 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
1310 * @param interp_param interpolation weights (i,j)
1311 * @param prim_mask SI_PARAM_PRIM_MASK
1312 * @param face SI_PARAM_FRONT_FACE
1313 * @param result the return value (4 components)
1314 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both front and back color
		 * attributes and select per channel based on the face sign.
		 */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		/* args[2]/args[3] stay fixed across the loop; only the
		 * channel (args[0]) and attribute (args[1]) vary.
		 */
		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			/* fs.constant takes only 3 args (no weights). */
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Only the fog coordinate (x) is interpolated;
		 * y/z/w are fixed to (0, 0, 1).
		 */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					ctx->f32, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Common case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute);
		}
	}
}
1417
/* LLVMGetParam with bc_optimize resolved.
 *
 * Returns the (i, j) weight pair for \p interp_param_idx, substituting
 * CENTER for CENTROID at runtime when the bc_optimize bit says the wave
 * contains only fully-covered quads.
 */
static LLVMValueRef get_interp_param(struct si_shader_context *ctx,
				     int interp_param_idx)
{
	LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
	LLVMValueRef main_fn = ctx->radeon_bld.main_fn;
	LLVMValueRef param = NULL;

	/* Handle PRIM_MASK[31] (bc_optimize). */
	if (ctx->is_monolithic &&
	    ((ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
	      interp_param_idx == SI_PARAM_PERSP_CENTROID) ||
	     (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
	      interp_param_idx == SI_PARAM_LINEAR_CENTROID))) {
		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 */
		/* Extract bit 31 of PRIM_MASK as an i1 condition. */
		LLVMValueRef bc_optimize =
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK);
		bc_optimize = LLVMBuildLShr(builder,
					    bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(builder, bc_optimize, ctx->i1, "");

		if (ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
		    interp_param_idx == SI_PARAM_PERSP_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTROID),
						"");
		}
		if (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
		    interp_param_idx == SI_PARAM_LINEAR_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTROID),
						"");
		}
	}

	/* No substitution applied: return the requested parameter as-is. */
	if (!param)
		param = LLVMGetParam(main_fn, interp_param_idx);
	return param;
}
1467
/* Declare a fragment shader input: either take pre-loaded color VGPRs
 * from the PS prolog or emit interpolation code for the attribute.
 */
static void declare_input_fs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = radeon_bld->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (!ctx->is_monolithic &&
	    decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		/* COLOR1's VGPRs come after however many COLOR0 reads. */
		unsigned mask = colors_read >> (i * 4);
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		/* Only channels actually read have VGPRs; others are undef. */
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)] =
			mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 1)] =
			mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 2)] =
			mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 3)] =
			mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		/* Apply monolithic-shader interp overrides and bc_optimize. */
		interp_param_idx = select_interp_param(ctx,
						       interp_param_idx);
		interp_param = get_interp_param(ctx, interp_param_idx);
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    decl->Interp.Interpolate == TGSI_INTERPOLATE_COLOR &&
	    ctx->shader->key.ps.prolog.flatshade_colors)
		interp_param = NULL; /* load the constant color */

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)]);
}
1523
1524 static LLVMValueRef get_sample_id(struct radeon_llvm_context *radeon_bld)
1525 {
1526 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1527 SI_PARAM_ANCILLARY, 8, 4);
1528 }
1529
1530 /**
1531 * Set range metadata on an instruction. This can only be used on load and
1532 * call instructions. If you know an instruction can only produce the values
1533 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1534 * \p lo is the minimum value inclusive.
1535 * \p hi is the maximum value exclusive.
1536 */
1537 static void set_range_metadata(struct si_shader_context *ctx,
1538 LLVMValueRef value, unsigned lo, unsigned hi)
1539 {
1540 LLVMValueRef range_md, md_args[2];
1541 LLVMTypeRef type = LLVMTypeOf(value);
1542 LLVMContextRef context = LLVMGetTypeContext(type);
1543
1544 md_args[0] = LLVMConstInt(type, lo, false);
1545 md_args[1] = LLVMConstInt(type, hi, false);
1546 range_md = LLVMMDNodeInContext(context, md_args, 2);
1547 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1548 }
1549
/* Return the thread's index within the wave (0..63). */
static LLVMValueRef get_thread_id(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMValueRef tid;

	if (HAVE_LLVM < 0x0308) {
		tid = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid",
				ctx->i32,   NULL, 0, LLVMReadNoneAttribute);
	} else {
		/* mbcnt.lo counts set mask bits among the lower 32 lanes
		 * below this lane; chaining its result (as the accumulator,
		 * tid_args[1]) into mbcnt.hi adds the upper 32 lanes, which
		 * with an all-ones mask yields the lane index.
		 */
		LLVMValueRef tid_args[2];
		tid_args[0] = lp_build_const_int32(gallivm, 0xffffffff);
		tid_args[1] = lp_build_const_int32(gallivm, 0);
		tid_args[1] = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.lo", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);

		tid = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.hi", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);
	}
	/* Tell LLVM the result is always in [0, 64). */
	set_range_metadata(ctx, tid, 0, 64);
	return tid;
}
1573
1574 /**
1575 * Load a dword from a constant buffer.
1576 */
1577 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1578 LLVMValueRef resource,
1579 LLVMValueRef offset)
1580 {
1581 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
1582 LLVMValueRef args[2] = {resource, offset};
1583
1584 return lp_build_intrinsic(builder, "llvm.SI.load.const", ctx->f32, args, 2,
1585 LLVMReadNoneAttribute);
1586 }
1587
1588 static LLVMValueRef load_sample_position(struct radeon_llvm_context *radeon_bld, LLVMValueRef sample_id)
1589 {
1590 struct si_shader_context *ctx =
1591 si_shader_context(&radeon_bld->soa.bld_base);
1592 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1593 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1594 LLVMBuilderRef builder = gallivm->builder;
1595 LLVMValueRef desc = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
1596 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_PS_CONST_SAMPLE_POSITIONS);
1597 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1598
1599 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1600 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1601 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1602
1603 LLVMValueRef pos[4] = {
1604 buffer_load_const(ctx, resource, offset0),
1605 buffer_load_const(ctx, resource, offset1),
1606 lp_build_const_float(gallivm, 0),
1607 lp_build_const_float(gallivm, 0)
1608 };
1609
1610 return lp_build_gather_values(gallivm, pos, 4);
1611 }
1612
/* Materialize a TGSI system value and store it in
 * radeon_bld->system_values[index]. Most values come straight from shader
 * input SGPRs/VGPRs; a few are computed or loaded from buffers.
 */
static void declare_system_value(
	struct radeon_llvm_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID includes the base vertex of the draw. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_START_INSTANCE);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_DRAWID);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* Only TCS and GS have invocation IDs; TCS packs it in
		 * REL_IDS (5 bits at bit 8).
		 */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; w arrives as 1/w, so reciprocate it. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position within the pixel = fractional part of the
		 * fixed-point fragment position.
		 */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count is packed in a layout SGPR. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 7);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess levels are read back from the off-chip tess ring
		 * buffer as a full vec4 (swizzle ~0).
		 */
		LLVMValueRef rw_buffers, buffer, base, addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
					SI_PARAM_RW_BUFFERS);
		buffer = build_indexed_load_const(ctx, rw_buffers,
		        lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

		base = LLVMGetParam(ctx->radeon_bld.main_fn, ctx->param_oc_lds);
		addr = get_tcs_tes_buffer_address(ctx, NULL,
		                          lp_build_const_int32(gallivm, param));

		value = buffer_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
		                    ~0, buffer, base, addr);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels live in a constant buffer: outer at
		 * dwords 0-3, inner at dwords 4-7.
		 */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = lp_build_const_int32(gallivm, SI_HS_CONST_DEFAULT_TESS_LEVELS);
		buf = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
		buf = build_indexed_load_const(ctx, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   lp_build_const_int32(gallivm, (offset + i) * 4));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_GRID_SIZE);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		/* Block size is a compile-time constant from the CS
		 * declaration properties.
		 */
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;
		unsigned sizes[3] = {
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
		};

		for (i = 0; i < 3; ++i)
			values[i] = lp_build_const_int32(gallivm, sizes[i]);

		value = lp_build_gather_values(gallivm, values, 3);
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_ID);
		break;

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_THREAD_ID);
		break;

#if HAVE_LLVM >= 0x0309
	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* ps.live is true for non-helper lanes; invert and
		 * sign-extend to get the TGSI true/false convention.
		 */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LLVMReadNoneAttribute);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;
#endif

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1831
1832 static void declare_compute_memory(struct radeon_llvm_context *radeon_bld,
1833 const struct tgsi_full_declaration *decl)
1834 {
1835 struct si_shader_context *ctx =
1836 si_shader_context(&radeon_bld->soa.bld_base);
1837 struct si_shader_selector *sel = ctx->shader->selector;
1838 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1839
1840 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1841 LLVMValueRef var;
1842
1843 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1844 assert(decl->Range.First == decl->Range.Last);
1845 assert(!ctx->shared_memory);
1846
1847 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1848 LLVMArrayType(ctx->i8, sel->local_size),
1849 "compute_lds",
1850 LOCAL_ADDR_SPACE);
1851 LLVMSetAlignment(var, 4);
1852
1853 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1854 }
1855
/* Fetch a value from a constant buffer.
 *
 * Handles four cases: whole-vec4 fetch (LP_CHAN_ALL), direct addressing,
 * indirect constant-buffer selection (Dimension.Indirect), and indirect
 * element addressing via the TGSI ADDR register. 64-bit types fetch two
 * consecutive dwords and merge them.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	/* Whole vec4: fetch each channel and gather. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	/* Fully direct addressing: constant byte offset. */
	if (!reg->Register.Indirect && !reg->Dimension.Indirect) {
		LLVMValueRef c0, c1;

		c0 = buffer_load_const(ctx, ctx->const_buffers[buf],
				       LLVMConstInt(ctx->i32, idx * 4, 0));

		if (!tgsi_type_is_64bit(type))
			return bitcast(bld_base, type, c0);
		else {
			/* High dword is the next constant slot. */
			c1 = buffer_load_const(ctx, ctx->const_buffers[buf],
					       LLVMConstInt(ctx->i32,
							    (idx + 1) * 4, 0));
			return radeon_llvm_emit_fetch_64bit(bld_base, type,
							    c0, c1);
		}
	}

	/* Indirectly-selected constant buffer: load its descriptor with a
	 * bounded index; otherwise use the preloaded descriptor.
	 */
	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
		LLVMValueRef index;
		index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
						   reg->Dimension.Index,
						   SI_NUM_CONST_BUFFERS);
		bufp = build_indexed_load_const(ctx, ptr, index);
	} else
		bufp = ctx->const_buffers[buf];

	/* byte offset = ADDR * 16 + constant part (idx * 4). */
	addr = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
	addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
	addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
	addr = lp_build_add(&bld_base->uint_bld, addr,
			    lp_build_const_int32(base->gallivm, idx * 4));

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* High dword: redo the address math with the next ADDR
		 * swizzle channel.
		 */
		LLVMValueRef addr2, result2;
		addr2 = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle + 1];
		addr2 = LLVMBuildLoad(base->gallivm->builder, addr2, "load addr reg2");
		addr2 = lp_build_mul_imm(&bld_base->uint_bld, addr2, 16);
		addr2 = lp_build_add(&bld_base->uint_bld, addr2,
				     lp_build_const_int32(base->gallivm, idx * 4));

		result2 = buffer_load_const(ctx, bufp, addr2);

		result = radeon_llvm_emit_fetch_64bit(bld_base, type,
						      result, result2);
	}
	return result;
}
1934
1935 /* Upper 16 bits must be zero. */
1936 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1937 LLVMValueRef val[2])
1938 {
1939 return LLVMBuildOr(gallivm->builder, val[0],
1940 LLVMBuildShl(gallivm->builder, val[1],
1941 lp_build_const_int32(gallivm, 16),
1942 ""), "");
1943 }
1944
1945 /* Upper 16 bits are ignored and will be dropped. */
1946 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1947 LLVMValueRef val[2])
1948 {
1949 LLVMValueRef v[2] = {
1950 LLVMBuildAnd(gallivm->builder, val[0],
1951 lp_build_const_int32(gallivm, 0xffff), ""),
1952 val[1],
1953 };
1954 return si_llvm_pack_two_int16(gallivm, v);
1955 }
1956
1957 /* Initialize arguments for the shader export intrinsic */
1958 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1959 LLVMValueRef *values,
1960 unsigned target,
1961 LLVMValueRef *args)
1962 {
1963 struct si_shader_context *ctx = si_shader_context(bld_base);
1964 struct lp_build_context *uint =
1965 &ctx->radeon_bld.soa.bld_base.uint_bld;
1966 struct lp_build_context *base = &bld_base->base;
1967 struct gallivm_state *gallivm = base->gallivm;
1968 LLVMBuilderRef builder = base->gallivm->builder;
1969 LLVMValueRef val[4];
1970 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1971 unsigned chan;
1972 bool is_int8;
1973
1974 /* Default is 0xf. Adjusted below depending on the format. */
1975 args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1976
1977 /* Specify whether the EXEC mask represents the valid mask */
1978 args[1] = uint->zero;
1979
1980 /* Specify whether this is the last export */
1981 args[2] = uint->zero;
1982
1983 /* Specify the target we are exporting */
1984 args[3] = lp_build_const_int32(base->gallivm, target);
1985
1986 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1987 const union si_shader_key *key = &ctx->shader->key;
1988 unsigned col_formats = key->ps.epilog.spi_shader_col_format;
1989 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1990
1991 assert(cbuf >= 0 && cbuf < 8);
1992 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1993 is_int8 = (key->ps.epilog.color_is_int8 >> cbuf) & 0x1;
1994 }
1995
1996 args[4] = uint->zero; /* COMPR flag */
1997 args[5] = base->undef;
1998 args[6] = base->undef;
1999 args[7] = base->undef;
2000 args[8] = base->undef;
2001
2002 switch (spi_shader_col_format) {
2003 case V_028714_SPI_SHADER_ZERO:
2004 args[0] = uint->zero; /* writemask */
2005 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
2006 break;
2007
2008 case V_028714_SPI_SHADER_32_R:
2009 args[0] = uint->one; /* writemask */
2010 args[5] = values[0];
2011 break;
2012
2013 case V_028714_SPI_SHADER_32_GR:
2014 args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
2015 args[5] = values[0];
2016 args[6] = values[1];
2017 break;
2018
2019 case V_028714_SPI_SHADER_32_AR:
2020 args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
2021 args[5] = values[0];
2022 args[8] = values[3];
2023 break;
2024
2025 case V_028714_SPI_SHADER_FP16_ABGR:
2026 args[4] = uint->one; /* COMPR flag */
2027
2028 for (chan = 0; chan < 2; chan++) {
2029 LLVMValueRef pack_args[2] = {
2030 values[2 * chan],
2031 values[2 * chan + 1]
2032 };
2033 LLVMValueRef packed;
2034
2035 packed = lp_build_intrinsic(base->gallivm->builder,
2036 "llvm.SI.packf16",
2037 ctx->i32, pack_args, 2,
2038 LLVMReadNoneAttribute);
2039 args[chan + 5] =
2040 LLVMBuildBitCast(base->gallivm->builder,
2041 packed, ctx->f32, "");
2042 }
2043 break;
2044
2045 case V_028714_SPI_SHADER_UNORM16_ABGR:
2046 for (chan = 0; chan < 4; chan++) {
2047 val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
2048 val[chan] = LLVMBuildFMul(builder, val[chan],
2049 lp_build_const_float(gallivm, 65535), "");
2050 val[chan] = LLVMBuildFAdd(builder, val[chan],
2051 lp_build_const_float(gallivm, 0.5), "");
2052 val[chan] = LLVMBuildFPToUI(builder, val[chan],
2053 ctx->i32, "");
2054 }
2055
2056 args[4] = uint->one; /* COMPR flag */
2057 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2058 si_llvm_pack_two_int16(gallivm, val));
2059 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2060 si_llvm_pack_two_int16(gallivm, val+2));
2061 break;
2062
2063 case V_028714_SPI_SHADER_SNORM16_ABGR:
2064 for (chan = 0; chan < 4; chan++) {
2065 /* Clamp between [-1, 1]. */
2066 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
2067 values[chan],
2068 lp_build_const_float(gallivm, 1));
2069 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
2070 val[chan],
2071 lp_build_const_float(gallivm, -1));
2072 /* Convert to a signed integer in [-32767, 32767]. */
2073 val[chan] = LLVMBuildFMul(builder, val[chan],
2074 lp_build_const_float(gallivm, 32767), "");
2075 /* If positive, add 0.5, else add -0.5. */
2076 val[chan] = LLVMBuildFAdd(builder, val[chan],
2077 LLVMBuildSelect(builder,
2078 LLVMBuildFCmp(builder, LLVMRealOGE,
2079 val[chan], base->zero, ""),
2080 lp_build_const_float(gallivm, 0.5),
2081 lp_build_const_float(gallivm, -0.5), ""), "");
2082 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
2083 }
2084
2085 args[4] = uint->one; /* COMPR flag */
2086 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2087 si_llvm_pack_two_int32_as_int16(gallivm, val));
2088 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2089 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2090 break;
2091
2092 case V_028714_SPI_SHADER_UINT16_ABGR: {
2093 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2094 255 : 65535);
2095 /* Clamp. */
2096 for (chan = 0; chan < 4; chan++) {
2097 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2098 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
2099 val[chan], max);
2100 }
2101
2102 args[4] = uint->one; /* COMPR flag */
2103 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2104 si_llvm_pack_two_int16(gallivm, val));
2105 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2106 si_llvm_pack_two_int16(gallivm, val+2));
2107 break;
2108 }
2109
2110 case V_028714_SPI_SHADER_SINT16_ABGR: {
2111 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2112 127 : 32767);
2113 LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
2114 -128 : -32768);
2115 /* Clamp. */
2116 for (chan = 0; chan < 4; chan++) {
2117 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2118 val[chan] = lp_build_emit_llvm_binary(bld_base,
2119 TGSI_OPCODE_IMIN,
2120 val[chan], max);
2121 val[chan] = lp_build_emit_llvm_binary(bld_base,
2122 TGSI_OPCODE_IMAX,
2123 val[chan], min);
2124 }
2125
2126 args[4] = uint->one; /* COMPR flag */
2127 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2128 si_llvm_pack_two_int32_as_int16(gallivm, val));
2129 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2130 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2131 break;
2132 }
2133
2134 case V_028714_SPI_SHADER_32_ABGR:
2135 memcpy(&args[5], values, sizeof(values[0]) * 4);
2136 break;
2137 }
2138 }
2139
2140 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2141 LLVMValueRef alpha)
2142 {
2143 struct si_shader_context *ctx = si_shader_context(bld_base);
2144 struct gallivm_state *gallivm = bld_base->base.gallivm;
2145
2146 if (ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2147 LLVMValueRef alpha_ref = LLVMGetParam(ctx->radeon_bld.main_fn,
2148 SI_PARAM_ALPHA_REF);
2149
2150 LLVMValueRef alpha_pass =
2151 lp_build_cmp(&bld_base->base,
2152 ctx->shader->key.ps.epilog.alpha_func,
2153 alpha, alpha_ref);
2154 LLVMValueRef arg =
2155 lp_build_select(&bld_base->base,
2156 alpha_pass,
2157 lp_build_const_float(gallivm, 1.0f),
2158 lp_build_const_float(gallivm, -1.0f));
2159
2160 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
2161 ctx->voidt, &arg, 1, 0);
2162 } else {
2163 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
2164 ctx->voidt, NULL, 0, 0);
2165 }
2166 }
2167
2168 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2169 LLVMValueRef alpha,
2170 unsigned samplemask_param)
2171 {
2172 struct si_shader_context *ctx = si_shader_context(bld_base);
2173 struct gallivm_state *gallivm = bld_base->base.gallivm;
2174 LLVMValueRef coverage;
2175
2176 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2177 coverage = LLVMGetParam(ctx->radeon_bld.main_fn,
2178 samplemask_param);
2179 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2180
2181 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2182 ctx->i32,
2183 &coverage, 1, LLVMReadNoneAttribute);
2184
2185 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2186 ctx->f32, "");
2187
2188 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2189 lp_build_const_float(gallivm,
2190 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2191
2192 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2193 }
2194
/* Compute the clip distances for user clip planes from a CLIPVERTEX
 * output and fill in the export arguments for the two clip-distance
 * position exports (POS+2 and POS+3).
 *
 * The clip plane coefficients are read from the internal constant
 * buffer slot SI_VS_CONST_CLIP_PLANES.
 *
 * \param pos       the position export argument arrays; pos[2] and
 *                  pos[3] are filled in here
 * \param out_elts  the 4 components of the CLIPVERTEX output
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm,
							   SI_VS_CONST_CLIP_PLANES);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	/* Each export register holds 4 clip distances (channels). */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start all 4 accumulated distances at 0. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* args[1] is used as scratch for the byte offset
				 * of the plane coefficient; it is overwritten with
				 * the real export argument below. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(ctx, const_resource,
							     args[1]);
				/* distance[chan] += plane[const_chan] * pos[const_chan] */
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Fill in the standard export arguments (see
		 * si_llvm_init_export_args for the meaning of each slot). */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
2241
2242 static void si_dump_streamout(struct pipe_stream_output_info *so)
2243 {
2244 unsigned i;
2245
2246 if (so->num_outputs)
2247 fprintf(stderr, "STREAMOUT\n");
2248
2249 for (i = 0; i < so->num_outputs; i++) {
2250 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2251 so->output[i].start_component;
2252 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2253 i, so->output[i].output_buffer,
2254 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2255 so->output[i].register_index,
2256 mask & 1 ? "x" : "",
2257 mask & 2 ? "y" : "",
2258 mask & 4 ? "z" : "",
2259 mask & 8 ? "w" : "");
2260 }
2261 }
2262
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Each thread writes one vertex.  The streamout config SGPR tells us
 * how many threads are allowed to write (so_vtx_count) and which vertex
 * stream is currently being captured (stream_id).
 *
 * \param outputs  all shader output values, indexed by output register
 * \param noutput  number of entries in \p outputs
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = get_thread_id(ctx);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Bits [25:24]: the vertex stream being captured. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
                 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->radeon_bld.main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef so_offset = LLVMGetParam(ctx->radeon_bld.main_fn,
							      ctx->param_streamout_offset[i]);
			/* The offset SGPR counts dwords; convert to bytes. */
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Skip outputs the shader does not actually write. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Only store if this output belongs to the vertex
			 * stream currently being captured. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, ctx->so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
2379
2380
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits streamout stores (if enabled), one export per parameter output
 * (assigning PARAM slots and recording them in vs_output_param_offset),
 * and the position exports POS0..POS3 (position, misc vector with
 * psize/edgeflag/layer/viewport, and clip distances).  Position exports
 * are buffered in pos_args[] and emitted last so the final one can be
 * marked as the last export.
 *
 * \param outputs  the shader output values, or NULL
 * \param noutput  number of entries in \p outputs
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
				&ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	if (outputs && ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(ctx, outputs, noutput);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].sid;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc vector (POS+1) below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc vector (POS+1) below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Saved for POS+1, then also exported as a generic
			 * parameter for the fragment shader to read. */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		/* Position exports are buffered and emitted at the end;
		 * everything else is emitted immediately. */
		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export", ctx->voidt,
					   args, 9, 0);
		}

		/* Clip distances are exported twice: as POS and as PARAM
		 * (so the fragment shader can read them). */
		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->selector->info.writes_psize |
						      (shader->selector->info.writes_edgeflag << 1) |
						      (shader->selector->info.writes_layer << 2) |
						      (shader->selector->info.writes_viewport_index << 3));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1][5] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  ctx->f32, "");
		}

		if (shader->selector->info.writes_layer)
			pos_args[1][7] = layer_value;

		if (shader->selector->info.writes_viewport_index)
			pos_args[1][8] = viewport_index_value;
	}

	/* Count the position exports so the last one can be marked. */
	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, pos_args[i], 9, 0);
	}
}
2555
/* TCS epilog helper: copy the TCS inputs listed in
 * key.tcs.epilog.inputs_to_copy from LDS to the off-chip tessellation
 * ring buffer, so the TES can read them.  Each invocation copies the
 * inputs of its own vertex (one vec4 per input index). */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef invocation_id, rw_buffers, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* Invocation ID = bits [12:8] of SI_PARAM_REL_IDS. */
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);

	/* Load the descriptor of the off-chip tessellation ring. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
	                lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	buffer_offset = LLVMGetParam(ctx->radeon_bld.main_fn, ctx->param_oc_lds);

	/* Compute this vertex's base dword address in LDS. */
	lds_vertex_stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
	                                 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	/* Copy each input in the bitmask: load a vec4 from LDS and store
	 * it to the ring buffer. */
	inputs = ctx->shader->key.tcs.epilog.inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
		                        lp_build_const_int32(gallivm, 4 * i),
		                         "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
		                                        invocation_id,
		                                        lp_build_const_int32(gallivm, i));

		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
		                              lds_ptr);

		build_tbuffer_store_dwords(ctx, buffer, value, 4, buffer_addr,
		                           buffer_offset, 0);
	}
}
2597
/* Write the tessellation factors (TESSOUTER/TESSINNER) from LDS to the
 * tess factor ring buffer.  Only invocation 0 of each patch does the
 * stores (the factors are per-patch), and only the first patch
 * (rel_patch_id == 0) writes the dynamic HS control word. */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations have written their outputs to LDS
	 * before invocation 0 reads the tess levels back. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* Outer levels come first in the buffer, then inner levels. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->radeon_bld.main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	/* Only the first patch writes the control word. */
	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, bld_base->uint_bld.zero, ""));

	/* Store the dynamic HS control word. */
	build_tbuffer_store_dwords(ctx, buffer,
				   lp_build_const_int32(gallivm, 0x80000000),
				   1, lp_build_const_int32(gallivm, 0), tf_base, 0);

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 4);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 20);
	lp_build_endif(&if_ctx);
}
2703
/* This only writes the tessellation factor levels.
 *
 * For non-monolithic shaders, instead of writing anything, the SGPRs
 * and VGPRs that the separately-compiled TCS epilog part needs are
 * packed into ctx->return_value.  For monolithic shaders, the input
 * copy and tess factor stores are emitted directly. */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (!ctx->is_monolithic) {
		/* Return epilog parameters from this function. */
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef ret = ctx->return_value;
		LLVMValueRef rw_buffers, rw0, rw1, tf_soffset;
		unsigned vgpr;

		/* RW_BUFFERS pointer */
		/* The 64-bit pointer is split into two i32 return slots. */
		rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
					  SI_PARAM_RW_BUFFERS);
		rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
		rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
		rw0 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.zero, "");
		rw1 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.one, "");
		ret = LLVMBuildInsertValue(builder, ret, rw0, 0, "");
		ret = LLVMBuildInsertValue(builder, ret, rw1, 1, "");

		/* Tess factor buffer soffset is after user SGPRs. */
		tf_soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					  SI_PARAM_TESS_FACTOR_OFFSET);
		ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
					   SI_TCS_NUM_USER_SGPR + 1, "");

		/* VGPRs */
		/* VGPR return values must be floats for the calling convention. */
		rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
		invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
		tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

		vgpr = SI_TCS_NUM_USER_SGPR + 2;
		ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
		ctx->return_value = ret;
		return;
	}

	si_copy_tcs_inputs(bld_base);
	si_write_tess_factors(bld_base, rel_patch_id, invocation_id, tf_lds_offset);
}
2755
/* LS epilog (vertex shader running as local shader before tessellation):
 * store all shader outputs to LDS at this vertex's slot, where the TCS
 * (HS) will read them as inputs. */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords, from bits [20:13] of the layout SGPR. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->radeon_bld.soa.outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];
		/* Each output occupies 4 dwords at its unique I/O slot. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					lp_build_const_int32(gallivm, param * 4), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}
}
2786
/* ES epilog (VS or TES running before a geometry shader): store all
 * shader outputs to the ESGS ring buffer, one dword at a time, for the
 * GS to read.  VIEWPORT_INDEX and LAYER are skipped — the GS does not
 * consume them from the ring here. */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
					    ctx->param_es2gs_offset);
	unsigned chan;
	int i;

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->radeon_bld.soa.outputs[i];
		int param_index;

		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
							    info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			/* Store one dword per channel; the byte offset is
			 * (4 * param_index + chan) * 4 within the ring slice
			 * selected by soffset. */
			build_tbuffer_store(ctx,
					    ctx->esgs_ring,
					    out_val, 1,
					    LLVMGetUndef(ctx->i32), soffset,
					    (4 * param_index + chan) * 4,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    0, 0, 1, 1, 0);
		}
	}
}
2825
2826 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2827 {
2828 struct si_shader_context *ctx = si_shader_context(bld_base);
2829 struct gallivm_state *gallivm = bld_base->base.gallivm;
2830 LLVMValueRef args[2];
2831
2832 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2833 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
2834 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2835 ctx->voidt, args, 2, 0);
2836 }
2837
2838 static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
2839 {
2840 struct si_shader_context *ctx = si_shader_context(bld_base);
2841 struct gallivm_state *gallivm = bld_base->base.gallivm;
2842 struct tgsi_shader_info *info = &ctx->shader->selector->info;
2843 struct si_shader_output_values *outputs = NULL;
2844 int i,j;
2845
2846 assert(!ctx->is_gs_copy_shader);
2847
2848 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2849
2850 /* Vertex color clamping.
2851 *
2852 * This uses a state constant loaded in a user data SGPR and
2853 * an IF statement is added that clamps all colors if the constant
2854 * is true.
2855 */
2856 if (ctx->type == PIPE_SHADER_VERTEX) {
2857 struct lp_build_if_state if_ctx;
2858 LLVMValueRef cond = NULL;
2859 LLVMValueRef addr, val;
2860
2861 for (i = 0; i < info->num_outputs; i++) {
2862 if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
2863 info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
2864 continue;
2865
2866 /* We've found a color. */
2867 if (!cond) {
2868 /* The state is in the first bit of the user SGPR. */
2869 cond = LLVMGetParam(ctx->radeon_bld.main_fn,
2870 SI_PARAM_VS_STATE_BITS);
2871 cond = LLVMBuildTrunc(gallivm->builder, cond,
2872 ctx->i1, "");
2873 lp_build_if(&if_ctx, gallivm, cond);
2874 }
2875
2876 for (j = 0; j < 4; j++) {
2877 addr = ctx->radeon_bld.soa.outputs[i][j];
2878 val = LLVMBuildLoad(gallivm->builder, addr, "");
2879 val = radeon_llvm_saturate(bld_base, val);
2880 LLVMBuildStore(gallivm->builder, val, addr);
2881 }
2882 }
2883
2884 if (cond)
2885 lp_build_endif(&if_ctx);
2886 }
2887
2888 for (i = 0; i < info->num_outputs; i++) {
2889 outputs[i].name = info->output_semantic_name[i];
2890 outputs[i].sid = info->output_semantic_index[i];
2891
2892 for (j = 0; j < 4; j++)
2893 outputs[i].values[j] =
2894 LLVMBuildLoad(gallivm->builder,
2895 ctx->radeon_bld.soa.outputs[i][j],
2896 "");
2897 }
2898
2899 if (ctx->is_monolithic) {
2900 /* Export PrimitiveID when PS needs it. */
2901 if (si_vs_exports_prim_id(ctx->shader)) {
2902 outputs[i].name = TGSI_SEMANTIC_PRIMID;
2903 outputs[i].sid = 0;
2904 outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2905 get_primitive_id(bld_base, 0));
2906 outputs[i].values[1] = bld_base->base.undef;
2907 outputs[i].values[2] = bld_base->base.undef;
2908 outputs[i].values[3] = bld_base->base.undef;
2909 i++;
2910 }
2911 } else {
2912 /* Return the primitive ID from the LLVM function. */
2913 ctx->return_value =
2914 LLVMBuildInsertValue(gallivm->builder,
2915 ctx->return_value,
2916 bitcast(bld_base, TGSI_TYPE_FLOAT,
2917 get_primitive_id(bld_base, 0)),
2918 VS_EPILOG_PRIMID_LOC, "");
2919 }
2920
2921 si_llvm_export_vs(bld_base, outputs, i);
2922 FREE(outputs);
2923 }
2924
/* Queue of pending "llvm.SI.export" argument lists for a pixel shader.
 * Exports are collected first (si_export_mrt_color, si_export_mrt_z)
 * and flushed together by si_emit_ps_exports. Each entry holds the 9
 * arguments of one export intrinsic; capacity covers 8 color MRTs
 * plus the MRTZ export. */
struct si_ps_exports {
	unsigned num;
	LLVMValueRef args[10][9];
};
2929
2930 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
2931 bool writes_samplemask)
2932 {
2933 if (writes_z) {
2934 /* Z needs 32 bits. */
2935 if (writes_samplemask)
2936 return V_028710_SPI_SHADER_32_ABGR;
2937 else if (writes_stencil)
2938 return V_028710_SPI_SHADER_32_GR;
2939 else
2940 return V_028710_SPI_SHADER_32_R;
2941 } else if (writes_stencil || writes_samplemask) {
2942 /* Both stencil and sample mask need only 16 bits. */
2943 return V_028710_SPI_SHADER_UINT16_ABGR;
2944 } else {
2945 return V_028710_SPI_SHADER_ZERO;
2946 }
2947 }
2948
/* Queue the MRTZ export carrying depth, stencil and/or sample mask.
 * The export format is chosen by si_get_spi_shader_z_format and the
 * channel layout depends on it (16-bit compressed vs 32-bit). */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	LLVMValueRef args[9];
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args[1] = uint->one; /* whether the EXEC mask is valid */
	args[2] = uint->one; /* DONE bit */

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

	args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
	args[5] = base->undef; /* R, depth */
	args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args[7] = base->undef; /* B, sample mask */
	args[8] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* 16-bit compressed layout: stencil in X, samplemask in Y. */
		assert(!depth);
		args[4] = uint->one; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(base->gallivm->builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args[6] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit layout: one value per channel. */
		if (depth) {
			args[5] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args[6] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args[7] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND)
		mask |= 0x1;

	/* Specify which components to enable */
	args[0] = lp_build_const_int32(base->gallivm, mask);

	memcpy(exp->args[exp->num++], args, sizeof(args));
}
3019
/* Queue the export(s) for one color output. Applies the key-controlled
 * epilog transforms (clamp, alpha-to-one, alpha test, smoothing) and
 * appends the export arguments to \p exp. When FS_COLOR0_WRITES_ALL_CBUFS
 * is in effect, the same color is broadcast to every enabled MRT. */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = radeon_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test (only applied to MRT 0) */
	if (index == 0 &&
	    ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.ps.epilog.last_cbuf > 0) {
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			/* Only the final export carries the DONE bit. */
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			memcpy(exp->args[exp->num++], args[c], sizeof(args[c]));
		}
	} else {
		LLVMValueRef args[9];

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		memcpy(exp->args[exp->num++], args, sizeof(args));
	}
}
3086
3087 static void si_emit_ps_exports(struct si_shader_context *ctx,
3088 struct si_ps_exports *exp)
3089 {
3090 for (unsigned i = 0; i < exp->num; i++)
3091 lp_build_intrinsic(ctx->radeon_bld.gallivm.builder,
3092 "llvm.SI.export", ctx->voidt,
3093 exp->args[i], 9, 0);
3094 }
3095
3096 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3097 {
3098 struct si_shader_context *ctx = si_shader_context(bld_base);
3099 struct lp_build_context *base = &bld_base->base;
3100 struct lp_build_context *uint = &bld_base->uint_bld;
3101 LLVMValueRef args[9];
3102
3103 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
3104 args[1] = uint->one; /* whether the EXEC mask is valid */
3105 args[2] = uint->one; /* DONE bit */
3106 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
3107 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
3108 args[5] = base->undef; /* R */
3109 args[6] = base->undef; /* G */
3110 args[7] = base->undef; /* B */
3111 args[8] = base->undef; /* A */
3112
3113 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
3114 ctx->voidt, args, 9, 0);
3115 }
3116
/* Monolithic pixel shader epilogue: read all shader outputs and emit
 * the color and MRTZ exports directly (no separate epilog part). */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;
	struct si_ps_exports exp = {};

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.epilog.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.epilog.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.epilog.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* Each color buffer has a 4-bit format field. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			/* Only channel 2 (Z) carries the depth value. */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->radeon_bld.soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    SI_PARAM_SAMPLE_COVERAGE,
					    last_color_export == i, &exp);
			break;
		default:
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* MRTZ is queued after all color exports, i.e. it comes last. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);

	si_emit_ps_exports(ctx, &exp);
}
3208
3209 /**
3210 * Return PS outputs in this order:
3211 *
3212 * v[0:3] = color0.xyzw
3213 * v[4:7] = color1.xyzw
3214 * ...
3215 * vN+0 = Depth
3216 * vN+1 = Stencil
3217 * vN+2 = SampleMask
3218 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3219 *
3220 * The alpha-ref SGPR is returned via its original location.
3221 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->radeon_bld.soa.outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only channel 2 (Z) carries the depth value. */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->radeon_bld.main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		/* A color is present iff its X channel was written. */
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->radeon_bld.main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3302
3303 /**
3304 * Given a v8i32 resource descriptor for a buffer, extract the size of the
3305 * buffer in number of elements and return it as an i32.
3306 */
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	/* Dword 6 of the descriptor is its size field. */
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					lp_build_const_int32(gallivm, 6), "");

	if (ctx->screen->b.chip_class >= VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						lp_build_const_int32(gallivm, 5), "");
		/* The 14-bit stride lives in bits [29:16] of dword 5. */
		stride = LLVMBuildLShr(builder, stride,
				       lp_build_const_int32(gallivm, 16), "");
		stride = LLVMBuildAnd(builder, stride,
				      lp_build_const_int32(gallivm, 0x3FFF), "");

		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}
3336
3337 /**
3338 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
3339 * intrinsic names).
3340 */
3341 static void build_int_type_name(
3342 LLVMTypeRef type,
3343 char *buf, unsigned bufsize)
3344 {
3345 assert(bufsize >= 6);
3346
3347 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
3348 snprintf(buf, bufsize, "v%ui32",
3349 LLVMGetVectorSize(type));
3350 else
3351 strcpy(buf, "i32");
3352 }
3353
3354 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
3355 struct lp_build_tgsi_context *bld_base,
3356 struct lp_build_emit_data *emit_data);
3357
3358 /* Prevent optimizations (at least of memory accesses) across the current
3359 * point in the program by emitting empty inline assembly that is marked as
3360 * having side effects.
3361 */
3362 static void emit_optimization_barrier(struct si_shader_context *ctx)
3363 {
3364 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
3365 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
3366 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
3367 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
3368 }
3369
/* Emit s_waitcnt to wait for outstanding memory operations.
 * NOTE(review): 0xf70 appears to be an s_waitcnt immediate with
 * vmcnt=0 and the other counters at their no-wait maximums (wait only
 * for vector memory ops) — confirm against the GCN s_waitcnt field
 * encoding in the ISA docs.
 */
static void emit_waitcnt(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef args[1] = {
		lp_build_const_int32(gallivm, 0xf70)
	};
	lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
			   ctx->voidt, args, 1, 0);
}
3380
/* TGSI MEMBAR: realized as a wait on outstanding memory operations. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	emit_waitcnt(si_shader_context(bld_base));
}
3390
3391 static LLVMValueRef
3392 shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
3393 const struct tgsi_full_src_register *reg)
3394 {
3395 LLVMValueRef ind_index;
3396 LLVMValueRef rsrc_ptr;
3397
3398 if (!reg->Register.Indirect)
3399 return ctx->shader_buffers[reg->Register.Index];
3400
3401 ind_index = get_bounded_indirect_index(ctx, &reg->Indirect,
3402 reg->Register.Index,
3403 SI_NUM_SHADER_BUFFERS);
3404
3405 rsrc_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_SHADER_BUFFERS);
3406 return build_indexed_load_const(ctx, rsrc_ptr, ind_index);
3407 }
3408
3409 static bool tgsi_is_array_sampler(unsigned target)
3410 {
3411 return target == TGSI_TEXTURE_1D_ARRAY ||
3412 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
3413 target == TGSI_TEXTURE_2D_ARRAY ||
3414 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
3415 target == TGSI_TEXTURE_CUBE_ARRAY ||
3416 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
3417 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3418 }
3419
3420 static bool tgsi_is_array_image(unsigned target)
3421 {
3422 return target == TGSI_TEXTURE_3D ||
3423 target == TGSI_TEXTURE_CUBE ||
3424 target == TGSI_TEXTURE_1D_ARRAY ||
3425 target == TGSI_TEXTURE_2D_ARRAY ||
3426 target == TGSI_TEXTURE_CUBE_ARRAY ||
3427 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3428 }
3429
3430 /**
3431 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
3432 *
3433 * At least on Tonga, executing image stores on images with DCC enabled and
3434 * non-trivial can eventually lead to lockups. This can occur when an
3435 * application binds an image as read-only but then uses a shader that writes
3436 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
3437 * program termination) in this case, but it doesn't cost much to be a bit
3438 * nicer: disabling DCC in the shader still leads to undefined results but
3439 * avoids the lockup.
3440 */
3441 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
3442 LLVMValueRef rsrc)
3443 {
3444 if (ctx->screen->b.chip_class <= CIK) {
3445 return rsrc;
3446 } else {
3447 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
3448 LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
3449 LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
3450 LLVMValueRef tmp;
3451
3452 tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
3453 tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
3454 return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
3455 }
3456 }
3457
3458 /**
3459 * Load the resource descriptor for \p image.
3460 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool dcc_off,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		/* Fast path: use preloaded resources */
		*rsrc = ctx->images[image->Register.Index];
	} else {
		/* Indexing and manual load */
		LLVMValueRef ind_index;
		LLVMValueRef rsrc_ptr;
		LLVMValueRef tmp;

		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		ind_index = get_bounded_indirect_index(ctx, &image->Indirect,
						       image->Register.Index,
						       SI_NUM_IMAGES);

		rsrc_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
		tmp = build_indexed_load_const(ctx, rsrc_ptr, ind_index);
		/* Writable images must not use DCC; see force_dcc_off. */
		if (dcc_off)
			tmp = force_dcc_off(ctx, tmp);
		*rsrc = tmp;
	}
}
3501
/* Fetch the coordinates for an image operand and pack them into a
 * single value: a scalar for one coordinate, a vector otherwise. */
static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	/* Coordinates are passed to the intrinsics as integers. */
	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
		coords[chan] = tmp;
	}

	if (num_coords == 1)
		return coords[0];

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(gallivm, coords, num_coords);
}
3532
3533 /**
3534 * Append the extra mode bits that are used by image load and store.
3535 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = i1false; /* r128 */
	emit_data->args[emit_data->arg_count++] =
		tgsi_is_array_image(target) ? i1true : i1false; /* da */
	if (!atomic) {
		/* glc is set for coherent/volatile accesses; the atomic
		 * intrinsics take no glc argument. */
		emit_data->args[emit_data->arg_count++] =
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
3556
3557 /**
3558 * Given a 256 bit resource, extract the top half (which stores the buffer
3559 * resource in the case of textures and images).
3560 */
3561 static LLVMValueRef extract_rsrc_top_half(
3562 struct si_shader_context *ctx,
3563 LLVMValueRef rsrc)
3564 {
3565 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3566 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
3567 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
3568
3569 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, v2i128, "");
3570 rsrc = LLVMBuildExtractElement(gallivm->builder, rsrc, bld_base->uint_bld.one, "");
3571 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, "");
3572
3573 return rsrc;
3574 }
3575
3576 /**
3577 * Append the resource and indexing arguments for buffer intrinsics.
3578 *
3579 * \param rsrc the v4i32 buffer resource
3580 * \param index index into the buffer (stride-based)
3581 * \param offset byte offset into the buffer
3582 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	if (!atomic) {
		/* glc is set for coherent/volatile accesses; the atomic
		 * intrinsics take no glc argument. */
		emit_data->args[emit_data->arg_count++] =
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
3605
/* Gather the intrinsic arguments for a TGSI LOAD from an SSBO or image.
 * (Shared-memory loads bypass this and are handled in load_emit.) */
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	/* Loads always produce a vec4 result. */
	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images go through the buffer path with
			 * the coordinate as the element index (vindex). */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3650
/* Emit an SSBO load, picking the narrowest buffer.load variant that
 * covers the destination writemask. */
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	const char *intrinsic_name;
	LLVMTypeRef dst_type;

	switch (count) {
	case 1:
		intrinsic_name = "llvm.amdgcn.buffer.load.f32";
		dst_type = ctx->f32;
		break;
	case 2:
		intrinsic_name = "llvm.amdgcn.buffer.load.v2f32";
		dst_type = LLVMVectorType(ctx->f32, 2);
		break;
	default: // 3 & 4
		/* There is no 3-component variant; round up to 4. */
		intrinsic_name = "llvm.amdgcn.buffer.load.v4f32";
		dst_type = ctx->v4f32;
		count = 4;
	}

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, intrinsic_name, dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadOnlyAttribute);
}
3682
/* Build a pointer of element type \p type into shared memory (LDS),
 * using the offset fetched from instruction operand \p arg. */
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef offset, ptr;
	int addr_space;

	offset = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, arg, 0);
	offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

	ptr = ctx->shared_memory;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	/* Re-type the pointer while preserving the LDS address space. */
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}
3702
/* Emit a load from shared memory (TGSI_FILE_MEMORY), one channel at a
 * time; channels outside the writemask are left undefined. */
static void load_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef channels[4], ptr, derived_ptr, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, base->elem_type, 1);

	for (chan = 0; chan < 4; ++chan) {
		if (!(writemask & (1 << chan))) {
			channels[chan] = LLVMGetUndef(base->elem_type);
			continue;
		}

		index = lp_build_const_int32(gallivm, chan);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
	}
	emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
}
3729
/* Emit a TGSI LOAD from shared memory, an SSBO, or an image. */
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[32];
	char coords_type[8];

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile loads wait for prior memory operations first. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	} else {
		/* image.load is overloaded on the coordinate type;
		 * build the matching intrinsic-name suffix. */
		build_int_type_name(LLVMTypeOf(emit_data->args[0]),
				    coords_type, sizeof(coords_type));

		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.load.%s", coords_type);

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	}
}
3775
/* Fetch the arguments for a TGSI STORE instruction.
 *
 * The value to store (Src[1], always fetched as a full vec4) goes into
 * args[0]; the remaining slots depend on the destination file:
 *  - TGSI_FILE_BUFFER (SSBO): rsrc + voffset appended by buffer_append_args.
 *  - image buffers: rsrc top half + element index via buffer_append_args.
 *  - other images: args[1]=coords, args[2]=rsrc, args[3]=dmask, then
 *    image_append_args adds the trailing flags.
 */
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	/* Stores return nothing. */
	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	/* The destination register, reinterpreted as a source so the
	 * rsrc-fetch helpers can be reused. */
	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		/* Src[0].x holds the byte offset into the SSBO. */
		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		coords = image_fetch_coords(bld_base, inst, 0);

		if (target == TGSI_TEXTURE_BUFFER) {
			image_fetch_rsrc(bld_base, &memory, false, &rsrc);

			/* Buffer images use the v4i32 half of the descriptor. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[1] = coords;
			image_fetch_rsrc(bld_base, &memory, true, &emit_data->args[2]);
			emit_data->args[3] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3834
/* Emit an SSBO store, splitting the vec4 value by write mask.
 *
 * Consecutive enabled channels are written with a single
 * llvm.amdgcn.buffer.store.{f32,v2f32,v4f32} call; the byte offset is
 * adjusted by 4 bytes per skipped leading channel.  args[0] (data) and
 * args[3] (offset) are rewritten on each loop iteration; the other args
 * set up by store_fetch_args are reused unchanged.
 */
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	struct lp_build_context *uint_bld = &ctx->radeon_bld.soa.bld_base.uint_bld;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			/* Push the third channel back for the next pass. */
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			/* Build a v2f32 from channels [start, start+1]. */
			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				uint_bld->zero, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start + 1), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, uint_bld->one, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		/* Advance the byte offset past the skipped channels. */
		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				lp_build_const_int32(gallivm, start * 4), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;

		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count, 0);
	}
}
3906
3907 static void store_emit_memory(
3908 struct si_shader_context *ctx,
3909 struct lp_build_emit_data *emit_data)
3910 {
3911 const struct tgsi_full_instruction *inst = emit_data->inst;
3912 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
3913 struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
3914 LLVMBuilderRef builder = gallivm->builder;
3915 unsigned writemask = inst->Dst[0].Register.WriteMask;
3916 LLVMValueRef ptr, derived_ptr, data, index;
3917 int chan;
3918
3919 ptr = get_memory_ptr(ctx, inst, base->elem_type, 0);
3920
3921 for (chan = 0; chan < 4; ++chan) {
3922 if (!(writemask & (1 << chan))) {
3923 continue;
3924 }
3925 data = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, 1, chan);
3926 index = lp_build_const_int32(gallivm, chan);
3927 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3928 LLVMBuildStore(builder, data, derived_ptr);
3929 }
3930 }
3931
/* Emit a TGSI STORE: dispatch to the LDS, SSBO, buffer-image or image path.
 *
 * Mirrors load_emit(): shared memory returns before the volatile check,
 * so only SSBO and image stores get a preceding emit_waitcnt().
 */
static void store_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	char intrinsic_name[32];
	char coords_type[8];

	/* Shared (LDS) memory: plain LLVM stores, no intrinsic needed. */
	if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
		store_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		store_emit_buffer(ctx, emit_data);
		return;
	}

	if (target == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, "llvm.amdgcn.buffer.store.format.v4f32",
			emit_data->dst_type, emit_data->args,
			emit_data->arg_count, 0);
	} else {
		/* Non-buffer images: mangle the intrinsic name with the
		 * integer type of the coordinate vector (args[1]). */
		build_int_type_name(LLVMTypeOf(emit_data->args[1]),
				    coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.store.%s", coords_type);

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count, 0);
	}
}
3975
/* Fetch the arguments for a TGSI atomic (ATOM*) instruction.
 *
 * Src[2] is the data operand; ATOMCAS additionally fetches Src[3] as the
 * exchange value.  The data argument(s) come first, followed by the
 * resource/offset arguments appended by buffer_append_args() or
 * image_append_args() depending on the resource file.
 */
static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	emit_data->dst_type = bld_base->base.elem_type;

	/* Bitcast operands to i32 for the hardware atomic intrinsics. */
	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x holds the byte offset into the SSBO. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, true);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0],
				 target != TGSI_TEXTURE_BUFFER, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use the v4i32 half of the descriptor. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, true);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true);
		}
	}
}
4035
4036 static void atomic_emit_memory(struct si_shader_context *ctx,
4037 struct lp_build_emit_data *emit_data) {
4038 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
4039 LLVMBuilderRef builder = gallivm->builder;
4040 const struct tgsi_full_instruction * inst = emit_data->inst;
4041 LLVMValueRef ptr, result, arg;
4042
4043 ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
4044
4045 arg = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base, inst, 2, 0);
4046 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4047
4048 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
4049 LLVMValueRef new_data;
4050 new_data = lp_build_emit_fetch(&ctx->radeon_bld.soa.bld_base,
4051 inst, 3, 0);
4052
4053 new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");
4054
4055 #if HAVE_LLVM >= 0x309
4056 result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
4057 LLVMAtomicOrderingSequentiallyConsistent,
4058 LLVMAtomicOrderingSequentiallyConsistent,
4059 false);
4060 #endif
4061
4062 result = LLVMBuildExtractValue(builder, result, 0, "");
4063 } else {
4064 LLVMAtomicRMWBinOp op;
4065
4066 switch(inst->Instruction.Opcode) {
4067 case TGSI_OPCODE_ATOMUADD:
4068 op = LLVMAtomicRMWBinOpAdd;
4069 break;
4070 case TGSI_OPCODE_ATOMXCHG:
4071 op = LLVMAtomicRMWBinOpXchg;
4072 break;
4073 case TGSI_OPCODE_ATOMAND:
4074 op = LLVMAtomicRMWBinOpAnd;
4075 break;
4076 case TGSI_OPCODE_ATOMOR:
4077 op = LLVMAtomicRMWBinOpOr;
4078 break;
4079 case TGSI_OPCODE_ATOMXOR:
4080 op = LLVMAtomicRMWBinOpXor;
4081 break;
4082 case TGSI_OPCODE_ATOMUMIN:
4083 op = LLVMAtomicRMWBinOpUMin;
4084 break;
4085 case TGSI_OPCODE_ATOMUMAX:
4086 op = LLVMAtomicRMWBinOpUMax;
4087 break;
4088 case TGSI_OPCODE_ATOMIMIN:
4089 op = LLVMAtomicRMWBinOpMin;
4090 break;
4091 case TGSI_OPCODE_ATOMIMAX:
4092 op = LLVMAtomicRMWBinOpMax;
4093 break;
4094 default:
4095 unreachable("unknown atomic opcode");
4096 }
4097
4098 result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
4099 LLVMAtomicOrderingSequentiallyConsistent,
4100 false);
4101 }
4102 emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
4103 }
4104
/* Emit a TGSI atomic on an SSBO or image.
 *
 * LDS atomics are handled by atomic_emit_memory(); everything else maps
 * to llvm.amdgcn.buffer.atomic.* or llvm.amdgcn.image.atomic.*, where
 * the operation name comes from the action table (action->intr_name) and
 * image intrinsics are additionally mangled with the coordinate type.
 * The i32 result is bitcast back to the float element type.
 */
static void atomic_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[40];
	LLVMValueRef tmp;

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		atomic_emit_memory(ctx, emit_data);
		return;
	}

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
	    inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
	} else {
		char coords_type[8];

		/* args[1] is the coordinate vector (see atomic_fetch_args). */
		build_int_type_name(LLVMTypeOf(emit_data->args[1]),
				    coords_type, sizeof(coords_type));
		snprintf(intrinsic_name, sizeof(intrinsic_name),
			 "llvm.amdgcn.image.atomic.%s.%s",
			 action->intr_name, coords_type);
	}

	tmp = lp_build_intrinsic(
		builder, intrinsic_name, bld_base->uint_bld.elem_type,
		emit_data->args, emit_data->arg_count, 0);
	emit_data->output[emit_data->chan] =
		LLVMBuildBitCast(builder, tmp, bld_base->base.elem_type, "");
}
4142
/* Fetch the arguments for a TGSI RESQ (resource size query) instruction.
 *
 * SSBOs and buffer images only need the resource descriptor; other
 * images use the full llvm.SI.getresinfo argument layout (mip level,
 * rsrc, dmask, unorm, r128, da, glc, slc, tfe, lwe) that resq_emit
 * passes through to the intrinsic.
 */
static void resq_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = ctx->v4i32;

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		emit_data->args[0] = bld_base->uint_bld.zero; /* mip level */
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[1]);
		emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
		emit_data->args[3] = bld_base->uint_bld.zero; /* unorm */
		emit_data->args[4] = bld_base->uint_bld.zero; /* r128 */
		emit_data->args[5] = tgsi_is_array_image(inst->Memory.Texture) ?
			bld_base->uint_bld.one : bld_base->uint_bld.zero; /* da */
		emit_data->args[6] = bld_base->uint_bld.zero; /* glc */
		emit_data->args[7] = bld_base->uint_bld.zero; /* slc */
		emit_data->args[8] = bld_base->uint_bld.zero; /* tfe */
		emit_data->args[9] = bld_base->uint_bld.zero; /* lwe */
		emit_data->arg_count = 10;
	}
}
4175
/* Emit a TGSI RESQ (resource size query).
 *
 * SSBOs: the size is read straight out of dword 2 of the buffer
 * descriptor fetched by resq_fetch_args.  Buffer images: computed by
 * get_buffer_size().  Other images: llvm.SI.getresinfo, with the layer
 * count of cube arrays divided by 6 to report cubes, not faces.
 */
static void resq_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef out;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		out = LLVMBuildExtractElement(builder, emit_data->args[0],
					      lp_build_const_int32(gallivm, 2), "");
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		out = get_buffer_size(bld_base, emit_data->args[0]);
	} else {
		out = lp_build_intrinsic(
			builder, "llvm.SI.getresinfo.i32", emit_data->dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadNoneAttribute);

		/* Divide the number of layers by 6 to get the number of cubes. */
		if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY) {
			LLVMValueRef imm2 = lp_build_const_int32(gallivm, 2);
			LLVMValueRef imm6 = lp_build_const_int32(gallivm, 6);

			LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
			z = LLVMBuildSDiv(builder, z, imm6, "");
			out = LLVMBuildInsertElement(builder, out, z, imm2, "");
		}
	}

	emit_data->output[emit_data->chan] = out;
}
4210
/* Fill in emit_data->args for a texture instruction.
 *
 * param[0..count-1] are the address components; they are padded with
 * undef up to the next power of two and packed into args[0] as a single
 * vector.  The remaining slots follow the image/sample intrinsic layout:
 * rsrc, (sampler state for sampling opcodes), dmask, unorm, r128, da,
 * glc, slc, tfe, lwe.  TXF/TXQ return integer data (v4i32); everything
 * else returns v4f32 and takes a sampler.
 */
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned opcode, unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	unsigned num_args;
	unsigned is_rect = target == TGSI_TEXTURE_RECT;

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	/* Texture coordinates. */
	if (count > 1)
		emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
	else
		emit_data->args[0] = param[0];

	/* Resource. */
	emit_data->args[1] = res_ptr;
	num_args = 2;

	if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
		emit_data->dst_type = ctx->v4i32;
	else {
		emit_data->dst_type = ctx->v4f32;

		emit_data->args[num_args++] = samp_ptr;
	}

	emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm,
					tgsi_is_array_sampler(target)); /* da */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */

	emit_data->arg_count = num_args;
}
4256
/* Forward declaration: used before its definition (e.g. by the FMASK
 * fetch in tex_fetch_args). */
static const struct lp_build_tgsi_action tex_action;

/* Which descriptor to load from the combined sampler-view list; see
 * get_sampler_desc_custom() for the in-memory layout. */
enum desc_type {
	DESC_IMAGE,
	DESC_FMASK,
	DESC_SAMPLER
};
4264
4265 static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
4266 {
4267 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
4268 CONST_ADDR_SPACE);
4269 }
4270
4271 /**
4272 * Load an image view, fmask view. or sampler state descriptor.
4273 */
4274 static LLVMValueRef get_sampler_desc_custom(struct si_shader_context *ctx,
4275 LLVMValueRef list, LLVMValueRef index,
4276 enum desc_type type)
4277 {
4278 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
4279 LLVMBuilderRef builder = gallivm->builder;
4280
4281 switch (type) {
4282 case DESC_IMAGE:
4283 /* The image is at [0:7]. */
4284 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4285 break;
4286 case DESC_FMASK:
4287 /* The FMASK is at [8:15]. */
4288 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4289 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
4290 break;
4291 case DESC_SAMPLER:
4292 /* The sampler state is at [12:15]. */
4293 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
4294 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
4295 list = LLVMBuildPointerCast(builder, list,
4296 const_array(ctx->v4i32, 0), "");
4297 break;
4298 }
4299
4300 return build_indexed_load_const(ctx, list, index);
4301 }
4302
4303 static LLVMValueRef get_sampler_desc(struct si_shader_context *ctx,
4304 LLVMValueRef index, enum desc_type type)
4305 {
4306 LLVMValueRef list = LLVMGetParam(ctx->radeon_bld.main_fn,
4307 SI_PARAM_SAMPLERS);
4308
4309 return get_sampler_desc_custom(ctx, list, index, type);
4310 }
4311
4312 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
4313 *
4314 * SI-CI:
4315 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
4316 * filtering manually. The driver sets img7 to a mask clearing
4317 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
4318 * s_and_b32 samp0, samp0, img7
4319 *
4320 * VI:
4321 * The ANISO_OVERRIDE sampler field enables this fix in TA.
4322 */
4323 static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
4324 LLVMValueRef res, LLVMValueRef samp)
4325 {
4326 LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
4327 LLVMValueRef img7, samp0;
4328
4329 if (ctx->screen->b.chip_class >= VI)
4330 return samp;
4331
4332 img7 = LLVMBuildExtractElement(builder, res,
4333 LLVMConstInt(ctx->i32, 7, 0), "");
4334 samp0 = LLVMBuildExtractElement(builder, samp,
4335 LLVMConstInt(ctx->i32, 0, 0), "");
4336 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
4337 return LLVMBuildInsertElement(builder, samp, samp0,
4338 LLVMConstInt(ctx->i32, 0, 0), "");
4339 }
4340
/* Resolve the resource, sampler and FMASK descriptors for a texture
 * instruction.
 *
 * The sampler is always the last source register.  For a directly
 * indexed sampler, the pre-loaded values from the context are used; for
 * an indirect index, the descriptors are loaded on demand (with the
 * index clamped by get_bounded_indirect_index).  MSAA targets get an
 * FMASK instead of a sampler state; all other targets get a sampler
 * state patched by sici_fix_sampler_aniso().  samp_ptr and fmask_ptr
 * are optional outputs and may be NULL.
 */
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	unsigned sampler_index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (emit_data->inst->Src[sampler_src].Register.Indirect) {
		const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];
		LLVMValueRef ind_index;

		ind_index = get_bounded_indirect_index(ctx,
						       &reg->Indirect,
						       reg->Register.Index,
						       SI_NUM_SAMPLERS);

		*res_ptr = get_sampler_desc(ctx, ind_index, DESC_IMAGE);

		if (target == TGSI_TEXTURE_2D_MSAA ||
		    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
			/* MSAA: no sampler state, but an FMASK descriptor. */
			if (samp_ptr)
				*samp_ptr = NULL;
			if (fmask_ptr)
				*fmask_ptr = get_sampler_desc(ctx, ind_index, DESC_FMASK);
		} else {
			if (samp_ptr) {
				*samp_ptr = get_sampler_desc(ctx, ind_index, DESC_SAMPLER);
				*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
			}
			if (fmask_ptr)
				*fmask_ptr = NULL;
		}
	} else {
		/* Direct index: use the descriptors preloaded in the context. */
		*res_ptr = ctx->sampler_views[sampler_index];
		if (samp_ptr)
			*samp_ptr = ctx->sampler_states[sampler_index];
		if (fmask_ptr)
			*fmask_ptr = ctx->fmasks[sampler_index];
	}
}
4388
/* Fetch the arguments for TXQ (texture size query).
 *
 * Buffer textures answer the query directly from the buffer descriptor
 * (no intrinsic call); other targets set up the llvm.SI.getresinfo
 * argument layout via set_tex_fetch_args with the requested mip level
 * as the only address component.
 */
static void txq_fetch_args(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	LLVMValueRef res_ptr;
	LLVMValueRef address;

	tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Read the size from the buffer descriptor directly. */
		LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
		emit_data->args[0] = get_buffer_size(bld_base, res);
		return;
	}

	/* Textures - set the mip level. */
	address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);

	set_tex_fetch_args(ctx, emit_data, TGSI_OPCODE_TXQ, target, res_ptr,
			   NULL, &address, 1, 0xf);
}
4416
/* Emit TXQ (texture size query).
 *
 * Buffer targets already computed the answer in txq_fetch_args; other
 * targets call llvm.SI.getresinfo, then fix up cube arrays so the layer
 * count reports cubes rather than faces.
 */
static void txq_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct lp_build_context *base = &bld_base->base;
	unsigned target = emit_data->inst->Texture.Texture;

	if (target == TGSI_TEXTURE_BUFFER) {
		/* Just return the buffer size. */
		emit_data->output[emit_data->chan] = emit_data->args[0];
		return;
	}

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
		base->gallivm->builder, "llvm.SI.getresinfo.i32",
		emit_data->dst_type, emit_data->args, emit_data->arg_count,
		LLVMReadNoneAttribute);

	/* Divide the number of layers by 6 to get the number of cubes. */
	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
		LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);

		/* The layer count is in the .z channel of the result. */
		LLVMValueRef v4 = emit_data->output[emit_data->chan];
		LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
		z = LLVMBuildSDiv(builder, z, six, "");

		emit_data->output[emit_data->chan] =
			LLVMBuildInsertElement(builder, v4, z, two, "");
	}
}
4450
4451 static void tex_fetch_args(
4452 struct lp_build_tgsi_context *bld_base,
4453 struct lp_build_emit_data *emit_data)
4454 {
4455 struct si_shader_context *ctx = si_shader_context(bld_base);
4456 struct gallivm_state *gallivm = bld_base->base.gallivm;
4457 const struct tgsi_full_instruction *inst = emit_data->inst;
4458 unsigned opcode = inst->Instruction.Opcode;
4459 unsigned target = inst->Texture.Texture;
4460 LLVMValueRef coords[5], derivs[6];
4461 LLVMValueRef address[16];
4462 unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
4463 int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
4464 unsigned count = 0;
4465 unsigned chan;
4466 unsigned num_deriv_channels = 0;
4467 bool has_offset = inst->Texture.NumOffsets > 0;
4468 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4469 unsigned dmask = 0xf;
4470
4471 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4472
4473 if (target == TGSI_TEXTURE_BUFFER) {
4474 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
4475
4476 /* Bitcast and truncate v8i32 to v16i8. */
4477 LLVMValueRef res = res_ptr;
4478 res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
4479 res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
4480 res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");
4481
4482 emit_data->dst_type = ctx->v4f32;
4483 emit_data->args[0] = res;
4484 emit_data->args[1] = bld_base->uint_bld.zero;
4485 emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4486 emit_data->arg_count = 3;
4487 return;
4488 }
4489
4490 /* Fetch and project texture coordinates */
4491 coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
4492 for (chan = 0; chan < 3; chan++ ) {
4493 coords[chan] = lp_build_emit_fetch(bld_base,
4494 emit_data->inst, 0,
4495 chan);
4496 if (opcode == TGSI_OPCODE_TXP)
4497 coords[chan] = lp_build_emit_llvm_binary(bld_base,
4498 TGSI_OPCODE_DIV,
4499 coords[chan],
4500 coords[3]);
4501 }
4502
4503 if (opcode == TGSI_OPCODE_TXP)
4504 coords[3] = bld_base->base.one;
4505
4506 /* Pack offsets. */
4507 if (has_offset && opcode != TGSI_OPCODE_TXF) {
4508 /* The offsets are six-bit signed integers packed like this:
4509 * X=[5:0], Y=[13:8], and Z=[21:16].
4510 */
4511 LLVMValueRef offset[3], pack;
4512
4513 assert(inst->Texture.NumOffsets == 1);
4514
4515 for (chan = 0; chan < 3; chan++) {
4516 offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
4517 emit_data->inst, 0, chan);
4518 offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
4519 lp_build_const_int32(gallivm, 0x3f), "");
4520 if (chan)
4521 offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
4522 lp_build_const_int32(gallivm, chan*8), "");
4523 }
4524
4525 pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
4526 pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
4527 address[count++] = pack;
4528 }
4529
4530 /* Pack LOD bias value */
4531 if (opcode == TGSI_OPCODE_TXB)
4532 address[count++] = coords[3];
4533 if (opcode == TGSI_OPCODE_TXB2)
4534 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4535
4536 /* Pack depth comparison value */
4537 if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
4538 if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4539 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4540 } else {
4541 assert(ref_pos >= 0);
4542 address[count++] = coords[ref_pos];
4543 }
4544 }
4545
4546 /* Pack user derivatives */
4547 if (opcode == TGSI_OPCODE_TXD) {
4548 int param, num_src_deriv_channels;
4549
4550 switch (target) {
4551 case TGSI_TEXTURE_3D:
4552 num_src_deriv_channels = 3;
4553 num_deriv_channels = 3;
4554 break;
4555 case TGSI_TEXTURE_2D:
4556 case TGSI_TEXTURE_SHADOW2D:
4557 case TGSI_TEXTURE_RECT:
4558 case TGSI_TEXTURE_SHADOWRECT:
4559 case TGSI_TEXTURE_2D_ARRAY:
4560 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4561 num_src_deriv_channels = 2;
4562 num_deriv_channels = 2;
4563 break;
4564 case TGSI_TEXTURE_CUBE:
4565 case TGSI_TEXTURE_SHADOWCUBE:
4566 case TGSI_TEXTURE_CUBE_ARRAY:
4567 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
4568 /* Cube derivatives will be converted to 2D. */
4569 num_src_deriv_channels = 3;
4570 num_deriv_channels = 2;
4571 break;
4572 case TGSI_TEXTURE_1D:
4573 case TGSI_TEXTURE_SHADOW1D:
4574 case TGSI_TEXTURE_1D_ARRAY:
4575 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4576 num_src_deriv_channels = 1;
4577 num_deriv_channels = 1;
4578 break;
4579 default:
4580 unreachable("invalid target");
4581 }
4582
4583 for (param = 0; param < 2; param++)
4584 for (chan = 0; chan < num_src_deriv_channels; chan++)
4585 derivs[param * num_src_deriv_channels + chan] =
4586 lp_build_emit_fetch(bld_base, inst, param+1, chan);
4587 }
4588
4589 if (target == TGSI_TEXTURE_CUBE ||
4590 target == TGSI_TEXTURE_CUBE_ARRAY ||
4591 target == TGSI_TEXTURE_SHADOWCUBE ||
4592 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
4593 radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords, derivs);
4594
4595 if (opcode == TGSI_OPCODE_TXD)
4596 for (int i = 0; i < num_deriv_channels * 2; i++)
4597 address[count++] = derivs[i];
4598
4599 /* Pack texture coordinates */
4600 address[count++] = coords[0];
4601 if (num_coords > 1)
4602 address[count++] = coords[1];
4603 if (num_coords > 2)
4604 address[count++] = coords[2];
4605
4606 /* Pack LOD or sample index */
4607 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
4608 address[count++] = coords[3];
4609 else if (opcode == TGSI_OPCODE_TXL2)
4610 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4611
4612 if (count > 16) {
4613 assert(!"Cannot handle more than 16 texture address parameters");
4614 count = 16;
4615 }
4616
4617 for (chan = 0; chan < count; chan++ ) {
4618 address[chan] = LLVMBuildBitCast(gallivm->builder,
4619 address[chan], ctx->i32, "");
4620 }
4621
4622 /* Adjust the sample index according to FMASK.
4623 *
4624 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
4625 * which is the identity mapping. Each nibble says which physical sample
4626 * should be fetched to get that sample.
4627 *
4628 * For example, 0x11111100 means there are only 2 samples stored and
4629 * the second sample covers 3/4 of the pixel. When reading samples 0
4630 * and 1, return physical sample 0 (determined by the first two 0s
4631 * in FMASK), otherwise return physical sample 1.
4632 *
4633 * The sample index should be adjusted as follows:
4634 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
4635 */
4636 if (target == TGSI_TEXTURE_2D_MSAA ||
4637 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
4638 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4639 struct lp_build_emit_data txf_emit_data = *emit_data;
4640 LLVMValueRef txf_address[4];
4641 unsigned txf_count = count;
4642 struct tgsi_full_instruction inst = {};
4643
4644 memcpy(txf_address, address, sizeof(txf_address));
4645
4646 if (target == TGSI_TEXTURE_2D_MSAA) {
4647 txf_address[2] = bld_base->uint_bld.zero;
4648 }
4649 txf_address[3] = bld_base->uint_bld.zero;
4650
4651 /* Read FMASK using TXF. */
4652 inst.Instruction.Opcode = TGSI_OPCODE_TXF;
4653 inst.Texture.Texture = target;
4654 txf_emit_data.inst = &inst;
4655 txf_emit_data.chan = 0;
4656 set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
4657 target, fmask_ptr, NULL,
4658 txf_address, txf_count, 0xf);
4659 build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);
4660
4661 /* Initialize some constants. */
4662 LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
4663 LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);
4664
4665 /* Apply the formula. */
4666 LLVMValueRef fmask =
4667 LLVMBuildExtractElement(gallivm->builder,
4668 txf_emit_data.output[0],
4669 uint_bld->zero, "");
4670
4671 unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
4672
4673 LLVMValueRef sample_index4 =
4674 LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
4675
4676 LLVMValueRef shifted_fmask =
4677 LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");
4678
4679 LLVMValueRef final_sample =
4680 LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");
4681
4682 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
4683 * resource descriptor is 0 (invalid),
4684 */
4685 LLVMValueRef fmask_desc =
4686 LLVMBuildBitCast(gallivm->builder, fmask_ptr,
4687 ctx->v8i32, "");
4688
4689 LLVMValueRef fmask_word1 =
4690 LLVMBuildExtractElement(gallivm->builder, fmask_desc,
4691 uint_bld->one, "");
4692
4693 LLVMValueRef word1_is_nonzero =
4694 LLVMBuildICmp(gallivm->builder, LLVMIntNE,
4695 fmask_word1, uint_bld->zero, "");
4696
4697 /* Replace the MSAA sample index. */
4698 address[sample_chan] =
4699 LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
4700 final_sample, address[sample_chan], "");
4701 }
4702
4703 if (opcode == TGSI_OPCODE_TXF) {
4704 /* add tex offsets */
4705 if (inst->Texture.NumOffsets) {
4706 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4707 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
4708 const struct tgsi_texture_offset *off = inst->TexOffsets;
4709
4710 assert(inst->Texture.NumOffsets == 1);
4711
4712 switch (target) {
4713 case TGSI_TEXTURE_3D:
4714 address[2] = lp_build_add(uint_bld, address[2],
4715 bld->immediates[off->Index][off->SwizzleZ]);
4716 /* fall through */
4717 case TGSI_TEXTURE_2D:
4718 case TGSI_TEXTURE_SHADOW2D:
4719 case TGSI_TEXTURE_RECT:
4720 case TGSI_TEXTURE_SHADOWRECT:
4721 case TGSI_TEXTURE_2D_ARRAY:
4722 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4723 address[1] =
4724 lp_build_add(uint_bld, address[1],
4725 bld->immediates[off->Index][off->SwizzleY]);
4726 /* fall through */
4727 case TGSI_TEXTURE_1D:
4728 case TGSI_TEXTURE_SHADOW1D:
4729 case TGSI_TEXTURE_1D_ARRAY:
4730 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4731 address[0] =
4732 lp_build_add(uint_bld, address[0],
4733 bld->immediates[off->Index][off->SwizzleX]);
4734 break;
4735 /* texture offsets do not apply to other texture targets */
4736 }
4737 }
4738 }
4739
4740 if (opcode == TGSI_OPCODE_TG4) {
4741 unsigned gather_comp = 0;
4742
4743 /* DMASK was repurposed for GATHER4. 4 components are always
4744 * returned and DMASK works like a swizzle - it selects
4745 * the component to fetch. The only valid DMASK values are
4746 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4747 * (red,red,red,red) etc.) The ISA document doesn't mention
4748 * this.
4749 */
4750
4751 /* Get the component index from src1.x for Gather4. */
4752 if (!tgsi_is_shadow_target(target)) {
4753 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
4754 LLVMValueRef comp_imm;
4755 struct tgsi_src_register src1 = inst->Src[1].Register;
4756
4757 assert(src1.File == TGSI_FILE_IMMEDIATE);
4758
4759 comp_imm = imms[src1.Index][src1.SwizzleX];
4760 gather_comp = LLVMConstIntGetZExtValue(comp_imm);
4761 gather_comp = CLAMP(gather_comp, 0, 3);
4762 }
4763
4764 dmask = 1 << gather_comp;
4765 }
4766
4767 set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
4768 samp_ptr, address, count, dmask);
4769 }
4770
4771 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
4772 * incorrectly forces nearest filtering if the texture format is integer.
4773 * The only effect it has on Gather4, which always returns 4 texels for
4774 * bilinear filtering, is that the final coordinates are off by 0.5 of
4775 * the texel size.
4776 *
4777 * The workaround is to subtract 0.5 from the unnormalized coordinates,
4778 * or (0.5 / size) from the normalized coordinates.
4779 */
static void si_lower_gather4_integer(struct si_shader_context *ctx,
				     struct lp_build_emit_data *emit_data,
				     const char *intr_name,
				     unsigned coord_vgpr_index)
{
	LLVMBuilderRef builder = ctx->radeon_bld.gallivm.builder;
	LLVMValueRef coord = emit_data->args[0];
	LLVMValueRef half_texel[2]; /* per-axis value added to coords x and y */
	int c;

	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_RECT ||
	    emit_data->inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
		/* RECT targets use unnormalized coordinates, so the
		 * correction is a constant -0.5 texels on both axes. */
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = emit_data->inst->Texture.Texture;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, TGSI_OPCODE_TXQ,
				   txq_inst.Texture.Texture,
				   emit_data->args[1], NULL,
				   &ctx->radeon_bld.soa.bld_base.uint_bld.zero,
				   1, 0xf);
		txq_emit(NULL, &ctx->radeon_bld.soa.bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			/* 1 / size via the RCP opcode, then scale by -0.5. */
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->radeon_bld.soa.bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	/* Apply the correction to the x and y coordinates, which live at
	 * elements coord_vgpr_index and coord_vgpr_index+1 of the packed
	 * argument vector. The vector elements are i32, so bitcast to
	 * float, add, and bitcast back. */
	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	/* Emit the gather intrinsic with the adjusted coordinates. */
	emit_data->args[0] = coord;
	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(builder, intr_name, emit_data->dst_type,
				   emit_data->args, emit_data->arg_count,
				   LLVMReadNoneAttribute);
}
4839
4840 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
4841 struct lp_build_tgsi_context *bld_base,
4842 struct lp_build_emit_data *emit_data)
4843 {
4844 struct si_shader_context *ctx = si_shader_context(bld_base);
4845 struct lp_build_context *base = &bld_base->base;
4846 const struct tgsi_full_instruction *inst = emit_data->inst;
4847 unsigned opcode = inst->Instruction.Opcode;
4848 unsigned target = inst->Texture.Texture;
4849 char intr_name[127];
4850 bool has_offset = inst->Texture.NumOffsets > 0;
4851 bool is_shadow = tgsi_is_shadow_target(target);
4852 char type[64];
4853 const char *name = "llvm.SI.image.sample";
4854 const char *infix = "";
4855
4856 if (target == TGSI_TEXTURE_BUFFER) {
4857 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4858 base->gallivm->builder,
4859 "llvm.SI.vs.load.input", emit_data->dst_type,
4860 emit_data->args, emit_data->arg_count,
4861 LLVMReadNoneAttribute);
4862 return;
4863 }
4864
4865 switch (opcode) {
4866 case TGSI_OPCODE_TXF:
4867 name = target == TGSI_TEXTURE_2D_MSAA ||
4868 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
4869 "llvm.SI.image.load" :
4870 "llvm.SI.image.load.mip";
4871 is_shadow = false;
4872 has_offset = false;
4873 break;
4874 case TGSI_OPCODE_LODQ:
4875 name = "llvm.SI.getlod";
4876 is_shadow = false;
4877 has_offset = false;
4878 break;
4879 case TGSI_OPCODE_TEX:
4880 case TGSI_OPCODE_TEX2:
4881 case TGSI_OPCODE_TXP:
4882 if (ctx->type != PIPE_SHADER_FRAGMENT)
4883 infix = ".lz";
4884 break;
4885 case TGSI_OPCODE_TXB:
4886 case TGSI_OPCODE_TXB2:
4887 assert(ctx->type == PIPE_SHADER_FRAGMENT);
4888 infix = ".b";
4889 break;
4890 case TGSI_OPCODE_TXL:
4891 case TGSI_OPCODE_TXL2:
4892 infix = ".l";
4893 break;
4894 case TGSI_OPCODE_TXD:
4895 infix = ".d";
4896 break;
4897 case TGSI_OPCODE_TG4:
4898 name = "llvm.SI.gather4";
4899 infix = ".lz";
4900 break;
4901 default:
4902 assert(0);
4903 return;
4904 }
4905
4906 /* Add the type and suffixes .c, .o if needed. */
4907 build_int_type_name(LLVMTypeOf(emit_data->args[0]), type, sizeof(type));
4908 sprintf(intr_name, "%s%s%s%s.%s",
4909 name, is_shadow ? ".c" : "", infix,
4910 has_offset ? ".o" : "", type);
4911
4912 /* The hardware needs special lowering for Gather4 with integer formats. */
4913 if (opcode == TGSI_OPCODE_TG4) {
4914 struct tgsi_shader_info *info = &ctx->shader->selector->info;
4915 /* This will also work with non-constant indexing because of how
4916 * glsl_to_tgsi works and we intent to preserve that behavior.
4917 */
4918 const unsigned src_idx = 2;
4919 unsigned sampler = inst->Src[src_idx].Register.Index;
4920
4921 assert(inst->Src[src_idx].Register.File == TGSI_FILE_SAMPLER);
4922
4923 if (info->sampler_type[sampler] == TGSI_RETURN_TYPE_SINT ||
4924 info->sampler_type[sampler] == TGSI_RETURN_TYPE_UINT) {
4925 /* Texture coordinates start after:
4926 * {offset, bias, z-compare, derivatives}
4927 * Only the offset and z-compare can occur here.
4928 */
4929 si_lower_gather4_integer(ctx, emit_data, intr_name,
4930 (int)has_offset + (int)is_shadow);
4931 return;
4932 }
4933 }
4934
4935 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4936 base->gallivm->builder, intr_name, emit_data->dst_type,
4937 emit_data->args, emit_data->arg_count,
4938 LLVMReadNoneAttribute);
4939 }
4940
4941 static void si_llvm_emit_txqs(
4942 const struct lp_build_tgsi_action *action,
4943 struct lp_build_tgsi_context *bld_base,
4944 struct lp_build_emit_data *emit_data)
4945 {
4946 struct si_shader_context *ctx = si_shader_context(bld_base);
4947 struct gallivm_state *gallivm = bld_base->base.gallivm;
4948 LLVMBuilderRef builder = gallivm->builder;
4949 LLVMValueRef res, samples;
4950 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4951
4952 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4953
4954
4955 /* Read the samples from the descriptor directly. */
4956 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4957 samples = LLVMBuildExtractElement(
4958 builder, res,
4959 lp_build_const_int32(gallivm, 3), "");
4960 samples = LLVMBuildLShr(builder, samples,
4961 lp_build_const_int32(gallivm, 16), "");
4962 samples = LLVMBuildAnd(builder, samples,
4963 lp_build_const_int32(gallivm, 0xf), "");
4964 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
4965 samples, "");
4966
4967 emit_data->output[emit_data->chan] = samples;
4968 }
4969
4970 /*
4971 * SI implements derivatives using the local data store (LDS)
4972 * All writes to the LDS happen in all executing threads at
4973 * the same time. TID is the Thread ID for the current
4974 * thread and is a value between 0 and 63, representing
4975 * the thread's position in the wavefront.
4976 *
4977 * For the pixel shader threads are grouped into quads of four pixels.
4978 * The TIDs of the pixels of a quad are:
4979 *
4980 * +------+------+
4981 * |4n + 0|4n + 1|
4982 * +------+------+
4983 * |4n + 2|4n + 3|
4984 * +------+------+
4985 *
4986 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
4987 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
4988 * the current pixel's column, and masking with 0xfffffffe yields the TID
4989 * of the left pixel of the current pixel's row.
4990 *
4991 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
4992 * adding 2 yields the TID of the pixel below the top pixel.
4993 */
4994 /* masks for thread ID. */
4995 #define TID_MASK_TOP_LEFT 0xfffffffc
4996 #define TID_MASK_TOP 0xfffffffd
4997 #define TID_MASK_LEFT 0xfffffffe
4998
/* Emit DDX/DDY (and the _FINE variants) using quad data exchange.
 * See the LDS/TID layout comment above for how neighbor pixels are found. */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	LLVMValueRef tl_tid, trbl_tid; /* thread IDs of the two quad pixels read */
	unsigned swizzle[4];
	unsigned c;
	int idx;
	unsigned mask;

	/* LDS slot owned by this thread: lds[0][tid]. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = get_thread_id(ctx);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* Choose which neighbor serves as the "base" pixel of the quad:
	 * the coarse variants always use the top-left pixel. */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	tl_tid = LLVMBuildAnd(gallivm->builder, indices[1],
			      lp_build_const_int32(gallivm, mask), "");
	indices[1] = tl_tid;
	load_ptr0 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* for DDX we want the next X pixel, for DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	trbl_tid = LLVMBuildAdd(gallivm->builder, indices[1],
				lp_build_const_int32(gallivm, idx), "");
	indices[1] = trbl_tid;
	load_ptr1 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;
		LLVMValueRef val;
		LLVMValueRef args[2];

		/* Reuse the result of an earlier channel with the same
		 * source swizzle instead of recomputing it. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		val = LLVMBuildBitCast(gallivm->builder,
				       lp_build_emit_fetch(bld_base, inst, 0, c),
				       ctx->i32, "");

		/* On VI+ with a new enough LLVM, exchange values directly
		 * between lanes with ds.bpermute (the index argument is a
		 * byte address, hence the *4) instead of going through LDS. */
		if ((HAVE_LLVM >= 0x0309) && ctx->screen->b.family >= CHIP_TONGA) {

			args[0] = LLVMBuildMul(gallivm->builder, tl_tid,
					       lp_build_const_int32(gallivm, 4), "");
			args[1] = val;
			tl = lp_build_intrinsic(gallivm->builder,
						"llvm.amdgcn.ds.bpermute", ctx->i32,
						args, 2, LLVMReadNoneAttribute);

			args[0] = LLVMBuildMul(gallivm->builder, trbl_tid,
					       lp_build_const_int32(gallivm, 4), "");
			trbl = lp_build_intrinsic(gallivm->builder,
						  "llvm.amdgcn.ds.bpermute", ctx->i32,
						  args, 2, LLVMReadNoneAttribute);
		} else {
			/* Fallback: every thread writes its value to LDS, then
			 * reads the two neighbors' slots back. */
			LLVMBuildStore(gallivm->builder, val, store_ptr);
			tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
			trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		}
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");
		/* derivative = neighbor - base */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
5088
5089 /*
5090 * this takes an I,J coordinate pair,
5091 * and works out the X and Y derivatives.
5092 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
5093 */
static LLVMValueRef si_llvm_emit_ddxy_interp(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef interp_ij)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr_x, load_ptr_y, load_ptr_ddx, load_ptr_ddy, temp, temp2;
	LLVMValueRef tl, tr, bl, result[4];
	unsigned c;

	/* This thread's own LDS slot: lds[0][tid]. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = get_thread_id(ctx);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* temp  = TID of the left pixel in this pixel's row,
	 * temp2 = TID of the top pixel in this pixel's column
	 * (see the TID layout comment above si_llvm_emit_ddxy). */
	temp = LLVMBuildAnd(gallivm->builder, indices[1],
			    lp_build_const_int32(gallivm, TID_MASK_LEFT), "");

	temp2 = LLVMBuildAnd(gallivm->builder, indices[1],
			     lp_build_const_int32(gallivm, TID_MASK_TOP), "");

	indices[1] = temp;
	load_ptr_x = LLVMBuildGEP(gallivm->builder, ctx->lds,
				  indices, 2, "");

	indices[1] = temp2;
	load_ptr_y = LLVMBuildGEP(gallivm->builder, ctx->lds,
				  indices, 2, "");

	/* Left pixel + 1 = right neighbor; top pixel + 2 = bottom neighbor. */
	indices[1] = LLVMBuildAdd(gallivm->builder, temp,
				  lp_build_const_int32(gallivm, 1), "");
	load_ptr_ddx = LLVMBuildGEP(gallivm->builder, ctx->lds,
				   indices, 2, "");

	indices[1] = LLVMBuildAdd(gallivm->builder, temp2,
				  lp_build_const_int32(gallivm, 2), "");
	load_ptr_ddy = LLVMBuildGEP(gallivm->builder, ctx->lds,
				   indices, 2, "");

	/* For both I (c=0) and J (c=1): share the value through LDS, then
	 * form the horizontal difference (DDX, result[c]) and the vertical
	 * difference (DDY, result[c+2]). */
	for (c = 0; c < 2; ++c) {
		LLVMValueRef store_val;
		LLVMValueRef c_ll = lp_build_const_int32(gallivm, c);

		store_val = LLVMBuildExtractElement(gallivm->builder,
						    interp_ij, c_ll, "");
		LLVMBuildStore(gallivm->builder,
			       store_val,
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr_x, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		tr = LLVMBuildLoad(gallivm->builder, load_ptr_ddx, "");
		tr = LLVMBuildBitCast(gallivm->builder, tr, ctx->f32, "");

		result[c] = LLVMBuildFSub(gallivm->builder, tr, tl, "");

		tl = LLVMBuildLoad(gallivm->builder, load_ptr_y, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		bl = LLVMBuildLoad(gallivm->builder, load_ptr_ddy, "");
		bl = LLVMBuildBitCast(gallivm->builder, bl, ctx->f32, "");

		result[c + 2] = LLVMBuildFSub(gallivm->builder, bl, tl, "");
	}

	/* Returns (DDX(I), DDX(J), DDY(I), DDY(J)). */
	return lp_build_gather_values(gallivm, result, 4);
}
5163
5164 static void interp_fetch_args(
5165 struct lp_build_tgsi_context *bld_base,
5166 struct lp_build_emit_data *emit_data)
5167 {
5168 struct si_shader_context *ctx = si_shader_context(bld_base);
5169 struct gallivm_state *gallivm = bld_base->base.gallivm;
5170 const struct tgsi_full_instruction *inst = emit_data->inst;
5171
5172 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
5173 /* offset is in second src, first two channels */
5174 emit_data->args[0] = lp_build_emit_fetch(bld_base,
5175 emit_data->inst, 1,
5176 TGSI_CHAN_X);
5177 emit_data->args[1] = lp_build_emit_fetch(bld_base,
5178 emit_data->inst, 1,
5179 TGSI_CHAN_Y);
5180 emit_data->arg_count = 2;
5181 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5182 LLVMValueRef sample_position;
5183 LLVMValueRef sample_id;
5184 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
5185
5186 /* fetch sample ID, then fetch its sample position,
5187 * and place into first two channels.
5188 */
5189 sample_id = lp_build_emit_fetch(bld_base,
5190 emit_data->inst, 1, TGSI_CHAN_X);
5191 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
5192 ctx->i32, "");
5193 sample_position = load_sample_position(&ctx->radeon_bld, sample_id);
5194
5195 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
5196 sample_position,
5197 lp_build_const_int32(gallivm, 0), "");
5198
5199 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
5200 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
5201 sample_position,
5202 lp_build_const_int32(gallivm, 1), "");
5203 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
5204 emit_data->arg_count = 2;
5205 }
5206 }
5207
5208 static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
5209 struct lp_build_tgsi_context *bld_base,
5210 struct lp_build_emit_data *emit_data)
5211 {
5212 struct si_shader_context *ctx = si_shader_context(bld_base);
5213 struct si_shader *shader = ctx->shader;
5214 struct gallivm_state *gallivm = bld_base->base.gallivm;
5215 LLVMValueRef interp_param;
5216 const struct tgsi_full_instruction *inst = emit_data->inst;
5217 const char *intr_name;
5218 int input_index = inst->Src[0].Register.Index;
5219 int chan;
5220 int i;
5221 LLVMValueRef attr_number;
5222 LLVMValueRef params = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
5223 int interp_param_idx;
5224 unsigned interp = shader->selector->info.input_interpolate[input_index];
5225 unsigned location;
5226
5227 assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);
5228
5229 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5230 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
5231 location = TGSI_INTERPOLATE_LOC_CENTER;
5232 else
5233 location = TGSI_INTERPOLATE_LOC_CENTROID;
5234
5235 interp_param_idx = lookup_interp_param_index(interp, location);
5236 if (interp_param_idx == -1)
5237 return;
5238 else if (interp_param_idx)
5239 interp_param = get_interp_param(ctx, interp_param_idx);
5240 else
5241 interp_param = NULL;
5242
5243 attr_number = lp_build_const_int32(gallivm, input_index);
5244
5245 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
5246 inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5247 LLVMValueRef ij_out[2];
5248 LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);
5249
5250 /*
5251 * take the I then J parameters, and the DDX/Y for it, and
5252 * calculate the IJ inputs for the interpolator.
5253 * temp1 = ddx * offset/sample.x + I;
5254 * interp_param.I = ddy * offset/sample.y + temp1;
5255 * temp1 = ddx * offset/sample.x + J;
5256 * interp_param.J = ddy * offset/sample.y + temp1;
5257 */
5258 for (i = 0; i < 2; i++) {
5259 LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
5260 LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
5261 LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
5262 ddxy_out, ix_ll, "");
5263 LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
5264 ddxy_out, iy_ll, "");
5265 LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
5266 interp_param, ix_ll, "");
5267 LLVMValueRef temp1, temp2;
5268
5269 interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
5270 ctx->f32, "");
5271
5272 temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");
5273
5274 temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");
5275
5276 temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");
5277
5278 temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
5279
5280 ij_out[i] = LLVMBuildBitCast(gallivm->builder,
5281 temp2, ctx->i32, "");
5282 }
5283 interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
5284 }
5285
5286 intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
5287 for (chan = 0; chan < 2; chan++) {
5288 LLVMValueRef args[4];
5289 LLVMValueRef llvm_chan;
5290 unsigned schan;
5291
5292 schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
5293 llvm_chan = lp_build_const_int32(gallivm, schan);
5294
5295 args[0] = llvm_chan;
5296 args[1] = attr_number;
5297 args[2] = params;
5298 args[3] = interp_param;
5299
5300 emit_data->output[chan] =
5301 lp_build_intrinsic(gallivm->builder, intr_name,
5302 ctx->f32, args, args[3] ? 4 : 3,
5303 LLVMReadNoneAttribute);
5304 }
5305 }
5306
5307 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
5308 struct lp_build_emit_data *emit_data)
5309 {
5310 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
5311 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
5312 unsigned stream;
5313
5314 assert(src0.File == TGSI_FILE_IMMEDIATE);
5315
5316 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
5317 return stream;
5318 }
5319
5320 /* Emit one vertex from the geometry shader */
5321 static void si_llvm_emit_vertex(
5322 const struct lp_build_tgsi_action *action,
5323 struct lp_build_tgsi_context *bld_base,
5324 struct lp_build_emit_data *emit_data)
5325 {
5326 struct si_shader_context *ctx = si_shader_context(bld_base);
5327 struct lp_build_context *uint = &bld_base->uint_bld;
5328 struct si_shader *shader = ctx->shader;
5329 struct tgsi_shader_info *info = &shader->selector->info;
5330 struct gallivm_state *gallivm = bld_base->base.gallivm;
5331 LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
5332 SI_PARAM_GS2VS_OFFSET);
5333 LLVMValueRef gs_next_vertex;
5334 LLVMValueRef can_emit, kill;
5335 LLVMValueRef args[2];
5336 unsigned chan;
5337 int i;
5338 unsigned stream;
5339
5340 stream = si_llvm_get_stream(bld_base, emit_data);
5341
5342 /* Write vertex attribute values to GSVS ring */
5343 gs_next_vertex = LLVMBuildLoad(gallivm->builder,
5344 ctx->gs_next_vertex[stream],
5345 "");
5346
5347 /* If this thread has already emitted the declared maximum number of
5348 * vertices, kill it: excessive vertex emissions are not supposed to
5349 * have any effect, and GS threads have no externally observable
5350 * effects other than emitting vertices.
5351 */
5352 can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
5353 lp_build_const_int32(gallivm,
5354 shader->selector->gs_max_out_vertices), "");
5355 kill = lp_build_select(&bld_base->base, can_emit,
5356 lp_build_const_float(gallivm, 1.0f),
5357 lp_build_const_float(gallivm, -1.0f));
5358
5359 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
5360 ctx->voidt, &kill, 1, 0);
5361
5362 for (i = 0; i < info->num_outputs; i++) {
5363 LLVMValueRef *out_ptr =
5364 ctx->radeon_bld.soa.outputs[i];
5365
5366 for (chan = 0; chan < 4; chan++) {
5367 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
5368 LLVMValueRef voffset =
5369 lp_build_const_int32(gallivm, (i * 4 + chan) *
5370 shader->selector->gs_max_out_vertices);
5371
5372 voffset = lp_build_add(uint, voffset, gs_next_vertex);
5373 voffset = lp_build_mul_imm(uint, voffset, 4);
5374
5375 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
5376
5377 build_tbuffer_store(ctx,
5378 ctx->gsvs_ring[stream],
5379 out_val, 1,
5380 voffset, soffset, 0,
5381 V_008F0C_BUF_DATA_FORMAT_32,
5382 V_008F0C_BUF_NUM_FORMAT_UINT,
5383 1, 0, 1, 1, 0);
5384 }
5385 }
5386 gs_next_vertex = lp_build_add(uint, gs_next_vertex,
5387 lp_build_const_int32(gallivm, 1));
5388
5389 LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
5390
5391 /* Signal vertex emission */
5392 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
5393 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
5394 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5395 ctx->voidt, args, 2, 0);
5396 }
5397
5398 /* Cut one primitive from the geometry shader */
5399 static void si_llvm_emit_primitive(
5400 const struct lp_build_tgsi_action *action,
5401 struct lp_build_tgsi_context *bld_base,
5402 struct lp_build_emit_data *emit_data)
5403 {
5404 struct si_shader_context *ctx = si_shader_context(bld_base);
5405 struct gallivm_state *gallivm = bld_base->base.gallivm;
5406 LLVMValueRef args[2];
5407 unsigned stream;
5408
5409 /* Signal primitive cut */
5410 stream = si_llvm_get_stream(bld_base, emit_data);
5411 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
5412 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
5413 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5414 ctx->voidt, args, 2, 0);
5415 }
5416
5417 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
5418 struct lp_build_tgsi_context *bld_base,
5419 struct lp_build_emit_data *emit_data)
5420 {
5421 struct si_shader_context *ctx = si_shader_context(bld_base);
5422 struct gallivm_state *gallivm = bld_base->base.gallivm;
5423
5424 /* The real barrier instruction isn’t needed, because an entire patch
5425 * always fits into a single wave.
5426 */
5427 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
5428 emit_optimization_barrier(ctx);
5429 return;
5430 }
5431
5432 lp_build_intrinsic(gallivm->builder,
5433 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
5434 : "llvm.AMDGPU.barrier.local",
5435 ctx->voidt, NULL, 0, 0);
5436 }
5437
/* TGSI texturing opcodes: argument assembly + intrinsic emission. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
5442
/* TGSI INTERP_* opcodes: offset/sample-position fetch + interpolation. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
5447
/* Create the main LLVM function for the shader and annotate its
 * parameters. Parameters 0..last_sgpr are the SGPR arguments; the
 * remaining parameters are VGPRs.
 */
static void si_create_function(struct si_shader_context *ctx,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr)
{
	int i;

	radeon_llvm_create_func(&ctx->radeon_bld, returns, num_returns,
				params, num_params);
	radeon_llvm_shader_type(ctx->radeon_bld.main_fn, ctx->type);
	/* Start from an undef return value; callers fill in the lanes. */
	ctx->return_value = LLVMGetUndef(ctx->radeon_bld.return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->radeon_bld.main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			LLVMAddAttribute(P, LLVMByValAttribute);
			lp_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			/* Non-pointer SGPR arguments are marked inreg. */
			LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->radeon_bld.main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->radeon_bld.main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->radeon_bld.main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->radeon_bld.main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
5493
5494 static void create_meta_data(struct si_shader_context *ctx)
5495 {
5496 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
5497
5498 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5499 "invariant.load", 14);
5500 ctx->range_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5501 "range", 5);
5502 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5503 "amdgpu.uniform", 14);
5504
5505 ctx->empty_md = LLVMMDNodeInContext(gallivm->context, NULL, 0);
5506 }
5507
5508 static void declare_streamout_params(struct si_shader_context *ctx,
5509 struct pipe_stream_output_info *so,
5510 LLVMTypeRef *params, LLVMTypeRef i32,
5511 unsigned *num_params)
5512 {
5513 int i;
5514
5515 /* Streamout SGPRs. */
5516 if (so->num_outputs) {
5517 if (ctx->type != PIPE_SHADER_TESS_EVAL)
5518 params[ctx->param_streamout_config = (*num_params)++] = i32;
5519 else
5520 ctx->param_streamout_config = ctx->param_tess_offchip;
5521
5522 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
5523 }
5524 /* A streamout buffer offset is loaded if the stride is non-zero. */
5525 for (i = 0; i < 4; i++) {
5526 if (!so->stride[i])
5527 continue;
5528
5529 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
5530 }
5531 }
5532
5533 static unsigned llvm_get_type_size(LLVMTypeRef type)
5534 {
5535 LLVMTypeKind kind = LLVMGetTypeKind(type);
5536
5537 switch (kind) {
5538 case LLVMIntegerTypeKind:
5539 return LLVMGetIntTypeWidth(type) / 8;
5540 case LLVMFloatTypeKind:
5541 return 4;
5542 case LLVMPointerTypeKind:
5543 return 8;
5544 case LLVMVectorTypeKind:
5545 return LLVMGetVectorSize(type) *
5546 llvm_get_type_size(LLVMGetElementType(type));
5547 default:
5548 assert(0);
5549 return 0;
5550 }
5551 }
5552
5553 static void declare_tess_lds(struct si_shader_context *ctx)
5554 {
5555 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
5556 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5557 struct lp_build_context *uint = &bld_base->uint_bld;
5558
5559 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
5560 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, uint->zero,
5561 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
5562 "tess_lds");
5563 }
5564
/* Build the LLVM function signature (inputs, last SGPR index, return values)
 * for the current shader stage, then create the function and set up any
 * per-stage global state (PS input attributes, compute work-group size,
 * DDX/DDY scratch LDS, tessellation LDS).
 *
 * The parameter order here must match the hardware's SGPR/VGPR input layout
 * for each stage; return values are only declared for non-monolithic shader
 * parts that pass data to an epilog.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
	LLVMTypeRef returns[16+32*4];
	unsigned i, last_sgpr, num_params, num_return_sgprs;
	unsigned num_returns = 0;

	v3i32 = LLVMVectorType(ctx->i32, 3);

	/* Descriptor arrays common to all stages. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
	params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
	params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
	params[SI_PARAM_SHADER_BUFFERS] = const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
		params[SI_PARAM_BASE_VERTEX] = ctx->i32;
		params[SI_PARAM_START_INSTANCE] = ctx->i32;
		params[SI_PARAM_DRAWID] = ctx->i32;
		num_params = SI_PARAM_DRAWID+1;

		if (shader->key.vs.as_es) {
			/* VS-as-ES: needs the ES->GS ring offset. */
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.vs.as_ls) {
			/* VS-as-LS: needs the LS output layout. */
			params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
			num_params = SI_PARAM_LS_OUT_LAYOUT+1;
		} else {
			if (ctx->is_gs_copy_shader) {
				num_params = SI_PARAM_RW_BUFFERS+1;
			} else {
				params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
				num_params = SI_PARAM_VS_STATE_BITS+1;
			}

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[ctx->param_vertex_id = num_params++] = ctx->i32;
		params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
		params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_instance_id = num_params++] = ctx->i32;

		if (!ctx->is_monolithic &&
		    !ctx->is_gs_copy_shader) {
			/* Vertex load indices. */
			ctx->param_vertex_index0 = num_params;

			for (i = 0; i < shader->selector->info.num_inputs; i++)
				params[num_params++] = ctx->i32;

			/* PrimitiveID output. */
			if (!shader->key.vs.as_es && !shader->key.vs.as_ls)
				for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
					returns[num_returns++] = ctx->f32;
		}
		break;

	case PIPE_SHADER_TESS_CTRL:
		params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
		params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
		params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
		last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;

		/* VGPRs */
		params[SI_PARAM_PATCH_ID] = ctx->i32;
		params[SI_PARAM_REL_IDS] = ctx->i32;
		num_params = SI_PARAM_REL_IDS+1;

		if (!ctx->is_monolithic) {
			/* SI_PARAM_TCS_OC_LDS and PARAM_TESS_FACTOR_OFFSET are
			 * placed after the user SGPRs.
			 */
			for (i = 0; i < SI_TCS_NUM_USER_SGPR + 2; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */

			for (i = 0; i < 3; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
		num_params = SI_PARAM_TCS_OFFCHIP_LAYOUT+1;

		if (shader->key.tes.as_es) {
			params[ctx->param_oc_lds = num_params++] = ctx->i32;
			params[ctx->param_tess_offchip = num_params++] = ctx->i32;
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			params[ctx->param_tess_offchip = num_params++] = ctx->i32;
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
			params[ctx->param_oc_lds = num_params++] = ctx->i32;
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tes_u = num_params++] = ctx->f32;
		params[ctx->param_tes_v = num_params++] = ctx->f32;
		params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tes_patch_id = num_params++] = ctx->i32;

		/* PrimitiveID output. */
		if (!ctx->is_monolithic && !shader->key.tes.as_es)
			for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
				returns[num_returns++] = ctx->f32;
		break;

	case PIPE_SHADER_GEOMETRY:
		params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
		last_sgpr = SI_PARAM_GS_WAVE_ID;

		/* VGPRs */
		params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
		params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
		params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
		num_params = SI_PARAM_GS_INSTANCE_ID+1;
		break;

	case PIPE_SHADER_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		/* Interpolation (I,J) pairs and position VGPRs follow the
		 * fixed SPI_PS_INPUT layout. */
		params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
		params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
		num_params = SI_PARAM_POS_FIXED_PT+1;

		if (!ctx->is_monolithic) {
			/* Color inputs from the prolog. */
			if (shader->selector->info.colors_read) {
				unsigned num_color_elements =
					util_bitcount(shader->selector->info.colors_read);

				assert(num_params + num_color_elements <= ARRAY_SIZE(params));
				for (i = 0; i < num_color_elements; i++)
					params[num_params++] = ctx->f32;
			}

			/* Outputs for the epilog. */
			num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
			num_returns =
				num_return_sgprs +
				util_bitcount(shader->selector->info.colors_written) * 4 +
				shader->selector->info.writes_z +
				shader->selector->info.writes_stencil +
				shader->selector->info.writes_samplemask +
				1 /* SampleMaskIn */;

			num_returns = MAX2(num_returns,
					   num_return_sgprs +
					   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

			for (i = 0; i < num_return_sgprs; i++)
				returns[i] = ctx->i32;
			for (; i < num_returns; i++)
				returns[i] = ctx->f32;
		}
		break;

	case PIPE_SHADER_COMPUTE:
		params[SI_PARAM_GRID_SIZE] = v3i32;
		params[SI_PARAM_BLOCK_ID] = v3i32;
		last_sgpr = SI_PARAM_BLOCK_ID;

		params[SI_PARAM_THREAD_ID] = v3i32;
		num_params = SI_PARAM_THREAD_ID + 1;
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= ARRAY_SIZE(params));

	si_create_function(ctx, returns, num_returns, params,
			   num_params, last_sgpr);

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT &&
	    !ctx->is_monolithic) {
		radeon_llvm_add_attribute(ctx->radeon_bld.main_fn,
					  "InitialPSInputAddr",
					  S_0286D0_PERSP_SAMPLE_ENA(1) |
					  S_0286D0_PERSP_CENTER_ENA(1) |
					  S_0286D0_PERSP_CENTROID_ENA(1) |
					  S_0286D0_LINEAR_SAMPLE_ENA(1) |
					  S_0286D0_LINEAR_CENTER_ENA(1) |
					  S_0286D0_LINEAR_CENTROID_ENA(1) |
					  S_0286D0_FRONT_FACE_ENA(1) |
					  S_0286D0_POS_FIXED_PT_ENA(1));
	} else if (ctx->type == PIPE_SHADER_COMPUTE) {
		/* Tell LLVM the maximum work-group size so it can size
		 * register usage accordingly. */
		const unsigned *properties = shader->selector->info.properties;
		unsigned max_work_group_size =
		               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
		               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
		               properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];

		assert(max_work_group_size);

		radeon_llvm_add_attribute(ctx->radeon_bld.main_fn,
		                          "amdgpu-max-work-group-size",
		                          max_work_group_size);
	}

	shader->info.num_input_sgprs = 0;
	shader->info.num_input_vgprs = 0;

	/* SGPR inputs are everything up to and including last_sgpr. */
	for (i = 0; i <= last_sgpr; ++i)
		shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;

	/* Unused fragment shader inputs are eliminated by the compiler,
	 * so we don't know yet how many there will be.
	 */
	if (ctx->type != PIPE_SHADER_FRAGMENT)
		for (; i < num_params; ++i)
			shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;

	/* Derivative and offset/sample interpolation opcodes need LDS
	 * scratch space for cross-lane data exchange. */
	if (bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if ((ctx->type == PIPE_SHADER_VERTEX && shader->key.vs.as_ls) ||
	    ctx->type == PIPE_SHADER_TESS_CTRL ||
	    ctx->type == PIPE_SHADER_TESS_EVAL)
		declare_tess_lds(ctx);
}
5833
5834 static void preload_constants(struct si_shader_context *ctx)
5835 {
5836 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5837 struct gallivm_state *gallivm = bld_base->base.gallivm;
5838 const struct tgsi_shader_info *info = bld_base->info;
5839 unsigned buf;
5840 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
5841
5842 for (buf = 0; buf < SI_NUM_CONST_BUFFERS; buf++) {
5843 if (info->const_file_max[buf] == -1)
5844 continue;
5845
5846 /* Load the resource descriptor */
5847 ctx->const_buffers[buf] =
5848 build_indexed_load_const(ctx, ptr, lp_build_const_int32(gallivm, buf));
5849 }
5850 }
5851
5852 static void preload_shader_buffers(struct si_shader_context *ctx)
5853 {
5854 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
5855 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_SHADER_BUFFERS);
5856 int buf, maxbuf;
5857
5858 maxbuf = MIN2(ctx->shader->selector->info.file_max[TGSI_FILE_BUFFER],
5859 SI_NUM_SHADER_BUFFERS - 1);
5860 for (buf = 0; buf <= maxbuf; ++buf) {
5861 ctx->shader_buffers[buf] =
5862 build_indexed_load_const(
5863 ctx, ptr, lp_build_const_int32(gallivm, buf));
5864 }
5865 }
5866
5867 static void preload_samplers(struct si_shader_context *ctx)
5868 {
5869 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5870 struct gallivm_state *gallivm = bld_base->base.gallivm;
5871 const struct tgsi_shader_info *info = bld_base->info;
5872 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
5873 LLVMValueRef offset;
5874
5875 if (num_samplers == 0)
5876 return;
5877
5878 /* Load the resources and samplers, we rely on the code sinking to do the rest */
5879 for (i = 0; i < num_samplers; ++i) {
5880 /* Resource */
5881 offset = lp_build_const_int32(gallivm, i);
5882 ctx->sampler_views[i] =
5883 get_sampler_desc(ctx, offset, DESC_IMAGE);
5884
5885 /* FMASK resource */
5886 if (info->is_msaa_sampler[i])
5887 ctx->fmasks[i] =
5888 get_sampler_desc(ctx, offset, DESC_FMASK);
5889 else {
5890 ctx->sampler_states[i] =
5891 get_sampler_desc(ctx, offset, DESC_SAMPLER);
5892 ctx->sampler_states[i] =
5893 sici_fix_sampler_aniso(ctx, ctx->sampler_views[i],
5894 ctx->sampler_states[i]);
5895 }
5896 }
5897 }
5898
5899 static void preload_images(struct si_shader_context *ctx)
5900 {
5901 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5902 struct tgsi_shader_info *info = &ctx->shader->selector->info;
5903 struct gallivm_state *gallivm = bld_base->base.gallivm;
5904 unsigned num_images = bld_base->info->file_max[TGSI_FILE_IMAGE] + 1;
5905 LLVMValueRef res_ptr;
5906 unsigned i;
5907
5908 if (num_images == 0)
5909 return;
5910
5911 res_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_IMAGES);
5912
5913 for (i = 0; i < num_images; ++i) {
5914 /* Rely on LLVM to shrink the load for buffer resources. */
5915 LLVMValueRef rsrc =
5916 build_indexed_load_const(ctx, res_ptr,
5917 lp_build_const_int32(gallivm, i));
5918
5919 if (info->images_writemask & (1 << i) &&
5920 !(info->images_buffers & (1 << i)))
5921 rsrc = force_dcc_off(ctx, rsrc);
5922
5923 ctx->images[i] = rsrc;
5924 }
5925 }
5926
5927 static void preload_streamout_buffers(struct si_shader_context *ctx)
5928 {
5929 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
5930 struct gallivm_state *gallivm = bld_base->base.gallivm;
5931 unsigned i;
5932
5933 /* Streamout can only be used if the shader is compiled as VS. */
5934 if (!ctx->shader->selector->so.num_outputs ||
5935 (ctx->type == PIPE_SHADER_VERTEX &&
5936 (ctx->shader->key.vs.as_es ||
5937 ctx->shader->key.vs.as_ls)) ||
5938 (ctx->type == PIPE_SHADER_TESS_EVAL &&
5939 ctx->shader->key.tes.as_es))
5940 return;
5941
5942 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
5943 SI_PARAM_RW_BUFFERS);
5944
5945 /* Load the resources, we rely on the code sinking to do the rest */
5946 for (i = 0; i < 4; ++i) {
5947 if (ctx->shader->selector->so.stride[i]) {
5948 LLVMValueRef offset = lp_build_const_int32(gallivm,
5949 SI_VS_STREAMOUT_BUF0 + i);
5950
5951 ctx->so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
5952 }
5953 }
5954 }
5955
5956 /**
5957 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
5958 * for later use.
5959 */
5960 static void preload_ring_buffers(struct si_shader_context *ctx)
5961 {
5962 struct gallivm_state *gallivm =
5963 ctx->radeon_bld.soa.bld_base.base.gallivm;
5964
5965 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
5966 SI_PARAM_RW_BUFFERS);
5967
5968 if ((ctx->type == PIPE_SHADER_VERTEX &&
5969 ctx->shader->key.vs.as_es) ||
5970 (ctx->type == PIPE_SHADER_TESS_EVAL &&
5971 ctx->shader->key.tes.as_es) ||
5972 ctx->type == PIPE_SHADER_GEOMETRY) {
5973 unsigned ring =
5974 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
5975 : SI_ES_RING_ESGS;
5976 LLVMValueRef offset = lp_build_const_int32(gallivm, ring);
5977
5978 ctx->esgs_ring =
5979 build_indexed_load_const(ctx, buf_ptr, offset);
5980 }
5981
5982 if (ctx->is_gs_copy_shader) {
5983 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_VS_RING_GSVS);
5984
5985 ctx->gsvs_ring[0] =
5986 build_indexed_load_const(ctx, buf_ptr, offset);
5987 }
5988 if (ctx->type == PIPE_SHADER_GEOMETRY) {
5989 int i;
5990 for (i = 0; i < 4; i++) {
5991 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_GS_RING_GSVS0 + i);
5992
5993 ctx->gsvs_ring[i] =
5994 build_indexed_load_const(ctx, buf_ptr, offset);
5995 }
5996 }
5997 }
5998
/* Emit code that kills the fragment if the 32x32 polygon stipple pattern
 * bit for this pixel is 0.
 *
 * \param param_rw_buffers   pointer to the RW buffer descriptor array
 * \param param_pos_fixed_pt index of the fixed-point position input
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct lp_build_tgsi_context *bld_base =
		&ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = lp_build_const_int32(gallivm, SI_PS_CONST_POLY_STIPPLE);
	desc = build_indexed_load_const(ctx, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = LLVMBuildBitCast(builder, row, ctx->i32, "");
	/* Extract the bit for this pixel's X position within the row. */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	lp_build_intrinsic(builder, "llvm.AMDGPU.kill", ctx->voidt, &bit, 1, 0);
}
6033
/* Parse the register/value pairs that LLVM emits into the binary's config
 * section and fill in the shader resource usage (SGPRs, VGPRs, LDS,
 * scratch, SPI PS inputs, spills).
 *
 * \param binary         compiled shader binary produced by LLVM
 * \param conf           output config; fields are accumulated with MAX2 so
 *                       the function may be called for multiple symbols
 * \param symbol_offset  offset of the symbol whose config block to read
 */
void si_shader_binary_read_config(struct radeon_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		radeon_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct radeon_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* The config block is a sequence of (register, value) dword pairs. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* SGPRS/VGPRS fields are in units of 8 and 4 regs. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode =  G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
			{
				static bool printed;

				/* Warn only once per process for unknown registers. */
				if (!printed) {
					fprintf(stderr, "Warning: LLVM emitted unknown "
						"config register: 0x%x\n", reg);
					printed = true;
				}
			}
			break;
		}
	}

	/* Older LLVM doesn't emit INPUT_ADDR; fall back to INPUT_ENA. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
6116
6117 void si_shader_apply_scratch_relocs(struct si_context *sctx,
6118 struct si_shader *shader,
6119 struct si_shader_config *config,
6120 uint64_t scratch_va)
6121 {
6122 unsigned i;
6123 uint32_t scratch_rsrc_dword0 = scratch_va;
6124 uint32_t scratch_rsrc_dword1 =
6125 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
6126
6127 /* Enable scratch coalescing if LLVM sets ELEMENT_SIZE & INDEX_STRIDE
6128 * correctly.
6129 */
6130 if (HAVE_LLVM >= 0x0309)
6131 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
6132 else
6133 scratch_rsrc_dword1 |=
6134 S_008F04_STRIDE(config->scratch_bytes_per_wave / 64);
6135
6136 for (i = 0 ; i < shader->binary.reloc_count; i++) {
6137 const struct radeon_shader_reloc *reloc =
6138 &shader->binary.relocs[i];
6139 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
6140 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
6141 &scratch_rsrc_dword0, 4);
6142 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
6143 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
6144 &scratch_rsrc_dword1, 4);
6145 }
6146 }
6147 }
6148
6149 static unsigned si_get_shader_binary_size(struct si_shader *shader)
6150 {
6151 unsigned size = shader->binary.code_size;
6152
6153 if (shader->prolog)
6154 size += shader->prolog->binary.code_size;
6155 if (shader->epilog)
6156 size += shader->epilog->binary.code_size;
6157 return size;
6158 }
6159
6160 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
6161 {
6162 const struct radeon_shader_binary *prolog =
6163 shader->prolog ? &shader->prolog->binary : NULL;
6164 const struct radeon_shader_binary *epilog =
6165 shader->epilog ? &shader->epilog->binary : NULL;
6166 const struct radeon_shader_binary *mainb = &shader->binary;
6167 unsigned bo_size = si_get_shader_binary_size(shader) +
6168 (!epilog ? mainb->rodata_size : 0);
6169 unsigned char *ptr;
6170
6171 assert(!prolog || !prolog->rodata_size);
6172 assert((!prolog && !epilog) || !mainb->rodata_size);
6173 assert(!epilog || !epilog->rodata_size);
6174
6175 r600_resource_reference(&shader->bo, NULL);
6176 shader->bo = si_resource_create_custom(&sscreen->b.b,
6177 PIPE_USAGE_IMMUTABLE,
6178 bo_size);
6179 if (!shader->bo)
6180 return -ENOMEM;
6181
6182 /* Upload. */
6183 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
6184 PIPE_TRANSFER_READ_WRITE);
6185
6186 if (prolog) {
6187 util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
6188 ptr += prolog->code_size;
6189 }
6190
6191 util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
6192 ptr += mainb->code_size;
6193
6194 if (epilog)
6195 util_memcpy_cpu_to_le32(ptr, epilog->code, epilog->code_size);
6196 else if (mainb->rodata_size > 0)
6197 util_memcpy_cpu_to_le32(ptr, mainb->rodata, mainb->rodata_size);
6198
6199 sscreen->b.ws->buffer_unmap(shader->bo->buf);
6200 return 0;
6201 }
6202
6203 static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
6204 struct pipe_debug_callback *debug,
6205 const char *name, FILE *file)
6206 {
6207 char *line, *p;
6208 unsigned i, count;
6209
6210 if (binary->disasm_string) {
6211 fprintf(file, "Shader %s disassembly:\n", name);
6212 fprintf(file, "%s", binary->disasm_string);
6213
6214 if (debug && debug->debug_message) {
6215 /* Very long debug messages are cut off, so send the
6216 * disassembly one line at a time. This causes more
6217 * overhead, but on the plus side it simplifies
6218 * parsing of resulting logs.
6219 */
6220 pipe_debug_message(debug, SHADER_INFO,
6221 "Shader Disassembly Begin");
6222
6223 line = binary->disasm_string;
6224 while (*line) {
6225 p = util_strchrnul(line, '\n');
6226 count = p - line;
6227
6228 if (count) {
6229 pipe_debug_message(debug, SHADER_INFO,
6230 "%.*s", count, line);
6231 }
6232
6233 if (!*p)
6234 break;
6235 line = p + 1;
6236 }
6237
6238 pipe_debug_message(debug, SHADER_INFO,
6239 "Shader Disassembly End");
6240 }
6241 } else {
6242 fprintf(file, "Shader %s binary:\n", name);
6243 for (i = 0; i < binary->code_size; i += 4) {
6244 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
6245 binary->code[i + 3], binary->code[i + 2],
6246 binary->code[i + 1], binary->code[i]);
6247 }
6248 }
6249 }
6250
/* Print shader resource statistics (register counts, LDS, scratch, and the
 * resulting per-SIMD wave limit) to \p file and the debug callback.
 *
 * \param num_inputs  number of PS inputs (used to estimate LDS for
 *                    interpolation data); ignored for other stages
 * \param code_size   total machine code size in bytes
 * \param processor   PIPE_SHADER_* stage being reported
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
			         struct si_shader_config *conf,
				 unsigned num_inputs,
				 unsigned code_size,
			         struct pipe_debug_callback *debug,
			         unsigned processor,
				 FILE *file)
{
	/* LDS allocation granularity: 512 dwords on CIK+, 256 on SI. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves = 10;

	/* Compute LDS usage for PS. */
	if (processor == PIPE_SHADER_FRAGMENT) {
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* VI has 800 addressable SGPRs; SI/CI have 512. */
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* Each SIMD has 256 VGPRs shared among its waves. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (file != stderr ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs);
}
6330
6331 static const char *si_get_shader_name(struct si_shader *shader,
6332 unsigned processor)
6333 {
6334 switch (processor) {
6335 case PIPE_SHADER_VERTEX:
6336 if (shader->key.vs.as_es)
6337 return "Vertex Shader as ES";
6338 else if (shader->key.vs.as_ls)
6339 return "Vertex Shader as LS";
6340 else
6341 return "Vertex Shader as VS";
6342 case PIPE_SHADER_TESS_CTRL:
6343 return "Tessellation Control Shader";
6344 case PIPE_SHADER_TESS_EVAL:
6345 if (shader->key.tes.as_es)
6346 return "Tessellation Evaluation Shader as ES";
6347 else
6348 return "Tessellation Evaluation Shader as VS";
6349 case PIPE_SHADER_GEOMETRY:
6350 if (shader->gs_copy_shader == NULL)
6351 return "GS Copy Shader as VS";
6352 else
6353 return "Geometry Shader";
6354 case PIPE_SHADER_FRAGMENT:
6355 return "Pixel Shader";
6356 case PIPE_SHADER_COMPUTE:
6357 return "Compute Shader";
6358 default:
6359 return "Unknown Shader";
6360 }
6361 }
6362
6363 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
6364 struct pipe_debug_callback *debug, unsigned processor,
6365 FILE *file)
6366 {
6367 if (file != stderr ||
6368 r600_can_dump_shader(&sscreen->b, processor))
6369 si_dump_shader_key(processor, &shader->key, file);
6370
6371 if (file != stderr && shader->binary.llvm_ir_string) {
6372 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
6373 si_get_shader_name(shader, processor));
6374 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
6375 }
6376
6377 if (file != stderr ||
6378 (r600_can_dump_shader(&sscreen->b, processor) &&
6379 !(sscreen->b.debug_flags & DBG_NO_ASM))) {
6380 fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
6381
6382 if (shader->prolog)
6383 si_shader_dump_disassembly(&shader->prolog->binary,
6384 debug, "prolog", file);
6385
6386 si_shader_dump_disassembly(&shader->binary, debug, "main", file);
6387
6388 if (shader->epilog)
6389 si_shader_dump_disassembly(&shader->epilog->binary,
6390 debug, "epilog", file);
6391 fprintf(file, "\n");
6392 }
6393
6394 si_shader_dump_stats(sscreen, &shader->config,
6395 shader->selector ? shader->selector->info.num_inputs : 0,
6396 si_get_shader_binary_size(shader), debug, processor,
6397 file);
6398 }
6399
/* Compile an LLVM module to machine code and read back its config
 * registers into \p conf.
 *
 * Side effects: may dump pre-compilation IR to stderr, records the IR
 * string in the binary if requested, frees the binary's config and
 * global-symbol arrays after parsing, and forces FP64/FP16 denormals on.
 *
 * \return 0 on success, an error code from the compiler or -EINVAL when a
 *         concatenable shader part carries rodata (which is not allowed).
 */
int si_compile_llvm(struct si_screen *sscreen,
		    struct radeon_shader_binary *binary,
		    struct si_shader_config *conf,
		    LLVMTargetMachineRef tm,
		    LLVMModuleRef mod,
		    struct pipe_debug_callback *debug,
		    unsigned processor,
		    const char *name)
{
	int r = 0;
	unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);

	if (r600_can_dump_shader(&sscreen->b, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			LLVMDumpModule(mod);
			fprintf(stderr, "\n");
		}
	}

	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* si_replace_shader may substitute a binary from disk for debugging;
	 * only compile when no replacement was loaded. */
	if (!si_replace_shader(count, binary)) {
		r = radeon_llvm_compile(mod, binary, tm, debug);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The config data has been consumed; release it. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;

	/* Some shaders can't have rodata because their binaries can be
	 * concatenated.
	 */
	if (binary->rodata_size &&
	    (processor == PIPE_SHADER_VERTEX ||
	     processor == PIPE_SHADER_TESS_CTRL ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_FRAGMENT)) {
		fprintf(stderr, "radeonsi: The shader can't have rodata.");
		return -EINVAL;
	}

	return r;
}
6469
6470 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
6471 {
6472 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
6473 LLVMBuildRetVoid(ctx->radeon_bld.gallivm.builder);
6474 else
6475 LLVMBuildRet(ctx->radeon_bld.gallivm.builder, ret);
6476 }
6477
6478 /* Generate code for the hardware VS shader stage to go with a geometry shader */
6479 static int si_generate_gs_copy_shader(struct si_screen *sscreen,
6480 struct si_shader_context *ctx,
6481 struct si_shader *gs,
6482 struct pipe_debug_callback *debug)
6483 {
6484 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
6485 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
6486 struct lp_build_context *uint = &bld_base->uint_bld;
6487 struct si_shader_output_values *outputs;
6488 struct tgsi_shader_info *gsinfo = &gs->selector->info;
6489 LLVMValueRef args[9];
6490 int i, r;
6491
6492 outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));
6493
6494 si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm);
6495 ctx->type = PIPE_SHADER_VERTEX;
6496 ctx->is_gs_copy_shader = true;
6497
6498 create_meta_data(ctx);
6499 create_function(ctx);
6500 preload_streamout_buffers(ctx);
6501 preload_ring_buffers(ctx);
6502
6503 args[0] = ctx->gsvs_ring[0];
6504 args[1] = lp_build_mul_imm(uint,
6505 LLVMGetParam(ctx->radeon_bld.main_fn,
6506 ctx->param_vertex_id),
6507 4);
6508 args[3] = uint->zero;
6509 args[4] = uint->one; /* OFFEN */
6510 args[5] = uint->zero; /* IDXEN */
6511 args[6] = uint->one; /* GLC */
6512 args[7] = uint->one; /* SLC */
6513 args[8] = uint->zero; /* TFE */
6514
6515 /* Fetch vertex data from GSVS ring */
6516 for (i = 0; i < gsinfo->num_outputs; ++i) {
6517 unsigned chan;
6518
6519 outputs[i].name = gsinfo->output_semantic_name[i];
6520 outputs[i].sid = gsinfo->output_semantic_index[i];
6521
6522 for (chan = 0; chan < 4; chan++) {
6523 args[2] = lp_build_const_int32(gallivm,
6524 (i * 4 + chan) *
6525 gs->selector->gs_max_out_vertices * 16 * 4);
6526
6527 outputs[i].values[chan] =
6528 LLVMBuildBitCast(gallivm->builder,
6529 lp_build_intrinsic(gallivm->builder,
6530 "llvm.SI.buffer.load.dword.i32.i32",
6531 ctx->i32, args, 9,
6532 LLVMReadOnlyAttribute),
6533 ctx->f32, "");
6534 }
6535 }
6536
6537 si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
6538
6539 LLVMBuildRetVoid(gallivm->builder);
6540
6541 /* Dump LLVM IR before any optimization passes */
6542 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6543 r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6544 LLVMDumpModule(bld_base->base.gallivm->module);
6545
6546 radeon_llvm_finalize_module(&ctx->radeon_bld);
6547
6548 r = si_compile_llvm(sscreen, &ctx->shader->binary,
6549 &ctx->shader->config, ctx->tm,
6550 bld_base->base.gallivm->module,
6551 debug, PIPE_SHADER_GEOMETRY,
6552 "GS Copy Shader");
6553 if (!r) {
6554 if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6555 fprintf(stderr, "GS Copy Shader:\n");
6556 si_shader_dump(sscreen, ctx->shader, debug,
6557 PIPE_SHADER_GEOMETRY, stderr);
6558 r = si_shader_binary_upload(sscreen, ctx->shader);
6559 }
6560
6561 radeon_llvm_dispose(&ctx->radeon_bld);
6562
6563 FREE(outputs);
6564 return r;
6565 }
6566
6567 static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
6568 FILE *f)
6569 {
6570 int i;
6571
6572 fprintf(f, "SHADER KEY\n");
6573
6574 switch (shader) {
6575 case PIPE_SHADER_VERTEX:
6576 fprintf(f, " instance_divisors = {");
6577 for (i = 0; i < ARRAY_SIZE(key->vs.prolog.instance_divisors); i++)
6578 fprintf(f, !i ? "%u" : ", %u",
6579 key->vs.prolog.instance_divisors[i]);
6580 fprintf(f, "}\n");
6581 fprintf(f, " as_es = %u\n", key->vs.as_es);
6582 fprintf(f, " as_ls = %u\n", key->vs.as_ls);
6583 fprintf(f, " export_prim_id = %u\n", key->vs.epilog.export_prim_id);
6584 break;
6585
6586 case PIPE_SHADER_TESS_CTRL:
6587 fprintf(f, " prim_mode = %u\n", key->tcs.epilog.prim_mode);
6588 break;
6589
6590 case PIPE_SHADER_TESS_EVAL:
6591 fprintf(f, " as_es = %u\n", key->tes.as_es);
6592 fprintf(f, " export_prim_id = %u\n", key->tes.epilog.export_prim_id);
6593 break;
6594
6595 case PIPE_SHADER_GEOMETRY:
6596 case PIPE_SHADER_COMPUTE:
6597 break;
6598
6599 case PIPE_SHADER_FRAGMENT:
6600 fprintf(f, " prolog.color_two_side = %u\n", key->ps.prolog.color_two_side);
6601 fprintf(f, " prolog.flatshade_colors = %u\n", key->ps.prolog.flatshade_colors);
6602 fprintf(f, " prolog.poly_stipple = %u\n", key->ps.prolog.poly_stipple);
6603 fprintf(f, " prolog.force_persp_sample_interp = %u\n", key->ps.prolog.force_persp_sample_interp);
6604 fprintf(f, " prolog.force_linear_sample_interp = %u\n", key->ps.prolog.force_linear_sample_interp);
6605 fprintf(f, " prolog.force_persp_center_interp = %u\n", key->ps.prolog.force_persp_center_interp);
6606 fprintf(f, " prolog.force_linear_center_interp = %u\n", key->ps.prolog.force_linear_center_interp);
6607 fprintf(f, " prolog.bc_optimize_for_persp = %u\n", key->ps.prolog.bc_optimize_for_persp);
6608 fprintf(f, " prolog.bc_optimize_for_linear = %u\n", key->ps.prolog.bc_optimize_for_linear);
6609 fprintf(f, " epilog.spi_shader_col_format = 0x%x\n", key->ps.epilog.spi_shader_col_format);
6610 fprintf(f, " epilog.color_is_int8 = 0x%X\n", key->ps.epilog.color_is_int8);
6611 fprintf(f, " epilog.last_cbuf = %u\n", key->ps.epilog.last_cbuf);
6612 fprintf(f, " epilog.alpha_func = %u\n", key->ps.epilog.alpha_func);
6613 fprintf(f, " epilog.alpha_to_one = %u\n", key->ps.epilog.alpha_to_one);
6614 fprintf(f, " epilog.poly_line_smoothing = %u\n", key->ps.epilog.poly_line_smoothing);
6615 fprintf(f, " epilog.clamp_color = %u\n", key->ps.epilog.clamp_color);
6616 break;
6617
6618 default:
6619 assert(0);
6620 }
6621 }
6622
6623 static void si_init_shader_ctx(struct si_shader_context *ctx,
6624 struct si_screen *sscreen,
6625 struct si_shader *shader,
6626 LLVMTargetMachineRef tm)
6627 {
6628 struct lp_build_tgsi_context *bld_base;
6629 struct lp_build_tgsi_action tmpl = {};
6630
6631 memset(ctx, 0, sizeof(*ctx));
6632 radeon_llvm_context_init(
6633 &ctx->radeon_bld, "amdgcn--",
6634 (shader && shader->selector) ? &shader->selector->info : NULL,
6635 (shader && shader->selector) ? shader->selector->tokens : NULL);
6636 ctx->tm = tm;
6637 ctx->screen = sscreen;
6638 if (shader && shader->selector)
6639 ctx->type = shader->selector->info.processor;
6640 else
6641 ctx->type = -1;
6642 ctx->shader = shader;
6643
6644 ctx->voidt = LLVMVoidTypeInContext(ctx->radeon_bld.gallivm.context);
6645 ctx->i1 = LLVMInt1TypeInContext(ctx->radeon_bld.gallivm.context);
6646 ctx->i8 = LLVMInt8TypeInContext(ctx->radeon_bld.gallivm.context);
6647 ctx->i32 = LLVMInt32TypeInContext(ctx->radeon_bld.gallivm.context);
6648 ctx->i64 = LLVMInt64TypeInContext(ctx->radeon_bld.gallivm.context);
6649 ctx->i128 = LLVMIntTypeInContext(ctx->radeon_bld.gallivm.context, 128);
6650 ctx->f32 = LLVMFloatTypeInContext(ctx->radeon_bld.gallivm.context);
6651 ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
6652 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
6653 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
6654 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
6655 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
6656
6657 bld_base = &ctx->radeon_bld.soa.bld_base;
6658 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
6659
6660 bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
6661 bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
6662 bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;
6663
6664 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
6665 bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
6666 bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
6667 bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
6668 bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
6669 bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
6670 bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
6671 bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
6672 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
6673 bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
6674 bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
6675 bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
6676 bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
6677 bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;
6678
6679 bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
6680 bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
6681 bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
6682 bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
6683 bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
6684 bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;
6685
6686 tmpl.fetch_args = atomic_fetch_args;
6687 tmpl.emit = atomic_emit;
6688 bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
6689 bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
6690 bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
6691 bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
6692 bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
6693 bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
6694 bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
6695 bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
6696 bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
6697 bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
6698 bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
6699 bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
6700 bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
6701 bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
6702 bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
6703 bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
6704 bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
6705 bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
6706 bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
6707 bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
6708
6709 bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;
6710
6711 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
6712 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
6713 bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
6714 bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;
6715
6716 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
6717 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
6718 bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
6719
6720 bld_base->op_actions[TGSI_OPCODE_MAX].emit = build_tgsi_intrinsic_nomem;
6721 bld_base->op_actions[TGSI_OPCODE_MAX].intr_name = "llvm.maxnum.f32";
6722 bld_base->op_actions[TGSI_OPCODE_MIN].emit = build_tgsi_intrinsic_nomem;
6723 bld_base->op_actions[TGSI_OPCODE_MIN].intr_name = "llvm.minnum.f32";
6724 }
6725
6726 int si_compile_tgsi_shader(struct si_screen *sscreen,
6727 LLVMTargetMachineRef tm,
6728 struct si_shader *shader,
6729 bool is_monolithic,
6730 struct pipe_debug_callback *debug)
6731 {
6732 struct si_shader_selector *sel = shader->selector;
6733 struct si_shader_context ctx;
6734 struct lp_build_tgsi_context *bld_base;
6735 LLVMModuleRef mod;
6736 int r = 0;
6737
6738 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
6739 * conversion fails. */
6740 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
6741 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
6742 tgsi_dump(sel->tokens, 0);
6743 si_dump_streamout(&sel->so);
6744 }
6745
6746 si_init_shader_ctx(&ctx, sscreen, shader, tm);
6747 ctx.is_monolithic = is_monolithic;
6748
6749 shader->info.uses_instanceid = sel->info.uses_instanceid;
6750
6751 bld_base = &ctx.radeon_bld.soa.bld_base;
6752 ctx.radeon_bld.load_system_value = declare_system_value;
6753
6754 switch (ctx.type) {
6755 case PIPE_SHADER_VERTEX:
6756 ctx.radeon_bld.load_input = declare_input_vs;
6757 if (shader->key.vs.as_ls)
6758 bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
6759 else if (shader->key.vs.as_es)
6760 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
6761 else
6762 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
6763 break;
6764 case PIPE_SHADER_TESS_CTRL:
6765 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
6766 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
6767 bld_base->emit_store = store_output_tcs;
6768 bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
6769 break;
6770 case PIPE_SHADER_TESS_EVAL:
6771 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
6772 if (shader->key.tes.as_es)
6773 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
6774 else
6775 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
6776 break;
6777 case PIPE_SHADER_GEOMETRY:
6778 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
6779 bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
6780 break;
6781 case PIPE_SHADER_FRAGMENT:
6782 ctx.radeon_bld.load_input = declare_input_fs;
6783 if (is_monolithic)
6784 bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
6785 else
6786 bld_base->emit_epilogue = si_llvm_return_fs_outputs;
6787 break;
6788 case PIPE_SHADER_COMPUTE:
6789 ctx.radeon_bld.declare_memory_region = declare_compute_memory;
6790 break;
6791 default:
6792 assert(!"Unsupported shader type");
6793 return -1;
6794 }
6795
6796 create_meta_data(&ctx);
6797 create_function(&ctx);
6798 preload_constants(&ctx);
6799 preload_shader_buffers(&ctx);
6800 preload_samplers(&ctx);
6801 preload_images(&ctx);
6802 preload_streamout_buffers(&ctx);
6803 preload_ring_buffers(&ctx);
6804
6805 if (ctx.is_monolithic && sel->type == PIPE_SHADER_FRAGMENT &&
6806 shader->key.ps.prolog.poly_stipple) {
6807 LLVMValueRef list = LLVMGetParam(ctx.radeon_bld.main_fn,
6808 SI_PARAM_RW_BUFFERS);
6809 si_llvm_emit_polygon_stipple(&ctx, list,
6810 SI_PARAM_POS_FIXED_PT);
6811 }
6812
6813 if (ctx.type == PIPE_SHADER_GEOMETRY) {
6814 int i;
6815 for (i = 0; i < 4; i++) {
6816 ctx.gs_next_vertex[i] =
6817 lp_build_alloca(bld_base->base.gallivm,
6818 ctx.i32, "");
6819 }
6820 }
6821
6822 if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
6823 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
6824 goto out;
6825 }
6826
6827 si_llvm_build_ret(&ctx, ctx.return_value);
6828 mod = bld_base->base.gallivm->module;
6829
6830 /* Dump LLVM IR before any optimization passes */
6831 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6832 r600_can_dump_shader(&sscreen->b, ctx.type))
6833 LLVMDumpModule(mod);
6834
6835 radeon_llvm_finalize_module(&ctx.radeon_bld);
6836
6837 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
6838 mod, debug, ctx.type, "TGSI shader");
6839 if (r) {
6840 fprintf(stderr, "LLVM failed to compile shader\n");
6841 goto out;
6842 }
6843
6844 radeon_llvm_dispose(&ctx.radeon_bld);
6845
6846 /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
6847 * LLVM 3.9svn has this bug.
6848 */
6849 if (sel->type == PIPE_SHADER_COMPUTE) {
6850 unsigned *props = sel->info.properties;
6851 unsigned wave_size = 64;
6852 unsigned max_vgprs = 256;
6853 unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
6854 unsigned max_sgprs_per_wave = 128;
6855 unsigned min_waves_per_cu =
6856 DIV_ROUND_UP(props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
6857 props[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
6858 props[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH],
6859 wave_size);
6860 unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);
6861
6862 max_vgprs = max_vgprs / min_waves_per_simd;
6863 max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);
6864
6865 if (shader->config.num_sgprs > max_sgprs ||
6866 shader->config.num_vgprs > max_vgprs) {
6867 fprintf(stderr, "LLVM failed to compile a shader correctly: "
6868 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
6869 shader->config.num_sgprs, shader->config.num_vgprs,
6870 max_sgprs, max_vgprs);
6871
6872 /* Just terminate the process, because dependent
6873 * shaders can hang due to bad input data, but use
6874 * the env var to allow shader-db to work.
6875 */
6876 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
6877 abort();
6878 }
6879 }
6880
6881 /* Add the scratch offset to input SGPRs. */
6882 if (shader->config.scratch_bytes_per_wave)
6883 shader->info.num_input_sgprs += 1; /* scratch byte offset */
6884
6885 /* Calculate the number of fragment input VGPRs. */
6886 if (ctx.type == PIPE_SHADER_FRAGMENT) {
6887 shader->info.num_input_vgprs = 0;
6888 shader->info.face_vgpr_index = -1;
6889
6890 if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6891 shader->info.num_input_vgprs += 2;
6892 if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
6893 shader->info.num_input_vgprs += 2;
6894 if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
6895 shader->info.num_input_vgprs += 2;
6896 if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
6897 shader->info.num_input_vgprs += 3;
6898 if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6899 shader->info.num_input_vgprs += 2;
6900 if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
6901 shader->info.num_input_vgprs += 2;
6902 if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
6903 shader->info.num_input_vgprs += 2;
6904 if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
6905 shader->info.num_input_vgprs += 1;
6906 if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
6907 shader->info.num_input_vgprs += 1;
6908 if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
6909 shader->info.num_input_vgprs += 1;
6910 if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
6911 shader->info.num_input_vgprs += 1;
6912 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
6913 shader->info.num_input_vgprs += 1;
6914 if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
6915 shader->info.face_vgpr_index = shader->info.num_input_vgprs;
6916 shader->info.num_input_vgprs += 1;
6917 }
6918 if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
6919 shader->info.num_input_vgprs += 1;
6920 if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
6921 shader->info.num_input_vgprs += 1;
6922 if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
6923 shader->info.num_input_vgprs += 1;
6924 }
6925
6926 if (ctx.type == PIPE_SHADER_GEOMETRY) {
6927 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
6928 shader->gs_copy_shader->selector = shader->selector;
6929 ctx.shader = shader->gs_copy_shader;
6930 if ((r = si_generate_gs_copy_shader(sscreen, &ctx,
6931 shader, debug))) {
6932 free(shader->gs_copy_shader);
6933 shader->gs_copy_shader = NULL;
6934 goto out;
6935 }
6936 }
6937
6938 out:
6939 return r;
6940 }
6941
6942 /**
6943 * Create, compile and return a shader part (prolog or epilog).
6944 *
6945 * \param sscreen screen
6946 * \param list list of shader parts of the same category
6947 * \param key shader part key
6948 * \param tm LLVM target machine
6949 * \param debug debug callback
6950 * \param compile the callback responsible for compilation
6951 * \return non-NULL on success
6952 */
6953 static struct si_shader_part *
6954 si_get_shader_part(struct si_screen *sscreen,
6955 struct si_shader_part **list,
6956 union si_shader_part_key *key,
6957 LLVMTargetMachineRef tm,
6958 struct pipe_debug_callback *debug,
6959 bool (*compile)(struct si_screen *,
6960 LLVMTargetMachineRef,
6961 struct pipe_debug_callback *,
6962 struct si_shader_part *))
6963 {
6964 struct si_shader_part *result;
6965
6966 pipe_mutex_lock(sscreen->shader_parts_mutex);
6967
6968 /* Find existing. */
6969 for (result = *list; result; result = result->next) {
6970 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6971 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6972 return result;
6973 }
6974 }
6975
6976 /* Compile a new one. */
6977 result = CALLOC_STRUCT(si_shader_part);
6978 result->key = *key;
6979 if (!compile(sscreen, tm, debug, result)) {
6980 FREE(result);
6981 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6982 return NULL;
6983 }
6984
6985 result->next = *list;
6986 *list = result;
6987 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6988 return result;
6989 }
6990
6991 /**
6992 * Create a vertex shader prolog.
6993 *
6994 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6995 * All inputs are returned unmodified. The vertex load indices are
6996 * stored after them, which will used by the API VS for fetching inputs.
6997 *
6998 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6999 * input_v0,
7000 * input_v1,
7001 * input_v2,
7002 * input_v3,
7003 * (VertexID + BaseVertex),
7004 * (InstanceID + StartInstance),
7005 * (InstanceID / 2 + StartInstance)
7006 */
static bool si_compile_vs_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Address only; ctx itself is initialized by si_init_shader_ctx below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	LLVMTypeRef *params, *returns;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_VERTEX;
	/* VertexID and InstanceID are VGPRs following the input SGPRs
	 * (VGPR indices 0 and 3 of the 4 preloaded VGPRs). */
	ctx.param_vertex_id = key->vs_prolog.num_input_sgprs;
	ctx.param_instance_id = key->vs_prolog.num_input_sgprs + 3;

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	params = alloca((key->vs_prolog.num_input_sgprs + 4) *
			sizeof(LLVMTypeRef));
	returns = alloca((key->vs_prolog.num_input_sgprs + 4 +
			  key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	num_params = 0;
	num_returns = 0;

	/* Declare input and output SGPRs. */
	/* NOTE(review): num_params is zeroed again here; the second
	 * assignment is redundant but harmless. */
	num_params = 0;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.i32;
	}
	last_sgpr = num_params - 1;

	/* 4 preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < 4; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, returns, num_returns, params,
			   num_params, last_sgpr);
	func = ctx.radeon_bld.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	/* SGPRs pass through as i32... */
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}
	/* ...the 4 VGPRs are bitcast to f32 to match the return types. */
	for (i = num_params - 4; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		p = LLVMBuildBitCast(gallivm->builder, p, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Compute vertex load indices from instance divisors.
	 * They are appended to the return value after the preloaded VGPRs
	 * (num_params keeps advancing past the input count here). */
	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		unsigned divisor = key->vs_prolog.states.instance_divisors[i];
		LLVMValueRef index;

		if (divisor) {
			/* InstanceID / Divisor + StartInstance */
			index = get_instance_index_for_fetch(&ctx.radeon_bld,
							     SI_SGPR_START_INSTANCE,
							     divisor);
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(gallivm->builder,
					     LLVMGetParam(func, ctx.param_vertex_id),
					     LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
		}

		/* Indices are returned in float return slots, hence the cast. */
		index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
					   num_params++, "");
	}

	/* Compile. */
	si_llvm_build_ret(&ctx, ret);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Prolog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7106
7107 /**
7108 * Compile the vertex shader epilog. This is also used by the tessellation
7109 * evaluation shader compiled as VS.
7110 *
7111 * The input is PrimitiveID.
7112 *
7113 * If PrimitiveID is required by the pixel shader, export it.
7114 * Otherwise, do nothing.
7115 */
static bool si_compile_vs_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader_context ctx;
	/* Address only; ctx itself is initialized by si_init_shader_ctx below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	LLVMTypeRef params[5];
	int num_params, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, NULL, tm);
	ctx.type = PIPE_SHADER_VERTEX;

	/* Declare input VGPRs. */
	/* PrimitiveID arrives in VGPR VS_EPILOG_PRIMID_LOC; when it isn't
	 * exported, the epilog takes no inputs at all. */
	num_params = key->vs_epilog.states.export_prim_id ?
		     (VS_EPILOG_PRIMID_LOC + 1) : 0;
	assert(num_params <= ARRAY_SIZE(params));

	for (i = 0; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, -1);

	/* Emit exports. */
	if (key->vs_epilog.states.export_prim_id) {
		struct lp_build_context *base = &bld_base->base;
		struct lp_build_context *uint = &bld_base->uint_bld;
		LLVMValueRef args[9];

		/* llvm.SI.export argument layout (9 operands). */
		args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
		args[1] = uint->zero; /* whether the EXEC mask is valid */
		args[2] = uint->zero; /* DONE bit */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_PARAM +
					       key->vs_epilog.prim_id_param_offset);
		args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
		args[5] = LLVMGetParam(ctx.radeon_bld.main_fn,
				       VS_EPILOG_PRIMID_LOC); /* X */
		args[6] = uint->undef; /* Y */
		args[7] = uint->undef; /* Z */
		args[8] = uint->undef; /* W */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9, 0);
	}

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7178
7179 /**
7180 * Create & compile a vertex shader epilog. This a helper used by VS and TES.
7181 */
7182 static bool si_get_vs_epilog(struct si_screen *sscreen,
7183 LLVMTargetMachineRef tm,
7184 struct si_shader *shader,
7185 struct pipe_debug_callback *debug,
7186 struct si_vs_epilog_bits *states)
7187 {
7188 union si_shader_part_key epilog_key;
7189
7190 memset(&epilog_key, 0, sizeof(epilog_key));
7191 epilog_key.vs_epilog.states = *states;
7192
7193 /* Set up the PrimitiveID output. */
7194 if (shader->key.vs.epilog.export_prim_id) {
7195 unsigned index = shader->selector->info.num_outputs;
7196 unsigned offset = shader->info.nr_param_exports++;
7197
7198 epilog_key.vs_epilog.prim_id_param_offset = offset;
7199 assert(index < ARRAY_SIZE(shader->info.vs_output_param_offset));
7200 shader->info.vs_output_param_offset[index] = offset;
7201 }
7202
7203 shader->epilog = si_get_shader_part(sscreen, &sscreen->vs_epilogs,
7204 &epilog_key, tm, debug,
7205 si_compile_vs_epilog);
7206 return shader->epilog != NULL;
7207 }
7208
7209 /**
7210 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7211 */
7212 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7213 LLVMTargetMachineRef tm,
7214 struct si_shader *shader,
7215 struct pipe_debug_callback *debug)
7216 {
7217 struct tgsi_shader_info *info = &shader->selector->info;
7218 union si_shader_part_key prolog_key;
7219 unsigned i;
7220
7221 /* Get the prolog. */
7222 memset(&prolog_key, 0, sizeof(prolog_key));
7223 prolog_key.vs_prolog.states = shader->key.vs.prolog;
7224 prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
7225 prolog_key.vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
7226
7227 /* The prolog is a no-op if there are no inputs. */
7228 if (info->num_inputs) {
7229 shader->prolog =
7230 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7231 &prolog_key, tm, debug,
7232 si_compile_vs_prolog);
7233 if (!shader->prolog)
7234 return false;
7235 }
7236
7237 /* Get the epilog. */
7238 if (!shader->key.vs.as_es && !shader->key.vs.as_ls &&
7239 !si_get_vs_epilog(sscreen, tm, shader, debug,
7240 &shader->key.vs.epilog))
7241 return false;
7242
7243 /* Set the instanceID flag. */
7244 for (i = 0; i < info->num_inputs; i++)
7245 if (prolog_key.vs_prolog.states.instance_divisors[i])
7246 shader->info.uses_instanceid = true;
7247
7248 return true;
7249 }
7250
7251 /**
7252 * Select and compile (or reuse) TES parts (epilog).
7253 */
7254 static bool si_shader_select_tes_parts(struct si_screen *sscreen,
7255 LLVMTargetMachineRef tm,
7256 struct si_shader *shader,
7257 struct pipe_debug_callback *debug)
7258 {
7259 if (shader->key.tes.as_es)
7260 return true;
7261
7262 /* TES compiled as VS. */
7263 return si_get_vs_epilog(sscreen, tm, shader, debug,
7264 &shader->key.tes.epilog);
7265 }
7266
7267 /**
7268 * Compile the TCS epilog. This writes tesselation factors to memory based on
7269 * the output primitive type of the tesselator (determined by TES).
7270 */
static bool si_compile_tcs_epilog(struct si_screen *sscreen,
				  LLVMTargetMachineRef tm,
				  struct pipe_debug_callback *debug,
				  struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Address only; ctx itself is initialized by si_init_shader_ctx below. */
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	LLVMTypeRef params[16];
	LLVMValueRef func;
	int last_sgpr, num_params;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_TESS_CTRL;
	shader.key.tcs.epilog = key->tcs_epilog.states;

	/* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used. */
	/* The full TCS SGPR layout is declared anyway so the parameter
	 * indices (SI_PARAM_*) match the main shader part. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx.v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_OUT_OFFSETS] = ctx.i32;
	params[SI_PARAM_TCS_OUT_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_IN_LAYOUT] = ctx.i32;
	params[ctx.param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx.i32;
	params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx.i32;
	last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
	num_params = last_sgpr + 1;

	/* Three VGPR inputs follow the SGPRs; they are read back below via
	 * last_sgpr + 1..3. */
	params[num_params++] = ctx.i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx.i32; /* invocation ID within the patch */
	params[num_params++] = ctx.i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
	declare_tess_lds(&ctx);
	func = ctx.radeon_bld.main_fn;

	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Tessellation Control Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7331
7332 /**
7333 * Select and compile (or reuse) TCS parts (epilog).
7334 */
7335 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7336 LLVMTargetMachineRef tm,
7337 struct si_shader *shader,
7338 struct pipe_debug_callback *debug)
7339 {
7340 union si_shader_part_key epilog_key;
7341
7342 /* Get the epilog. */
7343 memset(&epilog_key, 0, sizeof(epilog_key));
7344 epilog_key.tcs_epilog.states = shader->key.tcs.epilog;
7345
7346 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7347 &epilog_key, tm, debug,
7348 si_compile_tcs_epilog);
7349 return shader->epilog != NULL;
7350 }
7351
/**
 * Compile the pixel shader prolog. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states (e.g. per-sample interpolation).
 * Interpolated colors are stored after the preloaded VGPRs.
 *
 * The input VGPR layout assumed throughout (offsets relative to the first
 * VGPR after the input SGPRs): 0-1 = PERSP_SAMPLE (i,j), 2-3 = PERSP_CENTER,
 * 4-5 = PERSP_CENTROID, 6-7 = LINEAR_SAMPLE, 8-9 = LINEAR_CENTER,
 * 10-11 = LINEAR_CENTROID.
 *
 * \param sscreen  screen the part is compiled for
 * \param tm       LLVM target machine
 * \param debug    debug callback for compiler diagnostics
 * \param out      receives the compiled binary and config; out->key selects
 *                 the prolog variant
 * \return true on success, false if LLVM compilation failed
 */
static bool si_compile_ps_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_FRAGMENT;
	shader.key.ps.prolog = key->ps_prolog.states;

	/* Number of inputs + 8 color elements (worst case: two colors with
	 * all 4 channels read each).
	 */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs: all SGPRs first, then all VGPRs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx.i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx.f32;

	/* Declare outputs (same as inputs + add colors if needed) */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, params, num_returns, params,
			   num_params, last_sgpr);
	func = ctx.radeon_bld.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers (assembled from two 32-bit
		 * SGPR halves into a 64-bit address).
		 */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx.i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					 const_array(ctx.v16i8, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(&ctx, list, pos);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		/* Extract bit 31 as an i1 condition. */
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx.i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx.i1, "");

		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Interpolate colors (two-side color selection happens inside
	 * interp_fs_input when "face" is non-NULL). Interpolated channels
	 * are appended after the pass-through inputs in the return value.
	 */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
			interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
						     ctx.v2i32, "");
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx.i32, "");
		}

		interp_fs_input(&ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append only the channels actually read. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	/* Compile. */
	si_llvm_build_ret(&ctx, ret);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Prolog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7615
/**
 * Compile the pixel shader epilog. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 *
 * \param sscreen  screen the part is compiled for
 * \param tm       LLVM target machine
 * \param debug    debug callback for compiler diagnostics
 * \param out      receives the compiled binary and config; out->key selects
 *                 the epilog variant
 * \return true on success, false if LLVM compilation failed
 */
static bool si_compile_ps_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.radeon_bld.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.radeon_bld.soa.bld_base;
	/* Worst case: 16 SGPRs + 8 MRTs * 4 channels + Z/stencil/samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params, i;
	bool status = true;
	struct si_ps_exports exp = {};

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_FRAGMENT;
	shader.key.ps.epilog = key->ps_epilog.states;

	/* Declare input SGPRs. */
	params[SI_PARAM_RW_BUFFERS] = ctx.i64;
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_ALPHA_REF] = ctx.f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 channels per written color, then the
	 * optional depth/stencil/samplemask values.
	 */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Make sure the sample-mask input location exists even if nothing
	 * after it is declared.
	 */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
	/* Disable elimination of unused inputs. */
	radeon_llvm_add_attribute(ctx.radeon_bld.main_fn,
				  "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export.  Only needed when no depth/stencil/
	 * samplemask export will terminate the export sequence.
	 */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Export each written color; num_params - 1 is the index of the last
	 * input (at least the sample-mask location guaranteed above).
	 */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx.radeon_bld.main_fn, vgpr++);

	/* Something must always be exported from a pixel shader; emit a null
	 * export if nothing else did.
	 */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(&ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	radeon_llvm_finalize_module(&ctx.radeon_bld);

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Epilog"))
		status = false;

	radeon_llvm_dispose(&ctx.radeon_bld);
	return status;
}
7735
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Builds the prolog/epilog cache keys from the shader key and TGSI info,
 * fetches (or compiles) the parts, and then fixes up SPI_PS_INPUT_ENA so
 * the hardware loads exactly the interpolation weights the parts need.
 *
 * \return false if compiling a required part failed
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;
	unsigned i;

	/* Get the prolog. */
	memset(&prolog_key, 0, sizeof(prolog_key));
	prolog_key.ps_prolog.states = shader->key.ps.prolog;
	prolog_key.ps_prolog.colors_read = info->colors_read;
	prolog_key.ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	prolog_key.ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* WQM is only needed when the prolog computes something derivatives
	 * depend on (color interpolation or overridden barycentrics).
	 */
	prolog_key.ps_prolog.wqm = info->uses_derivatives &&
		(prolog_key.ps_prolog.colors_read ||
		 prolog_key.ps_prolog.states.force_persp_sample_interp ||
		 prolog_key.ps_prolog.states.force_linear_sample_interp ||
		 prolog_key.ps_prolog.states.force_persp_center_interp ||
		 prolog_key.ps_prolog.states.force_linear_center_interp ||
		 prolog_key.ps_prolog.states.bc_optimize_for_persp ||
		 prolog_key.ps_prolog.states.bc_optimize_for_linear);

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			prolog_key.ps_prolog.num_interp_inputs = info->num_inputs;
			prolog_key.ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		for (i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			prolog_key.ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			/* color_interp_vgpr_index encodes which (i,j) VGPR
			 * pair the prolog uses: -1 = constant (no interp),
			 * 0/2/4 = persp sample/center/centroid,
			 * 6/8/10 = linear sample/center/centroid.
			 * These match the PS input VGPR layout.
			 */
			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				prolog_key.ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 6;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 8;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 10;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}

	/* The prolog is a no-op if these aren't set. */
	if (prolog_key.ps_prolog.colors_read ||
	    prolog_key.ps_prolog.states.force_persp_sample_interp ||
	    prolog_key.ps_prolog.states.force_linear_sample_interp ||
	    prolog_key.ps_prolog.states.force_persp_center_interp ||
	    prolog_key.ps_prolog.states.force_linear_center_interp ||
	    prolog_key.ps_prolog.states.bc_optimize_for_persp ||
	    prolog_key.ps_prolog.states.bc_optimize_for_linear ||
	    prolog_key.ps_prolog.states.poly_stipple) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   &prolog_key, tm, debug,
					   si_compile_ps_prolog);
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog.  Unlike the prolog, it is always required. */
	memset(&epilog_key, 0, sizeof(epilog_key));
	epilog_key.ps_epilog.colors_written = info->colors_written;
	epilog_key.ps_epilog.writes_z = info->writes_z;
	epilog_key.ps_epilog.writes_stencil = info->writes_stencil;
	epilog_key.ps_epilog.writes_samplemask = info->writes_samplemask;
	epilog_key.ps_epilog.states = shader->key.ps.epilog;

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   &epilog_key, tm, debug,
				   si_compile_ps_epilog);
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each override replaces the center/centroid (or sample/centroid)
	 * enables with the single barycentric pair the prolog reads.
	 */
	if (shader->key.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7942
7943 static void si_fix_num_sgprs(struct si_shader *shader)
7944 {
7945 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7946
7947 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7948 }
7949
/**
 * Create a shader variant: either compile it as a monolithic shader, or
 * reuse the precompiled main part and attach prolog/epilog parts, then
 * upload the result to GPU-accessible memory.
 *
 * \return 0 on success, negative on failure
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader *mainp = shader->selector->main_shader_part;
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 */
	if (!mainp ||
	    (shader->selector->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es != mainp->key.vs.as_es ||
	      shader->key.vs.as_ls != mainp->key.vs.as_ls)) ||
	    (shader->selector->type == PIPE_SHADER_TESS_EVAL &&
	     shader->key.tes.as_es != mainp->key.tes.as_es) ||
	    (shader->selector->type == PIPE_SHADER_TESS_CTRL &&
	     shader->key.tcs.epilog.inputs_to_copy) ||
	    shader->selector->type == PIPE_SHADER_COMPUTE) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over.  The binary is
		 * shared with the main part, so mark it as such to prevent
		 * a double free in si_shader_destroy.
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (!si_shader_select_tes_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined shader must fit
		 * the register demands of all of its parts.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_num_sgprs(shader);
	si_shader_dump(sscreen, shader, debug, shader->selector->info.processor,
		       stderr);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8055
/* Free all resources owned by a shader variant (but not the struct itself). */
void si_shader_destroy(struct si_shader *shader)
{
	/* The GS copy shader is owned by this shader; destroy it first. */
	if (shader->gs_copy_shader) {
		si_shader_destroy(shader->gs_copy_shader);
		FREE(shader->gs_copy_shader);
	}

	if (shader->scratch_bo)
		r600_resource_reference(&shader->scratch_bo, NULL);

	r600_resource_reference(&shader->bo, NULL);

	/* Shaders built from parts share the main part's binary;
	 * only clean it if this variant owns it.
	 */
	if (!shader->is_binary_shared)
		radeon_shader_binary_clean(&shader->binary);

	free(shader->shader_log);
}