d1482de209d101803d5581a3d6021a81437211c0
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_bitarit.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "radeon/r600_cs.h"
37 #include "radeon/radeon_llvm.h"
38 #include "radeon/radeon_elf_util.h"
39 #include "radeon/radeon_llvm_emit.h"
40 #include "util/u_memory.h"
41 #include "util/u_pstipple.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi/tgsi_dump.h"
45
46 #include "si_pipe.h"
47 #include "si_shader.h"
48 #include "sid.h"
49
50 #include <errno.h>
51
/* Relocation symbol names for the two dwords of the scratch buffer
 * resource descriptor; patched into the shader binary after compilation
 * (NOTE(review): patch site is outside this chunk — confirm). */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
57
/* One shader output: its four channel values plus the semantic that
 * identifies it. */
struct si_shader_output_values
{
	LLVMValueRef values[4]; /* per-channel (x,y,z,w) values */
	unsigned name;          /* semantic name */
	unsigned sid;           /* semantic index */
};
64
/* Per-compilation context wrapping radeon_llvm_context with
 * radeonsi-specific state. */
struct si_shader_context
{
	/* Must remain the first member: si_shader_context() casts the
	 * lp_build_tgsi_context pointer (radeon_bld.soa.bld_base) back to
	 * this struct. */
	struct radeon_llvm_context radeon_bld;
	struct si_shader *shader;
	struct si_screen *screen;

	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
	bool is_gs_copy_shader;
	/* Indices of the main function's parameters, for LLVMGetParam. */
	int param_streamout_config;
	int param_streamout_write_index;
	int param_streamout_offset[4];
	int param_vertex_id;
	int param_rel_auto_id;
	int param_vs_prim_id;
	int param_instance_id;
	int param_tes_u;
	int param_tes_v;
	int param_tes_rel_patch_id;
	int param_tes_patch_id;
	int param_es2gs_offset;

	LLVMTargetMachineRef tm;

	LLVMValueRef const_md; /* metadata attached to constant loads (see build_indexed_load_const) */
	LLVMValueRef const_buffers[SI_NUM_CONST_BUFFERS];
	LLVMValueRef lds; /* LDS array used for tessellation shader I/O (lds_load/lds_store) */
	LLVMValueRef *constants[SI_NUM_CONST_BUFFERS];
	LLVMValueRef sampler_views[SI_NUM_SAMPLERS];
	LLVMValueRef sampler_states[SI_NUM_SAMPLERS];
	LLVMValueRef fmasks[SI_NUM_USER_SAMPLERS];
	LLVMValueRef so_buffers[4]; /* streamout buffers */
	LLVMValueRef esgs_ring; /* ring descriptor read by fetch_input_gs */
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef gs_next_vertex[4];

	/* Cached LLVM types, to avoid repeated LLVMTypeRef lookups. */
	LLVMTypeRef voidt;
	LLVMTypeRef i1;
	LLVMTypeRef i8;
	LLVMTypeRef i32;
	LLVMTypeRef i128;
	LLVMTypeRef f32;
	LLVMTypeRef v16i8;
	LLVMTypeRef v4i32;
	LLVMTypeRef v4f32;
	LLVMTypeRef v8i32;
};
111
/* Recover the si_shader_context from a lp_build_tgsi_context pointer.
 * Valid because radeon_bld (whose soa.bld_base is the argument) is the
 * first member of struct si_shader_context. */
static struct si_shader_context *si_shader_context(
	struct lp_build_tgsi_context *bld_base)
{
	return (struct si_shader_context *)bld_base;
}
117
/* Forward declaration; the definition appears later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm,
			       struct tgsi_shader_info *info);
123
124
/* Bases and offsets for the interpolation (i,j) inputs
 * (NOTE(review): consumers are outside this chunk — confirm layout). */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4 /* sic: misspelling kept; renaming would break existing users */

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address spaces used when building pointer types. */
#define CONST_ADDR_SPACE 2
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8

/* s_sendmsg message types ... */
#define SENDMSG_GS 2
#define SENDMSG_GS_DONE 3

/* ... and GS operation encodings (bits [5:4] of the message). */
#define SENDMSG_GS_OP_NOP (0 << 4)
#define SENDMSG_GS_OP_CUT (1 << 4)
#define SENDMSG_GS_OP_EMIT (2 << 4)
#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
145
146 /**
147 * Returns a unique index for a semantic name and index. The index must be
148 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
149 * calculated.
150 */
151 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
152 {
153 switch (semantic_name) {
154 case TGSI_SEMANTIC_POSITION:
155 return 0;
156 case TGSI_SEMANTIC_PSIZE:
157 return 1;
158 case TGSI_SEMANTIC_CLIPDIST:
159 assert(index <= 1);
160 return 2 + index;
161 case TGSI_SEMANTIC_GENERIC:
162 if (index <= 63-4)
163 return 4 + index;
164 else
165 /* same explanation as in the default statement,
166 * the only user hitting this is st/nine.
167 */
168 return 0;
169
170 /* patch indices are completely separate and thus start from 0 */
171 case TGSI_SEMANTIC_TESSOUTER:
172 return 0;
173 case TGSI_SEMANTIC_TESSINNER:
174 return 1;
175 case TGSI_SEMANTIC_PATCH:
176 return 2 + index;
177
178 default:
179 /* Don't fail here. The result of this function is only used
180 * for LS, TCS, TES, and GS, where legacy GL semantics can't
181 * occur, but this function is called for all vertex shaders
182 * before it's known whether LS will be compiled or not.
183 */
184 return 0;
185 }
186 }
187
188 /**
189 * Get the value of a shader input parameter and extract a bitfield.
190 */
191 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
192 unsigned param, unsigned rshift,
193 unsigned bitwidth)
194 {
195 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
196 LLVMValueRef value = LLVMGetParam(ctx->radeon_bld.main_fn,
197 param);
198
199 if (rshift)
200 value = LLVMBuildLShr(gallivm->builder, value,
201 lp_build_const_int32(gallivm, rshift), "");
202
203 if (rshift + bitwidth < 32) {
204 unsigned mask = (1 << bitwidth) - 1;
205 value = LLVMBuildAnd(gallivm->builder, value,
206 lp_build_const_int32(gallivm, mask), "");
207 }
208
209 return value;
210 }
211
212 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
213 {
214 switch (ctx->type) {
215 case TGSI_PROCESSOR_TESS_CTRL:
216 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
217
218 case TGSI_PROCESSOR_TESS_EVAL:
219 return LLVMGetParam(ctx->radeon_bld.main_fn,
220 ctx->param_tes_rel_patch_id);
221
222 default:
223 assert(0);
224 return NULL;
225 }
226 }
227
228 /* Tessellation shaders pass outputs to the next shader using LDS.
229 *
230 * LS outputs = TCS inputs
231 * TCS outputs = TES inputs
232 *
233 * The LDS layout is:
234 * - TCS inputs for patch 0
235 * - TCS inputs for patch 1
236 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
237 * - ...
238 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
239 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
240 * - TCS outputs for patch 1
241 * - Per-patch TCS outputs for patch 1
242 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
243 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
244 * - ...
245 *
246 * All three shaders VS(LS), TCS, TES share the same LDS space.
247 */
248
249 static LLVMValueRef
250 get_tcs_in_patch_stride(struct si_shader_context *ctx)
251 {
252 if (ctx->type == TGSI_PROCESSOR_VERTEX)
253 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
254 else if (ctx->type == TGSI_PROCESSOR_TESS_CTRL)
255 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
256 else {
257 assert(0);
258 return NULL;
259 }
260 }
261
/* TCS output patch stride in dwords: bits [0:12] of SI_PARAM_TCS_OUT_LAYOUT. */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
}
267
268 static LLVMValueRef
269 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
270 {
271 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
272 unpack_param(ctx,
273 SI_PARAM_TCS_OUT_OFFSETS,
274 0, 16),
275 4);
276 }
277
278 static LLVMValueRef
279 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
280 {
281 return lp_build_mul_imm(&ctx->radeon_bld.soa.bld_base.uint_bld,
282 unpack_param(ctx,
283 SI_PARAM_TCS_OUT_OFFSETS,
284 16, 16),
285 4);
286 }
287
288 static LLVMValueRef
289 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
290 {
291 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
292 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
293 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
294
295 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
296 }
297
298 static LLVMValueRef
299 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
300 {
301 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
302 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
303 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
304 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
305
306 return LLVMBuildAdd(gallivm->builder, patch0_offset,
307 LLVMBuildMul(gallivm->builder, patch_stride,
308 rel_patch_id, ""),
309 "");
310 }
311
312 static LLVMValueRef
313 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
314 {
315 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
316 LLVMValueRef patch0_patch_data_offset =
317 get_tcs_out_patch0_patch_data_offset(ctx);
318 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
319 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
320
321 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
322 LLVMBuildMul(gallivm->builder, patch_stride,
323 rel_patch_id, ""),
324 "");
325 }
326
327 static void build_indexed_store(struct si_shader_context *ctx,
328 LLVMValueRef base_ptr, LLVMValueRef index,
329 LLVMValueRef value)
330 {
331 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
332 struct gallivm_state *gallivm = bld_base->base.gallivm;
333 LLVMValueRef indices[2], pointer;
334
335 indices[0] = bld_base->uint_bld.zero;
336 indices[1] = index;
337
338 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
339 LLVMBuildStore(gallivm->builder, value, pointer);
340 }
341
342 /**
343 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
344 * It's equivalent to doing a load from &base_ptr[index].
345 *
346 * \param base_ptr Where the array starts.
347 * \param index The element index into the array.
348 */
349 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
350 LLVMValueRef base_ptr, LLVMValueRef index)
351 {
352 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
353 struct gallivm_state *gallivm = bld_base->base.gallivm;
354 LLVMValueRef indices[2], pointer;
355
356 indices[0] = bld_base->uint_bld.zero;
357 indices[1] = index;
358
359 pointer = LLVMBuildGEP(gallivm->builder, base_ptr, indices, 2, "");
360 return LLVMBuildLoad(gallivm->builder, pointer, "");
361 }
362
363 /**
364 * Do a load from &base_ptr[index], but also add a flag that it's loading
365 * a constant.
366 */
static LLVMValueRef build_indexed_load_const(
	struct si_shader_context *ctx,
	LLVMValueRef base_ptr, LLVMValueRef index)
{
	LLVMValueRef result = build_indexed_load(ctx, base_ptr, index);
	/* Attach ctx->const_md under metadata kind 1 so LLVM can treat the
	 * load as constant/invariant (NOTE(review): const_md and the kind ID
	 * registration are outside this chunk — confirm). */
	LLVMSetMetadata(result, 1, ctx->const_md);
	return result;
}
375
376 static LLVMValueRef get_instance_index_for_fetch(
377 struct radeon_llvm_context *radeon_bld,
378 unsigned divisor)
379 {
380 struct si_shader_context *ctx =
381 si_shader_context(&radeon_bld->soa.bld_base);
382 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
383
384 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
385 ctx->param_instance_id);
386
387 /* The division must be done before START_INSTANCE is added. */
388 if (divisor > 1)
389 result = LLVMBuildUDiv(gallivm->builder, result,
390 lp_build_const_int32(gallivm, divisor), "");
391
392 return LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
393 radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
394 }
395
/**
 * Declare a vertex shader input: fetch the attribute via the
 * llvm.SI.vs.load.input intrinsic and expose its four channels to TGSI.
 *
 * \param radeon_bld   common LLVM build context
 * \param input_index  index of the VS input / vertex-buffer descriptor slot
 * \param decl         TGSI declaration (unused here)
 */
static void declare_input_vs(
	struct radeon_llvm_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	unsigned divisor = ctx->shader->key.vs.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list (vertex buffer resource descriptor) */
	t_list_ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(&ctx->radeon_bld, divisor);
	} else {
		/* Load the buffer index for vertices: base_vertex + vertex_id. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
		"llvm.SI.vs.load.input", ctx->v4f32, args, 3,
		LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(gallivm->builder,
						input, llvm_chan, "");
	}
}
457
/**
 * Return the primitive ID for the current stage.  Only channel 0 carries
 * the ID; channels Y/Z/W read back as 0.
 */
static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
				     unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	if (swizzle > 0)
		return bld_base->uint_bld.zero;

	/* The source of the ID depends on the shader stage. */
	switch (ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		return LLVMGetParam(ctx->radeon_bld.main_fn,
				    ctx->param_vs_prim_id);
	case TGSI_PROCESSOR_TESS_CTRL:
		return LLVMGetParam(ctx->radeon_bld.main_fn,
				    SI_PARAM_PATCH_ID);
	case TGSI_PROCESSOR_TESS_EVAL:
		return LLVMGetParam(ctx->radeon_bld.main_fn,
				    ctx->param_tes_patch_id);
	case TGSI_PROCESSOR_GEOMETRY:
		return LLVMGetParam(ctx->radeon_bld.main_fn,
				    SI_PARAM_PRIMITIVE_ID);
	default:
		assert(0);
		return bld_base->uint_bld.zero;
	}
}
484
485 /**
486 * Return the value of tgsi_ind_register for indexing.
487 * This is the indirect index with the constant offset added to it.
488 */
489 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
490 const struct tgsi_ind_register *ind,
491 int rel_index)
492 {
493 struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
494 LLVMValueRef result;
495
496 result = ctx->radeon_bld.soa.addr[ind->Index][ind->Swizzle];
497 result = LLVMBuildLoad(gallivm->builder, result, "");
498 result = LLVMBuildAdd(gallivm->builder, result,
499 lp_build_const_int32(gallivm, rel_index), "");
500 return result;
501 }
502
503 /**
504 * Calculate a dword address given an input or output register and a stride.
505 */
/**
 * Calculate a dword LDS address for an input or output register.
 *
 * \param dst               destination register, or NULL if src is used
 * \param src               source register, or NULL if dst is used
 * \param vertex_dw_stride  dword stride between vertices (2D registers only)
 * \param base_addr         starting dword offset of the patch area
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = lp_build_const_int32(gallivm, reg.Dimension.Index);

		/* base += vertex_index * vertex_dw_stride */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		/* Only inputs and outputs live in LDS. */
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* The indirect index is relative to the array start (or the
		 * register itself if there is no declared array). */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each element occupies 4 dwords (one vec4). */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      lp_build_const_int32(gallivm, 4), ""), "");

		param = si_shader_io_get_unique_index(name[first], index[first]);
	} else {
		param = si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    lp_build_const_int32(gallivm, param * 4), "");
}
587
588 /**
589 * Load from LDS.
590 *
591 * \param type output value type
592 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
593 * \param dw_addr address in dwords
594 */
595 static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
596 enum tgsi_opcode_type type, unsigned swizzle,
597 LLVMValueRef dw_addr)
598 {
599 struct si_shader_context *ctx = si_shader_context(bld_base);
600 struct gallivm_state *gallivm = bld_base->base.gallivm;
601 LLVMValueRef value;
602
603 if (swizzle == ~0) {
604 LLVMValueRef values[TGSI_NUM_CHANNELS];
605
606 for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
607 values[chan] = lds_load(bld_base, type, chan, dw_addr);
608
609 return lp_build_gather_values(bld_base->base.gallivm, values,
610 TGSI_NUM_CHANNELS);
611 }
612
613 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
614 lp_build_const_int32(gallivm, swizzle));
615
616 value = build_indexed_load(ctx, ctx->lds, dw_addr);
617 if (type == TGSI_TYPE_DOUBLE) {
618 LLVMValueRef value2;
619 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
620 lp_build_const_int32(gallivm, swizzle + 1));
621 value2 = build_indexed_load(ctx, ctx->lds, dw_addr);
622 return radeon_llvm_emit_fetch_double(bld_base, value, value2);
623 }
624
625 return LLVMBuildBitCast(gallivm->builder, value,
626 tgsi2llvmtype(bld_base, type), "");
627 }
628
629 /**
630 * Store to LDS.
631 *
632 * \param swizzle offset (typically 0..3)
633 * \param dw_addr address in dwords
634 * \param value value to store
635 */
636 static void lds_store(struct lp_build_tgsi_context *bld_base,
637 unsigned swizzle, LLVMValueRef dw_addr,
638 LLVMValueRef value)
639 {
640 struct si_shader_context *ctx = si_shader_context(bld_base);
641 struct gallivm_state *gallivm = bld_base->base.gallivm;
642
643 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
644 lp_build_const_int32(gallivm, swizzle));
645
646 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
647 build_indexed_store(ctx, ctx->lds,
648 dw_addr, value);
649 }
650
651 static LLVMValueRef fetch_input_tcs(
652 struct lp_build_tgsi_context *bld_base,
653 const struct tgsi_full_src_register *reg,
654 enum tgsi_opcode_type type, unsigned swizzle)
655 {
656 struct si_shader_context *ctx = si_shader_context(bld_base);
657 LLVMValueRef dw_addr, stride;
658
659 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
660 dw_addr = get_tcs_in_current_patch_offset(ctx);
661 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
662
663 return lds_load(bld_base, type, swizzle, dw_addr);
664 }
665
/**
 * Fetch a TCS *output* value back from LDS (TCS outputs are readable
 * within the TCS itself).  2-dimensional registers are per-vertex
 * outputs; others are per-patch outputs in the patch-data area.
 */
static LLVMValueRef fetch_output_tcs(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_src_register *reg,
		enum tgsi_opcode_type type, unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef dw_addr, stride;

	if (reg->Register.Dimension) {
		/* Bits [13:20] of TCS_OUT_LAYOUT: per-vertex dword stride. */
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
	}

	return lds_load(bld_base, type, swizzle, dw_addr);
}
685
/**
 * Fetch a TES input, i.e. a TCS output still sitting in LDS.  The
 * addressing is identical to fetch_output_tcs: 2-dimensional registers
 * are per-vertex, others are per-patch.
 */
static LLVMValueRef fetch_input_tes(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type, unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef dw_addr, stride;

	if (reg->Register.Dimension) {
		/* Bits [13:20] of TCS_OUT_LAYOUT: per-vertex dword stride. */
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
	}

	return lds_load(bld_base, type, swizzle, dw_addr);
}
705
/**
 * TGSI store callback for the TCS: write outputs to LDS so that the TES
 * (and the TCS itself) can read them back.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		radeon_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	/* 2-dimensional = per-vertex output; otherwise per-patch. */
	if (reg->Register.Dimension) {
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
	}

	/* Store each enabled channel, applying saturate if requested. */
	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = radeon_llvm_saturate(bld_base, value);

		lds_store(bld_base, chan_index, dw_addr, value);
	}
}
743
/**
 * Fetch a GS input from the ESGS ring buffer.
 *
 * \param reg      source register; must be 2-dimensional (vertex, attribute)
 * \param type     TGSI result type
 * \param swizzle  channel to load, or ~0 for a full vec4
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	/* The primitive ID is not passed through the ring. */
	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	if (!reg->Register.Dimension)
		return NULL;

	/* Full vec4: recurse per channel and gather the results. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->radeon_bld.main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one; /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one; /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
	if (type == TGSI_TYPE_DOUBLE) {
		/* Doubles: load the second dword (offset recomputed from
		 * scratch here, so "+ 1" is applied to the base offset). */
		LLVMValueRef value2;
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
		return radeon_llvm_emit_fetch_double(bld_base,
						     value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
822
823 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
824 {
825 switch (interpolate) {
826 case TGSI_INTERPOLATE_CONSTANT:
827 return 0;
828
829 case TGSI_INTERPOLATE_LINEAR:
830 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
831 return SI_PARAM_LINEAR_SAMPLE;
832 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
833 return SI_PARAM_LINEAR_CENTROID;
834 else
835 return SI_PARAM_LINEAR_CENTER;
836 break;
837 case TGSI_INTERPOLATE_COLOR:
838 case TGSI_INTERPOLATE_PERSPECTIVE:
839 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
840 return SI_PARAM_PERSP_SAMPLE;
841 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
842 return SI_PARAM_PERSP_CENTROID;
843 else
844 return SI_PARAM_PERSP_CENTER;
845 break;
846 default:
847 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
848 return -1;
849 }
850 }
851
852 /* This shouldn't be used by explicit INTERP opcodes. */
853 static unsigned select_interp_param(struct si_shader_context *ctx,
854 unsigned param)
855 {
856 if (!ctx->shader->key.ps.force_persample_interp)
857 return param;
858
859 /* If the shader doesn't use center/centroid, just return the parameter.
860 *
861 * If the shader only uses one set of (i,j), "si_emit_spi_ps_input" can
862 * switch between center/centroid and sample without shader changes.
863 */
864 switch (param) {
865 case SI_PARAM_PERSP_CENTROID:
866 case SI_PARAM_PERSP_CENTER:
867 return SI_PARAM_PERSP_SAMPLE;
868
869 case SI_PARAM_LINEAR_CENTROID:
870 case SI_PARAM_LINEAR_CENTER:
871 return SI_PARAM_LINEAR_SAMPLE;
872
873 default:
874 return param;
875 }
876 }
877
878 /**
879 * Interpolate a fragment shader input.
880 *
881 * @param ctx context
882 * @param input_index index of the input in hardware
883 * @param semantic_name TGSI_SEMANTIC_*
884 * @param semantic_index semantic index
885 * @param num_interp_inputs number of all interpolated inputs (= BCOLOR offset)
886 * @param colors_read_mask color components read (4 bits for each color, 8 bits in total)
887 * @param interp_param interpolation weights (i,j)
888 * @param prim_mask SI_PARAM_PRIM_MASK
889 * @param face SI_PARAM_FRONT_FACE
890 * @param result the return value (4 components)
891 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->radeon_bld.soa.bld_base.base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.color_two_side) {
		/* Two-sided lighting: interpolate both front and back colors
		 * and select per pixel based on the facing bit. */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		/* args[0] (channel) and args[1] (attribute) vary per call;
		 * interp_param may be NULL, hence the "args[3] ? 4 : 3". */
		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute | LLVMNoUnwindAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* FOG: only X is interpolated; Y/Z are 0 and W is 1. */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					ctx->f32, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Common case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
		}
	}
}
994
995 static void declare_input_fs(
996 struct radeon_llvm_context *radeon_bld,
997 unsigned input_index,
998 const struct tgsi_full_declaration *decl)
999 {
1000 struct si_shader_context *ctx =
1001 si_shader_context(&radeon_bld->soa.bld_base);
1002 struct si_shader *shader = ctx->shader;
1003 LLVMValueRef main_fn = radeon_bld->main_fn;
1004 LLVMValueRef interp_param = NULL;
1005 int interp_param_idx;
1006
1007 interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
1008 decl->Interp.Location);
1009 if (interp_param_idx == -1)
1010 return;
1011 else if (interp_param_idx) {
1012 interp_param_idx = select_interp_param(ctx,
1013 interp_param_idx);
1014 interp_param = LLVMGetParam(main_fn, interp_param_idx);
1015 }
1016
1017 interp_fs_input(ctx, input_index, decl->Semantic.Name,
1018 decl->Semantic.Index, shader->selector->info.num_inputs,
1019 shader->selector->info.colors_read, interp_param,
1020 LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
1021 LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
1022 &radeon_bld->inputs[radeon_llvm_reg_index_soa(input_index, 0)]);
1023 }
1024
1025 static LLVMValueRef get_sample_id(struct radeon_llvm_context *radeon_bld)
1026 {
1027 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1028 SI_PARAM_ANCILLARY, 8, 4);
1029 }
1030
1031 /**
1032 * Load a dword from a constant buffer.
1033 */
1034 static LLVMValueRef buffer_load_const(LLVMBuilderRef builder, LLVMValueRef resource,
1035 LLVMValueRef offset, LLVMTypeRef return_type)
1036 {
1037 LLVMValueRef args[2] = {resource, offset};
1038
1039 return lp_build_intrinsic(builder, "llvm.SI.load.const", return_type, args, 2,
1040 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1041 }
1042
1043 static LLVMValueRef load_sample_position(struct radeon_llvm_context *radeon_bld, LLVMValueRef sample_id)
1044 {
1045 struct si_shader_context *ctx =
1046 si_shader_context(&radeon_bld->soa.bld_base);
1047 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1048 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1049 LLVMBuilderRef builder = gallivm->builder;
1050 LLVMValueRef desc = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
1051 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_DRIVER_STATE_CONST_BUF);
1052 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1053
1054 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1055 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1056 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1057
1058 LLVMValueRef pos[4] = {
1059 buffer_load_const(builder, resource, offset0, ctx->f32),
1060 buffer_load_const(builder, resource, offset1, ctx->f32),
1061 lp_build_const_float(gallivm, 0),
1062 lp_build_const_float(gallivm, 0)
1063 };
1064
1065 return lp_build_gather_values(gallivm, pos, 4);
1066 }
1067
/* Materialize a TGSI system value (SV) declaration.
 *
 * Computes the LLVM value for the declared semantic from the shader's
 * input SGPRs/VGPRs and stores it in radeon_bld->system_values[index]
 * for later fetches. Unknown semantics assert and store nothing.
 */
static void declare_system_value(
	struct radeon_llvm_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID includes the base vertex; the hw only gives us
		 * the relative id, so add SI_PARAM_BASE_VERTEX here. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS: invocation id lives in bits [12:8] of REL_IDS;
		 * GS: it's a dedicated SGPR. Other stages don't have it. */
		if (ctx->type == TGSI_PROCESSOR_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == TGSI_PROCESSOR_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; W arrives as 1/W-unfriendly raw value,
		 * so take its reciprocal to get the expected 1/W. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position = fractional part of the fragment position,
		 * zw set to 0. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count is packed in bits [31:26] of
		 * TCS_OUT_LAYOUT. */
		value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess factors are read back from LDS at the per-patch data
		 * offset of the current patch. */
		LLVMValueRef dw_addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = LLVMBuildAdd(gallivm->builder, dw_addr,
				       lp_build_const_int32(gallivm, param * 4), "");

		value = lds_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
				 ~0, dw_addr);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1206
1207 static LLVMValueRef fetch_constant(
1208 struct lp_build_tgsi_context *bld_base,
1209 const struct tgsi_full_src_register *reg,
1210 enum tgsi_opcode_type type,
1211 unsigned swizzle)
1212 {
1213 struct si_shader_context *ctx = si_shader_context(bld_base);
1214 struct lp_build_context *base = &bld_base->base;
1215 const struct tgsi_ind_register *ireg = &reg->Indirect;
1216 unsigned buf, idx;
1217
1218 LLVMValueRef addr, bufp;
1219 LLVMValueRef result;
1220
1221 if (swizzle == LP_CHAN_ALL) {
1222 unsigned chan;
1223 LLVMValueRef values[4];
1224 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
1225 values[chan] = fetch_constant(bld_base, reg, type, chan);
1226
1227 return lp_build_gather_values(bld_base->base.gallivm, values, 4);
1228 }
1229
1230 buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
1231 idx = reg->Register.Index * 4 + swizzle;
1232
1233 if (!reg->Register.Indirect && !reg->Dimension.Indirect) {
1234 if (type != TGSI_TYPE_DOUBLE)
1235 return bitcast(bld_base, type, ctx->constants[buf][idx]);
1236 else {
1237 return radeon_llvm_emit_fetch_double(bld_base,
1238 ctx->constants[buf][idx],
1239 ctx->constants[buf][idx + 1]);
1240 }
1241 }
1242
1243 if (reg->Register.Dimension && reg->Dimension.Indirect) {
1244 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
1245 LLVMValueRef index;
1246 index = get_indirect_index(ctx, &reg->DimIndirect,
1247 reg->Dimension.Index);
1248 bufp = build_indexed_load_const(ctx, ptr, index);
1249 } else
1250 bufp = ctx->const_buffers[buf];
1251
1252 addr = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
1253 addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
1254 addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
1255 addr = lp_build_add(&bld_base->uint_bld, addr,
1256 lp_build_const_int32(base->gallivm, idx * 4));
1257
1258 result = buffer_load_const(base->gallivm->builder, bufp,
1259 addr, ctx->f32);
1260
1261 if (type != TGSI_TYPE_DOUBLE)
1262 result = bitcast(bld_base, type, result);
1263 else {
1264 LLVMValueRef addr2, result2;
1265 addr2 = ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle + 1];
1266 addr2 = LLVMBuildLoad(base->gallivm->builder, addr2, "load addr reg2");
1267 addr2 = lp_build_mul_imm(&bld_base->uint_bld, addr2, 16);
1268 addr2 = lp_build_add(&bld_base->uint_bld, addr2,
1269 lp_build_const_int32(base->gallivm, idx * 4));
1270
1271 result2 = buffer_load_const(base->gallivm->builder, ctx->const_buffers[buf],
1272 addr2, ctx->f32);
1273
1274 result = radeon_llvm_emit_fetch_double(bld_base,
1275 result, result2);
1276 }
1277 return result;
1278 }
1279
1280 /* Upper 16 bits must be zero. */
1281 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1282 LLVMValueRef val[2])
1283 {
1284 return LLVMBuildOr(gallivm->builder, val[0],
1285 LLVMBuildShl(gallivm->builder, val[1],
1286 lp_build_const_int32(gallivm, 16),
1287 ""), "");
1288 }
1289
1290 /* Upper 16 bits are ignored and will be dropped. */
1291 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1292 LLVMValueRef val[2])
1293 {
1294 LLVMValueRef v[2] = {
1295 LLVMBuildAnd(gallivm->builder, val[0],
1296 lp_build_const_int32(gallivm, 0xffff), ""),
1297 val[1],
1298 };
1299 return si_llvm_pack_two_int16(gallivm, v);
1300 }
1301
1302 /* Initialize arguments for the shader export intrinsic */
1303 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1304 LLVMValueRef *values,
1305 unsigned target,
1306 LLVMValueRef *args)
1307 {
1308 struct si_shader_context *ctx = si_shader_context(bld_base);
1309 struct lp_build_context *uint =
1310 &ctx->radeon_bld.soa.bld_base.uint_bld;
1311 struct lp_build_context *base = &bld_base->base;
1312 struct gallivm_state *gallivm = base->gallivm;
1313 LLVMBuilderRef builder = base->gallivm->builder;
1314 LLVMValueRef val[4];
1315 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1316 unsigned chan;
1317 bool is_int8;
1318
1319 /* Default is 0xf. Adjusted below depending on the format. */
1320 args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1321
1322 /* Specify whether the EXEC mask represents the valid mask */
1323 args[1] = uint->zero;
1324
1325 /* Specify whether this is the last export */
1326 args[2] = uint->zero;
1327
1328 /* Specify the target we are exporting */
1329 args[3] = lp_build_const_int32(base->gallivm, target);
1330
1331 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
1332 const union si_shader_key *key = &ctx->shader->key;
1333 unsigned col_formats = key->ps.spi_shader_col_format;
1334 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1335
1336 assert(cbuf >= 0 && cbuf < 8);
1337 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1338 is_int8 = (key->ps.color_is_int8 >> cbuf) & 0x1;
1339 }
1340
1341 args[4] = uint->zero; /* COMPR flag */
1342 args[5] = base->undef;
1343 args[6] = base->undef;
1344 args[7] = base->undef;
1345 args[8] = base->undef;
1346
1347 switch (spi_shader_col_format) {
1348 case V_028714_SPI_SHADER_ZERO:
1349 args[0] = uint->zero; /* writemask */
1350 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
1351 break;
1352
1353 case V_028714_SPI_SHADER_32_R:
1354 args[0] = uint->one; /* writemask */
1355 args[5] = values[0];
1356 break;
1357
1358 case V_028714_SPI_SHADER_32_GR:
1359 args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
1360 args[5] = values[0];
1361 args[6] = values[1];
1362 break;
1363
1364 case V_028714_SPI_SHADER_32_AR:
1365 args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
1366 args[5] = values[0];
1367 args[8] = values[3];
1368 break;
1369
1370 case V_028714_SPI_SHADER_FP16_ABGR:
1371 args[4] = uint->one; /* COMPR flag */
1372
1373 for (chan = 0; chan < 2; chan++) {
1374 LLVMValueRef pack_args[2] = {
1375 values[2 * chan],
1376 values[2 * chan + 1]
1377 };
1378 LLVMValueRef packed;
1379
1380 packed = lp_build_intrinsic(base->gallivm->builder,
1381 "llvm.SI.packf16",
1382 ctx->i32, pack_args, 2,
1383 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
1384 args[chan + 5] =
1385 LLVMBuildBitCast(base->gallivm->builder,
1386 packed, ctx->f32, "");
1387 }
1388 break;
1389
1390 case V_028714_SPI_SHADER_UNORM16_ABGR:
1391 for (chan = 0; chan < 4; chan++) {
1392 val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
1393 val[chan] = LLVMBuildFMul(builder, val[chan],
1394 lp_build_const_float(gallivm, 65535), "");
1395 val[chan] = LLVMBuildFAdd(builder, val[chan],
1396 lp_build_const_float(gallivm, 0.5), "");
1397 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1398 ctx->i32, "");
1399 }
1400
1401 args[4] = uint->one; /* COMPR flag */
1402 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1403 si_llvm_pack_two_int16(gallivm, val));
1404 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1405 si_llvm_pack_two_int16(gallivm, val+2));
1406 break;
1407
1408 case V_028714_SPI_SHADER_SNORM16_ABGR:
1409 for (chan = 0; chan < 4; chan++) {
1410 /* Clamp between [-1, 1]. */
1411 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1412 values[chan],
1413 lp_build_const_float(gallivm, 1));
1414 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1415 val[chan],
1416 lp_build_const_float(gallivm, -1));
1417 /* Convert to a signed integer in [-32767, 32767]. */
1418 val[chan] = LLVMBuildFMul(builder, val[chan],
1419 lp_build_const_float(gallivm, 32767), "");
1420 /* If positive, add 0.5, else add -0.5. */
1421 val[chan] = LLVMBuildFAdd(builder, val[chan],
1422 LLVMBuildSelect(builder,
1423 LLVMBuildFCmp(builder, LLVMRealOGE,
1424 val[chan], base->zero, ""),
1425 lp_build_const_float(gallivm, 0.5),
1426 lp_build_const_float(gallivm, -0.5), ""), "");
1427 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1428 }
1429
1430 args[4] = uint->one; /* COMPR flag */
1431 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1432 si_llvm_pack_two_int32_as_int16(gallivm, val));
1433 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1434 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
1435 break;
1436
1437 case V_028714_SPI_SHADER_UINT16_ABGR: {
1438 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
1439 255 : 65535);
1440 /* Clamp. */
1441 for (chan = 0; chan < 4; chan++) {
1442 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1443 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
1444 val[chan], max);
1445 }
1446
1447 args[4] = uint->one; /* COMPR flag */
1448 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1449 si_llvm_pack_two_int16(gallivm, val));
1450 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1451 si_llvm_pack_two_int16(gallivm, val+2));
1452 break;
1453 }
1454
1455 case V_028714_SPI_SHADER_SINT16_ABGR: {
1456 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
1457 127 : 32767);
1458 LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
1459 -128 : -32768);
1460 /* Clamp. */
1461 for (chan = 0; chan < 4; chan++) {
1462 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
1463 val[chan] = lp_build_emit_llvm_binary(bld_base,
1464 TGSI_OPCODE_IMIN,
1465 val[chan], max);
1466 val[chan] = lp_build_emit_llvm_binary(bld_base,
1467 TGSI_OPCODE_IMAX,
1468 val[chan], min);
1469 }
1470
1471 args[4] = uint->one; /* COMPR flag */
1472 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1473 si_llvm_pack_two_int32_as_int16(gallivm, val));
1474 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1475 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
1476 break;
1477 }
1478
1479 case V_028714_SPI_SHADER_32_ABGR:
1480 memcpy(&args[5], values, sizeof(values[0]) * 4);
1481 break;
1482 }
1483 }
1484
1485 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
1486 LLVMValueRef alpha)
1487 {
1488 struct si_shader_context *ctx = si_shader_context(bld_base);
1489 struct gallivm_state *gallivm = bld_base->base.gallivm;
1490
1491 if (ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
1492 LLVMValueRef alpha_ref = LLVMGetParam(ctx->radeon_bld.main_fn,
1493 SI_PARAM_ALPHA_REF);
1494
1495 LLVMValueRef alpha_pass =
1496 lp_build_cmp(&bld_base->base,
1497 ctx->shader->key.ps.alpha_func,
1498 alpha, alpha_ref);
1499 LLVMValueRef arg =
1500 lp_build_select(&bld_base->base,
1501 alpha_pass,
1502 lp_build_const_float(gallivm, 1.0f),
1503 lp_build_const_float(gallivm, -1.0f));
1504
1505 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
1506 ctx->voidt, &arg, 1, 0);
1507 } else {
1508 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
1509 ctx->voidt, NULL, 0, 0);
1510 }
1511 }
1512
1513 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
1514 LLVMValueRef alpha)
1515 {
1516 struct si_shader_context *ctx = si_shader_context(bld_base);
1517 struct gallivm_state *gallivm = bld_base->base.gallivm;
1518 LLVMValueRef coverage;
1519
1520 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
1521 coverage = LLVMGetParam(ctx->radeon_bld.main_fn,
1522 SI_PARAM_SAMPLE_COVERAGE);
1523 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
1524
1525 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
1526 ctx->i32,
1527 &coverage, 1, LLVMReadNoneAttribute);
1528
1529 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
1530 ctx->f32, "");
1531
1532 coverage = LLVMBuildFMul(gallivm->builder, coverage,
1533 lp_build_const_float(gallivm,
1534 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
1535
1536 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
1537 }
1538
/* Compute the clip-distance export arguments from a CLIPVERTEX output.
 *
 * Fills pos[2] and pos[3] (the two clip-distance export slots) with
 * llvm.SI.export argument arrays. Each channel is the dot product of
 * the clip vertex (out_elts) with one user clip plane read from the
 * driver-state constant buffer. Note: args[1] is used as a scratch
 * offset during the dot-product loop and is overwritten with its final
 * export value afterwards.
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->radeon_bld.soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, SI_DRIVER_STATE_CONST_BUF);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	/* Two export slots cover up to 8 clip distances (4 per slot). */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start each channel accumulator at 0. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of plane (reg_index*4+chan),
				 * component const_chan, 4 bytes each. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(base->gallivm->builder, const_resource,
						      args[1], ctx->f32);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Fill in the fixed export arguments. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
1584
1585 static void si_dump_streamout(struct pipe_stream_output_info *so)
1586 {
1587 unsigned i;
1588
1589 if (so->num_outputs)
1590 fprintf(stderr, "STREAMOUT\n");
1591
1592 for (i = 0; i < so->num_outputs; i++) {
1593 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
1594 so->output[i].start_component;
1595 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
1596 i, so->output[i].output_buffer,
1597 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
1598 so->output[i].register_index,
1599 mask & 1 ? "x" : "",
1600 mask & 2 ? "y" : "",
1601 mask & 4 ? "z" : "",
1602 mask & 8 ? "w" : "");
1603 }
1604 }
1605
1606 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
1607 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
1608 * or v4i32 (num_channels=3,4). */
1609 static void build_tbuffer_store(struct si_shader_context *ctx,
1610 LLVMValueRef rsrc,
1611 LLVMValueRef vdata,
1612 unsigned num_channels,
1613 LLVMValueRef vaddr,
1614 LLVMValueRef soffset,
1615 unsigned inst_offset,
1616 unsigned dfmt,
1617 unsigned nfmt,
1618 unsigned offen,
1619 unsigned idxen,
1620 unsigned glc,
1621 unsigned slc,
1622 unsigned tfe)
1623 {
1624 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
1625 LLVMValueRef args[] = {
1626 rsrc,
1627 vdata,
1628 LLVMConstInt(ctx->i32, num_channels, 0),
1629 vaddr,
1630 soffset,
1631 LLVMConstInt(ctx->i32, inst_offset, 0),
1632 LLVMConstInt(ctx->i32, dfmt, 0),
1633 LLVMConstInt(ctx->i32, nfmt, 0),
1634 LLVMConstInt(ctx->i32, offen, 0),
1635 LLVMConstInt(ctx->i32, idxen, 0),
1636 LLVMConstInt(ctx->i32, glc, 0),
1637 LLVMConstInt(ctx->i32, slc, 0),
1638 LLVMConstInt(ctx->i32, tfe, 0)
1639 };
1640
1641 /* The instruction offset field has 12 bits */
1642 assert(offen || inst_offset < (1 << 12));
1643
1644 /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
1645 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1646 const char *types[] = {"i32", "v2i32", "v4i32"};
1647 char name[256];
1648 snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
1649
1650 lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
1651 args, Elements(args), 0);
1652 }
1653
1654 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
1655 LLVMValueRef rsrc,
1656 LLVMValueRef vdata,
1657 unsigned num_channels,
1658 LLVMValueRef vaddr,
1659 LLVMValueRef soffset,
1660 unsigned inst_offset)
1661 {
1662 static unsigned dfmt[] = {
1663 V_008F0C_BUF_DATA_FORMAT_32,
1664 V_008F0C_BUF_DATA_FORMAT_32_32,
1665 V_008F0C_BUF_DATA_FORMAT_32_32_32,
1666 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
1667 };
1668 assert(num_channels >= 1 && num_channels <= 4);
1669
1670 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
1671 inst_offset, dfmt[num_channels-1],
1672 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
1673 }
1674
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers. */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	/* Thread id within the wave. */
	LLVMValueRef tid = lp_build_intrinsic(builder, "llvm.SI.tid", ctx->i32,
					      NULL, 0, LLVMReadNoneAttribute);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Currently selected vertex stream, bits [25:24] of the config SGPR. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->radeon_bld.main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			/* so_offset is in dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->radeon_bld.main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Skip outputs the shader doesn't actually write. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Only the thread processing the selected stream writes. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, ctx->so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
1792
1793
1794 /* Generate export instructions for hardware VS shader stage */
1795 static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
1796 struct si_shader_output_values *outputs,
1797 unsigned noutput)
1798 {
1799 struct si_shader_context *ctx = si_shader_context(bld_base);
1800 struct si_shader *shader = ctx->shader;
1801 struct lp_build_context *base = &bld_base->base;
1802 struct lp_build_context *uint =
1803 &ctx->radeon_bld.soa.bld_base.uint_bld;
1804 LLVMValueRef args[9];
1805 LLVMValueRef pos_args[4][9] = { { 0 } };
1806 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
1807 unsigned semantic_name, semantic_index;
1808 unsigned target;
1809 unsigned param_count = 0;
1810 unsigned pos_idx;
1811 int i;
1812
1813 if (outputs && ctx->shader->selector->so.num_outputs) {
1814 si_llvm_emit_streamout(ctx, outputs, noutput);
1815 }
1816
1817 for (i = 0; i < noutput; i++) {
1818 semantic_name = outputs[i].name;
1819 semantic_index = outputs[i].sid;
1820
1821 handle_semantic:
1822 /* Select the correct target */
1823 switch(semantic_name) {
1824 case TGSI_SEMANTIC_PSIZE:
1825 psize_value = outputs[i].values[0];
1826 continue;
1827 case TGSI_SEMANTIC_EDGEFLAG:
1828 edgeflag_value = outputs[i].values[0];
1829 continue;
1830 case TGSI_SEMANTIC_LAYER:
1831 layer_value = outputs[i].values[0];
1832 semantic_name = TGSI_SEMANTIC_GENERIC;
1833 goto handle_semantic;
1834 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1835 viewport_index_value = outputs[i].values[0];
1836 semantic_name = TGSI_SEMANTIC_GENERIC;
1837 goto handle_semantic;
1838 case TGSI_SEMANTIC_POSITION:
1839 target = V_008DFC_SQ_EXP_POS;
1840 break;
1841 case TGSI_SEMANTIC_COLOR:
1842 case TGSI_SEMANTIC_BCOLOR:
1843 target = V_008DFC_SQ_EXP_PARAM + param_count;
1844 shader->vs_output_param_offset[i] = param_count;
1845 param_count++;
1846 break;
1847 case TGSI_SEMANTIC_CLIPDIST:
1848 target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
1849 break;
1850 case TGSI_SEMANTIC_CLIPVERTEX:
1851 si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
1852 continue;
1853 case TGSI_SEMANTIC_PRIMID:
1854 case TGSI_SEMANTIC_FOG:
1855 case TGSI_SEMANTIC_TEXCOORD:
1856 case TGSI_SEMANTIC_GENERIC:
1857 target = V_008DFC_SQ_EXP_PARAM + param_count;
1858 shader->vs_output_param_offset[i] = param_count;
1859 param_count++;
1860 break;
1861 default:
1862 target = 0;
1863 fprintf(stderr,
1864 "Warning: SI unhandled vs output type:%d\n",
1865 semantic_name);
1866 }
1867
1868 si_llvm_init_export_args(bld_base, outputs[i].values, target, args);
1869
1870 if (target >= V_008DFC_SQ_EXP_POS &&
1871 target <= (V_008DFC_SQ_EXP_POS + 3)) {
1872 memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
1873 args, sizeof(args));
1874 } else {
1875 lp_build_intrinsic(base->gallivm->builder,
1876 "llvm.SI.export", ctx->voidt,
1877 args, 9, 0);
1878 }
1879
1880 if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
1881 semantic_name = TGSI_SEMANTIC_GENERIC;
1882 goto handle_semantic;
1883 }
1884 }
1885
1886 shader->nr_param_exports = param_count;
1887
1888 /* We need to add the position output manually if it's missing. */
1889 if (!pos_args[0][0]) {
1890 pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1891 pos_args[0][1] = uint->zero; /* EXEC mask */
1892 pos_args[0][2] = uint->zero; /* last export? */
1893 pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
1894 pos_args[0][4] = uint->zero; /* COMPR flag */
1895 pos_args[0][5] = base->zero; /* X */
1896 pos_args[0][6] = base->zero; /* Y */
1897 pos_args[0][7] = base->zero; /* Z */
1898 pos_args[0][8] = base->one; /* W */
1899 }
1900
1901 /* Write the misc vector (point size, edgeflag, layer, viewport). */
1902 if (shader->selector->info.writes_psize ||
1903 shader->selector->info.writes_edgeflag ||
1904 shader->selector->info.writes_viewport_index ||
1905 shader->selector->info.writes_layer) {
1906 pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
1907 shader->selector->info.writes_psize |
1908 (shader->selector->info.writes_edgeflag << 1) |
1909 (shader->selector->info.writes_layer << 2) |
1910 (shader->selector->info.writes_viewport_index << 3));
1911 pos_args[1][1] = uint->zero; /* EXEC mask */
1912 pos_args[1][2] = uint->zero; /* last export? */
1913 pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
1914 pos_args[1][4] = uint->zero; /* COMPR flag */
1915 pos_args[1][5] = base->zero; /* X */
1916 pos_args[1][6] = base->zero; /* Y */
1917 pos_args[1][7] = base->zero; /* Z */
1918 pos_args[1][8] = base->zero; /* W */
1919
1920 if (shader->selector->info.writes_psize)
1921 pos_args[1][5] = psize_value;
1922
1923 if (shader->selector->info.writes_edgeflag) {
1924 /* The output is a float, but the hw expects an integer
1925 * with the first bit containing the edge flag. */
1926 edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
1927 edgeflag_value,
1928 ctx->i32, "");
1929 edgeflag_value = lp_build_min(&bld_base->int_bld,
1930 edgeflag_value,
1931 bld_base->int_bld.one);
1932
1933 /* The LLVM intrinsic expects a float. */
1934 pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
1935 edgeflag_value,
1936 ctx->f32, "");
1937 }
1938
1939 if (shader->selector->info.writes_layer)
1940 pos_args[1][7] = layer_value;
1941
1942 if (shader->selector->info.writes_viewport_index)
1943 pos_args[1][8] = viewport_index_value;
1944 }
1945
1946 for (i = 0; i < 4; i++)
1947 if (pos_args[i][0])
1948 shader->nr_pos_exports++;
1949
1950 pos_idx = 0;
1951 for (i = 0; i < 4; i++) {
1952 if (!pos_args[i][0])
1953 continue;
1954
1955 /* Specify the target we are exporting */
1956 pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);
1957
1958 if (pos_idx == shader->nr_pos_exports)
1959 /* Specify that this is the last export */
1960 pos_args[i][2] = uint->one;
1961
1962 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
1963 ctx->voidt, pos_args[i], 9, 0);
1964 }
1965 }
1966
/**
 * Write the tessellation factors of the current patch from LDS out to
 * the tess-factor ring buffer.
 *
 * \param rel_patch_id    patch index relative to the current thread group
 * \param invocation_id   TCS invocation (output control point) index
 * \param tcs_out_current_patch_data_offset
 *                        LDS dword offset of this patch's per-patch outputs
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx;

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* The outer factors are packed first in out[], then the inner ones. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->radeon_bld.main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->radeon_bld.main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	/* Store the outputs: up to one vec4, plus a trailing vec2 at byte
	 * offset 16 when stride is 6 (quads). */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 0);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 16);
	lp_build_endif(&if_ctx);
}
2059
2060 /* This only writes the tessellation factor levels. */
2061 static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
2062 {
2063 struct si_shader_context *ctx = si_shader_context(bld_base);
2064 LLVMValueRef invocation_id;
2065
2066 invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
2067
2068 si_write_tess_factors(bld_base,
2069 get_rel_patch_id(ctx),
2070 invocation_id,
2071 get_tcs_out_current_patch_data_offset(ctx));
2072 }
2073
2074 static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
2075 {
2076 struct si_shader_context *ctx = si_shader_context(bld_base);
2077 struct si_shader *shader = ctx->shader;
2078 struct tgsi_shader_info *info = &shader->selector->info;
2079 struct gallivm_state *gallivm = bld_base->base.gallivm;
2080 unsigned i, chan;
2081 LLVMValueRef vertex_id = LLVMGetParam(ctx->radeon_bld.main_fn,
2082 ctx->param_rel_auto_id);
2083 LLVMValueRef vertex_dw_stride =
2084 unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
2085 LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
2086 vertex_dw_stride, "");
2087
2088 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2089 * its inputs from it. */
2090 for (i = 0; i < info->num_outputs; i++) {
2091 LLVMValueRef *out_ptr = ctx->radeon_bld.soa.outputs[i];
2092 unsigned name = info->output_semantic_name[i];
2093 unsigned index = info->output_semantic_index[i];
2094 int param = si_shader_io_get_unique_index(name, index);
2095 LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
2096 lp_build_const_int32(gallivm, param * 4), "");
2097
2098 for (chan = 0; chan < 4; chan++) {
2099 lds_store(bld_base, chan, dw_addr,
2100 LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
2101 }
2102 }
2103 }
2104
2105 static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
2106 {
2107 struct si_shader_context *ctx = si_shader_context(bld_base);
2108 struct gallivm_state *gallivm = bld_base->base.gallivm;
2109 struct si_shader *es = ctx->shader;
2110 struct tgsi_shader_info *info = &es->selector->info;
2111 LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
2112 ctx->param_es2gs_offset);
2113 unsigned chan;
2114 int i;
2115
2116 for (i = 0; i < info->num_outputs; i++) {
2117 LLVMValueRef *out_ptr =
2118 ctx->radeon_bld.soa.outputs[i];
2119 int param_index;
2120
2121 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2122 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2123 continue;
2124
2125 param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
2126 info->output_semantic_index[i]);
2127
2128 for (chan = 0; chan < 4; chan++) {
2129 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
2130 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
2131
2132 build_tbuffer_store(ctx,
2133 ctx->esgs_ring,
2134 out_val, 1,
2135 LLVMGetUndef(ctx->i32), soffset,
2136 (4 * param_index + chan) * 4,
2137 V_008F0C_BUF_DATA_FORMAT_32,
2138 V_008F0C_BUF_NUM_FORMAT_UINT,
2139 0, 0, 1, 1, 0);
2140 }
2141 }
2142 }
2143
2144 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2145 {
2146 struct si_shader_context *ctx = si_shader_context(bld_base);
2147 struct gallivm_state *gallivm = bld_base->base.gallivm;
2148 LLVMValueRef args[2];
2149
2150 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2151 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
2152 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2153 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
2154 }
2155
2156 static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
2157 {
2158 struct si_shader_context *ctx = si_shader_context(bld_base);
2159 struct gallivm_state *gallivm = bld_base->base.gallivm;
2160 struct tgsi_shader_info *info = &ctx->shader->selector->info;
2161 struct si_shader_output_values *outputs = NULL;
2162 int i,j;
2163
2164 assert(!ctx->is_gs_copy_shader);
2165
2166 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2167
2168 /* Vertex color clamping.
2169 *
2170 * This uses a state constant loaded in a user data SGPR and
2171 * an IF statement is added that clamps all colors if the constant
2172 * is true.
2173 */
2174 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
2175 struct lp_build_if_state if_ctx;
2176 LLVMValueRef cond = NULL;
2177 LLVMValueRef addr, val;
2178
2179 for (i = 0; i < info->num_outputs; i++) {
2180 if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
2181 info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
2182 continue;
2183
2184 /* We've found a color. */
2185 if (!cond) {
2186 /* The state is in the first bit of the user SGPR. */
2187 cond = LLVMGetParam(ctx->radeon_bld.main_fn,
2188 SI_PARAM_VS_STATE_BITS);
2189 cond = LLVMBuildTrunc(gallivm->builder, cond,
2190 ctx->i1, "");
2191 lp_build_if(&if_ctx, gallivm, cond);
2192 }
2193
2194 for (j = 0; j < 4; j++) {
2195 addr = ctx->radeon_bld.soa.outputs[i][j];
2196 val = LLVMBuildLoad(gallivm->builder, addr, "");
2197 val = radeon_llvm_saturate(bld_base, val);
2198 LLVMBuildStore(gallivm->builder, val, addr);
2199 }
2200 }
2201
2202 if (cond)
2203 lp_build_endif(&if_ctx);
2204 }
2205
2206 for (i = 0; i < info->num_outputs; i++) {
2207 outputs[i].name = info->output_semantic_name[i];
2208 outputs[i].sid = info->output_semantic_index[i];
2209
2210 for (j = 0; j < 4; j++)
2211 outputs[i].values[j] =
2212 LLVMBuildLoad(gallivm->builder,
2213 ctx->radeon_bld.soa.outputs[i][j],
2214 "");
2215 }
2216
2217 /* Export PrimitiveID when PS needs it. */
2218 if (si_vs_exports_prim_id(ctx->shader)) {
2219 outputs[i].name = TGSI_SEMANTIC_PRIMID;
2220 outputs[i].sid = 0;
2221 outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2222 get_primitive_id(bld_base, 0));
2223 outputs[i].values[1] = bld_base->base.undef;
2224 outputs[i].values[2] = bld_base->base.undef;
2225 outputs[i].values[3] = bld_base->base.undef;
2226 i++;
2227 }
2228
2229 si_llvm_export_vs(bld_base, outputs, i);
2230 FREE(outputs);
2231 }
2232
2233 static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
2234 LLVMValueRef depth, LLVMValueRef stencil,
2235 LLVMValueRef samplemask)
2236 {
2237 struct si_shader_context *ctx = si_shader_context(bld_base);
2238 struct lp_build_context *base = &bld_base->base;
2239 struct lp_build_context *uint = &bld_base->uint_bld;
2240 LLVMValueRef args[9];
2241 unsigned mask = 0;
2242
2243 assert(depth || stencil || samplemask);
2244
2245 args[1] = uint->one; /* whether the EXEC mask is valid */
2246 args[2] = uint->one; /* DONE bit */
2247
2248 /* Specify the target we are exporting */
2249 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);
2250
2251 args[4] = uint->zero; /* COMP flag */
2252 args[5] = base->undef; /* R, depth */
2253 args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
2254 args[7] = base->undef; /* B, sample mask */
2255 args[8] = base->undef; /* A, alpha to mask */
2256
2257 if (depth) {
2258 args[5] = depth;
2259 mask |= 0x1;
2260 }
2261
2262 if (stencil) {
2263 args[6] = stencil;
2264 mask |= 0x2;
2265 }
2266
2267 if (samplemask) {
2268 args[7] = samplemask;
2269 mask |= 0x4;
2270 }
2271
2272 /* SI (except OLAND) has a bug that it only looks
2273 * at the X writemask component. */
2274 if (ctx->screen->b.chip_class == SI &&
2275 ctx->screen->b.family != CHIP_OLAND)
2276 mask |= 0x1;
2277
2278 /* Specify which components to enable */
2279 args[0] = lp_build_const_int32(base->gallivm, mask);
2280
2281 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
2282 ctx->voidt, args, 9, 0);
2283 }
2284
/**
 * Export a pixel shader color output to one or more MRTs.
 *
 * First applies the shader-key-driven color post-processing (clamping,
 * alpha-to-one, alpha test, smoothing) — note that \p color is modified
 * in place. Then either exports to MRT[index], or, when
 * FS_COLOR0_WRITES_ALL_CBUFS is in effect (key.ps.last_cbuf > 0),
 * replicates the color to every colorbuffer up to last_cbuf.
 *
 * \param color    the 4 color components (modified in place)
 * \param index    color semantic index / MRT index
 * \param is_last  true if this is the shader's final color export; the
 *                 DONE and valid-EXEC bits are set on the last export
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				bool is_last)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.ps.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = radeon_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.alpha_to_one)
		color[3] = base->one;

	/* Alpha test (only performed on color output 0) */
	if (index == 0 &&
	    ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.ps.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3]);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.ps.last_cbuf > 0) {
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is.
		 * args[c][0] == uint zero means a disabled (NULL) export. */
		for (c = 0; c <= ctx->shader->key.ps.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
					   ctx->voidt, args[c], 9, 0);
		}
	} else {
		LLVMValueRef args[9];

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, args, 9, 0);
	}
}
2351
2352 static void si_export_null(struct lp_build_tgsi_context *bld_base)
2353 {
2354 struct si_shader_context *ctx = si_shader_context(bld_base);
2355 struct lp_build_context *base = &bld_base->base;
2356 struct lp_build_context *uint = &bld_base->uint_bld;
2357 LLVMValueRef args[9];
2358
2359 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
2360 args[1] = uint->one; /* whether the EXEC mask is valid */
2361 args[2] = uint->one; /* DONE bit */
2362 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
2363 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
2364 args[5] = uint->undef; /* R */
2365 args[6] = uint->undef; /* G */
2366 args[7] = uint->undef; /* B */
2367 args[8] = uint->undef; /* A */
2368
2369 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
2370 ctx->voidt, args, 9, 0);
2371 }
2372
/**
 * Pixel shader epilogue: emit all color/depth/stencil/samplemask exports.
 *
 * First determines which color export must carry the DONE bit (MRTZ is
 * always last when depth/stencil/samplemask are written, so the search
 * only runs when none of those are present), then walks the outputs and
 * exports them; MRTZ, if any, is exported last.
 */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* Each colorbuffer uses 4 bits in spi_shader_col_format. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component is exported as depth. */
			depth = LLVMBuildLoad(builder,
					      ctx->radeon_bld.soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->radeon_bld.soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->radeon_bld.soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->radeon_bld.soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    last_color_export == i);
			break;
		default:
			/* Unknown outputs are skipped with a warning only. */
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* MRTZ must be the last export when present. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask);
}
2460
2461 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
2462 struct lp_build_tgsi_context *bld_base,
2463 struct lp_build_emit_data *emit_data);
2464
2465 static bool tgsi_is_array_sampler(unsigned target)
2466 {
2467 return target == TGSI_TEXTURE_1D_ARRAY ||
2468 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
2469 target == TGSI_TEXTURE_2D_ARRAY ||
2470 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
2471 target == TGSI_TEXTURE_CUBE_ARRAY ||
2472 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
2473 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
2474 }
2475
/**
 * Fill \p emit_data with the argument list for a texture intrinsic:
 * the gathered address vector, the resource (and sampler) descriptors,
 * and the trailing i32 modifier flags.
 *
 * \param opcode   TGSI opcode; TXF/TXQ get an integer result type and
 *                 take no sampler argument
 * \param target   TGSI texture target
 * \param res_ptr  resource descriptor
 * \param samp_ptr sampler state descriptor (ignored for TXF/TXQ)
 * \param param    address components; padded here with undefs up to the
 *                 next power of two
 * \param count    number of valid entries in \p param
 * \param dmask    destination channel mask passed to the instruction
 */
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned opcode, unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	unsigned num_args;
	unsigned is_rect = target == TGSI_TEXTURE_RECT;

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	/* Texture coordinates. */
	if (count > 1)
		emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
	else
		emit_data->args[0] = param[0];

	/* Resource. */
	emit_data->args[1] = res_ptr;
	num_args = 2;

	/* TXF/TXQ return integers and need no sampler; everything else
	 * returns floats and takes the sampler state. */
	if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
		emit_data->dst_type = ctx->v4i32;
	else {
		emit_data->dst_type = ctx->v4f32;

		emit_data->args[num_args++] = samp_ptr;
	}

	emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm,
					tgsi_is_array_sampler(target)); /* da */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */

	emit_data->arg_count = num_args;
}
2521
2522 static const struct lp_build_tgsi_action tex_action;
2523
/* Which descriptor to load from a combined sampler-view slot
 * (see get_sampler_desc for the per-type index math). */
enum desc_type {
	DESC_IMAGE,   /* image/texture resource descriptor */
	DESC_FMASK,   /* MSAA FMASK descriptor */
	DESC_SAMPLER  /* sampler state descriptor */
};
2529
2530 static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
2531 {
2532 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
2533 CONST_ADDR_SPACE);
2534 }
2535
/**
 * Load an image view, fmask view, or sampler state descriptor from the
 * SI_PARAM_SAMPLERS descriptor array.
 *
 * \param index  sampler slot index (scaled below per descriptor type)
 * \param type   which descriptor of the slot to load
 */
static LLVMValueRef get_sampler_desc(struct si_shader_context *ctx,
				     LLVMValueRef index, enum desc_type type)
{
	struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
					SI_PARAM_SAMPLERS);

	switch (type) {
	case DESC_IMAGE:
		/* The image is at [0:7]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		break;
	case DESC_FMASK:
		/* The FMASK is at [8:15]. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
		index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
		break;
	case DESC_SAMPLER:
		/* The sampler state is at [12:15]. Re-point the array type
		 * at v4i32 elements so index*4+3 lands on those dwords. */
		index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
		index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
		ptr = LLVMBuildPointerCast(builder, ptr,
					   const_array(ctx->v4i32, 0), "");
		break;
	}

	return build_indexed_load_const(ctx, ptr, index);
}
2568
/**
 * Resolve the resource, sampler and FMASK descriptors for a texture
 * instruction.
 *
 * The sampler operand is the instruction's last source register. With a
 * direct index, the pre-loaded per-slot values from the context are
 * returned; with an indirect index, the descriptors are loaded via
 * get_sampler_desc. For MSAA targets, the FMASK descriptor is returned
 * in place of the sampler state (which MSAA fetches don't use).
 *
 * \param[out] res_ptr    image/texture resource descriptor
 * \param[out] samp_ptr   sampler state, or NULL for MSAA targets (indirect case)
 * \param[out] fmask_ptr  FMASK descriptor, or NULL for non-MSAA (indirect case)
 */
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	unsigned sampler_index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (emit_data->inst->Src[sampler_src].Register.Indirect) {
		const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];
		LLVMValueRef ind_index;

		ind_index = get_indirect_index(ctx, &reg->Indirect, reg->Register.Index);

		*res_ptr = get_sampler_desc(ctx, ind_index, DESC_IMAGE);

		if (target == TGSI_TEXTURE_2D_MSAA ||
		    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
			*samp_ptr = NULL;
			*fmask_ptr = get_sampler_desc(ctx, ind_index, DESC_FMASK);
		} else {
			*samp_ptr = get_sampler_desc(ctx, ind_index, DESC_SAMPLER);
			*fmask_ptr = NULL;
		}
	} else {
		/* Direct index: use the descriptors pre-loaded per slot. */
		*res_ptr = ctx->sampler_views[sampler_index];
		*samp_ptr = ctx->sampler_states[sampler_index];
		*fmask_ptr = ctx->fmasks[sampler_index];
	}
}
2605
2606 static void tex_fetch_args(
2607 struct lp_build_tgsi_context *bld_base,
2608 struct lp_build_emit_data *emit_data)
2609 {
2610 struct si_shader_context *ctx = si_shader_context(bld_base);
2611 struct gallivm_state *gallivm = bld_base->base.gallivm;
2612 LLVMBuilderRef builder = gallivm->builder;
2613 const struct tgsi_full_instruction *inst = emit_data->inst;
2614 unsigned opcode = inst->Instruction.Opcode;
2615 unsigned target = inst->Texture.Texture;
2616 LLVMValueRef coords[5], derivs[6];
2617 LLVMValueRef address[16];
2618 int ref_pos;
2619 unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
2620 unsigned count = 0;
2621 unsigned chan;
2622 unsigned num_deriv_channels = 0;
2623 bool has_offset = inst->Texture.NumOffsets > 0;
2624 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
2625 unsigned dmask = 0xf;
2626
2627 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
2628
2629 if (opcode == TGSI_OPCODE_TXQ) {
2630 if (target == TGSI_TEXTURE_BUFFER) {
2631 /* Read the size from the buffer descriptor directly. */
2632 LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
2633 LLVMValueRef size = LLVMBuildExtractElement(builder, res,
2634 lp_build_const_int32(gallivm, 6), "");
2635
2636 if (ctx->screen->b.chip_class >= VI) {
2637 /* On VI, the descriptor contains the size in bytes,
2638 * but TXQ must return the size in elements.
2639 * The stride is always non-zero for resources using TXQ.
2640 */
2641 LLVMValueRef stride =
2642 LLVMBuildExtractElement(builder, res,
2643 lp_build_const_int32(gallivm, 5), "");
2644 stride = LLVMBuildLShr(builder, stride,
2645 lp_build_const_int32(gallivm, 16), "");
2646 stride = LLVMBuildAnd(builder, stride,
2647 lp_build_const_int32(gallivm, 0x3FFF), "");
2648
2649 size = LLVMBuildUDiv(builder, size, stride, "");
2650 }
2651
2652 emit_data->args[0] = size;
2653 return;
2654 }
2655
2656 /* Textures - set the mip level. */
2657 address[count++] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
2658
2659 set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
2660 NULL, address, count, 0xf);
2661 return;
2662 }
2663
2664 if (target == TGSI_TEXTURE_BUFFER) {
2665 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
2666
2667 /* Bitcast and truncate v8i32 to v16i8. */
2668 LLVMValueRef res = res_ptr;
2669 res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
2670 res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
2671 res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");
2672
2673 emit_data->dst_type = ctx->v4f32;
2674 emit_data->args[0] = res;
2675 emit_data->args[1] = bld_base->uint_bld.zero;
2676 emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
2677 emit_data->arg_count = 3;
2678 return;
2679 }
2680
2681 /* Fetch and project texture coordinates */
2682 coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
2683 for (chan = 0; chan < 3; chan++ ) {
2684 coords[chan] = lp_build_emit_fetch(bld_base,
2685 emit_data->inst, 0,
2686 chan);
2687 if (opcode == TGSI_OPCODE_TXP)
2688 coords[chan] = lp_build_emit_llvm_binary(bld_base,
2689 TGSI_OPCODE_DIV,
2690 coords[chan],
2691 coords[3]);
2692 }
2693
2694 if (opcode == TGSI_OPCODE_TXP)
2695 coords[3] = bld_base->base.one;
2696
2697 /* Pack offsets. */
2698 if (has_offset && opcode != TGSI_OPCODE_TXF) {
2699 /* The offsets are six-bit signed integers packed like this:
2700 * X=[5:0], Y=[13:8], and Z=[21:16].
2701 */
2702 LLVMValueRef offset[3], pack;
2703
2704 assert(inst->Texture.NumOffsets == 1);
2705
2706 for (chan = 0; chan < 3; chan++) {
2707 offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
2708 emit_data->inst, 0, chan);
2709 offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
2710 lp_build_const_int32(gallivm, 0x3f), "");
2711 if (chan)
2712 offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
2713 lp_build_const_int32(gallivm, chan*8), "");
2714 }
2715
2716 pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
2717 pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
2718 address[count++] = pack;
2719 }
2720
2721 /* Pack LOD bias value */
2722 if (opcode == TGSI_OPCODE_TXB)
2723 address[count++] = coords[3];
2724 if (opcode == TGSI_OPCODE_TXB2)
2725 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
2726
2727 /* Pack depth comparison value */
2728 if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
2729 if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
2730 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
2731 } else {
2732 assert(ref_pos >= 0);
2733 address[count++] = coords[ref_pos];
2734 }
2735 }
2736
2737 /* Pack user derivatives */
2738 if (opcode == TGSI_OPCODE_TXD) {
2739 int param, num_src_deriv_channels;
2740
2741 switch (target) {
2742 case TGSI_TEXTURE_3D:
2743 num_src_deriv_channels = 3;
2744 num_deriv_channels = 3;
2745 break;
2746 case TGSI_TEXTURE_2D:
2747 case TGSI_TEXTURE_SHADOW2D:
2748 case TGSI_TEXTURE_RECT:
2749 case TGSI_TEXTURE_SHADOWRECT:
2750 case TGSI_TEXTURE_2D_ARRAY:
2751 case TGSI_TEXTURE_SHADOW2D_ARRAY:
2752 num_src_deriv_channels = 2;
2753 num_deriv_channels = 2;
2754 break;
2755 case TGSI_TEXTURE_CUBE:
2756 case TGSI_TEXTURE_SHADOWCUBE:
2757 case TGSI_TEXTURE_CUBE_ARRAY:
2758 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
2759 /* Cube derivatives will be converted to 2D. */
2760 num_src_deriv_channels = 3;
2761 num_deriv_channels = 2;
2762 break;
2763 case TGSI_TEXTURE_1D:
2764 case TGSI_TEXTURE_SHADOW1D:
2765 case TGSI_TEXTURE_1D_ARRAY:
2766 case TGSI_TEXTURE_SHADOW1D_ARRAY:
2767 num_src_deriv_channels = 1;
2768 num_deriv_channels = 1;
2769 break;
2770 default:
2771 unreachable("invalid target");
2772 }
2773
2774 for (param = 0; param < 2; param++)
2775 for (chan = 0; chan < num_src_deriv_channels; chan++)
2776 derivs[param * num_src_deriv_channels + chan] =
2777 lp_build_emit_fetch(bld_base, inst, param+1, chan);
2778 }
2779
2780 if (target == TGSI_TEXTURE_CUBE ||
2781 target == TGSI_TEXTURE_CUBE_ARRAY ||
2782 target == TGSI_TEXTURE_SHADOWCUBE ||
2783 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
2784 radeon_llvm_emit_prepare_cube_coords(bld_base, emit_data, coords, derivs);
2785
2786 if (opcode == TGSI_OPCODE_TXD)
2787 for (int i = 0; i < num_deriv_channels * 2; i++)
2788 address[count++] = derivs[i];
2789
2790 /* Pack texture coordinates */
2791 address[count++] = coords[0];
2792 if (num_coords > 1)
2793 address[count++] = coords[1];
2794 if (num_coords > 2)
2795 address[count++] = coords[2];
2796
2797 /* Pack LOD or sample index */
2798 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
2799 address[count++] = coords[3];
2800 else if (opcode == TGSI_OPCODE_TXL2)
2801 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
2802
2803 if (count > 16) {
2804 assert(!"Cannot handle more than 16 texture address parameters");
2805 count = 16;
2806 }
2807
2808 for (chan = 0; chan < count; chan++ ) {
2809 address[chan] = LLVMBuildBitCast(gallivm->builder,
2810 address[chan], ctx->i32, "");
2811 }
2812
2813 /* Adjust the sample index according to FMASK.
2814 *
2815 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
2816 * which is the identity mapping. Each nibble says which physical sample
2817 * should be fetched to get that sample.
2818 *
2819 * For example, 0x11111100 means there are only 2 samples stored and
2820 * the second sample covers 3/4 of the pixel. When reading samples 0
2821 * and 1, return physical sample 0 (determined by the first two 0s
2822 * in FMASK), otherwise return physical sample 1.
2823 *
2824 * The sample index should be adjusted as follows:
2825 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
2826 */
2827 if (target == TGSI_TEXTURE_2D_MSAA ||
2828 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
2829 struct lp_build_context *uint_bld = &bld_base->uint_bld;
2830 struct lp_build_emit_data txf_emit_data = *emit_data;
2831 LLVMValueRef txf_address[4];
2832 unsigned txf_count = count;
2833 struct tgsi_full_instruction inst = {};
2834
2835 memcpy(txf_address, address, sizeof(txf_address));
2836
2837 if (target == TGSI_TEXTURE_2D_MSAA) {
2838 txf_address[2] = bld_base->uint_bld.zero;
2839 }
2840 txf_address[3] = bld_base->uint_bld.zero;
2841
2842 /* Read FMASK using TXF. */
2843 inst.Instruction.Opcode = TGSI_OPCODE_TXF;
2844 inst.Texture.Texture = target;
2845 txf_emit_data.inst = &inst;
2846 txf_emit_data.chan = 0;
2847 set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
2848 target, fmask_ptr, NULL,
2849 txf_address, txf_count, 0xf);
2850 build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);
2851
2852 /* Initialize some constants. */
2853 LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
2854 LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);
2855
2856 /* Apply the formula. */
2857 LLVMValueRef fmask =
2858 LLVMBuildExtractElement(gallivm->builder,
2859 txf_emit_data.output[0],
2860 uint_bld->zero, "");
2861
2862 unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
2863
2864 LLVMValueRef sample_index4 =
2865 LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
2866
2867 LLVMValueRef shifted_fmask =
2868 LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");
2869
2870 LLVMValueRef final_sample =
2871 LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");
2872
2873 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
2874 * resource descriptor is 0 (invalid),
2875 */
2876 LLVMValueRef fmask_desc =
2877 LLVMBuildBitCast(gallivm->builder, fmask_ptr,
2878 ctx->v8i32, "");
2879
2880 LLVMValueRef fmask_word1 =
2881 LLVMBuildExtractElement(gallivm->builder, fmask_desc,
2882 uint_bld->one, "");
2883
2884 LLVMValueRef word1_is_nonzero =
2885 LLVMBuildICmp(gallivm->builder, LLVMIntNE,
2886 fmask_word1, uint_bld->zero, "");
2887
2888 /* Replace the MSAA sample index. */
2889 address[sample_chan] =
2890 LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
2891 final_sample, address[sample_chan], "");
2892 }
2893
2894 if (opcode == TGSI_OPCODE_TXF) {
2895 /* add tex offsets */
2896 if (inst->Texture.NumOffsets) {
2897 struct lp_build_context *uint_bld = &bld_base->uint_bld;
2898 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
2899 const struct tgsi_texture_offset *off = inst->TexOffsets;
2900
2901 assert(inst->Texture.NumOffsets == 1);
2902
2903 switch (target) {
2904 case TGSI_TEXTURE_3D:
2905 address[2] = lp_build_add(uint_bld, address[2],
2906 bld->immediates[off->Index][off->SwizzleZ]);
2907 /* fall through */
2908 case TGSI_TEXTURE_2D:
2909 case TGSI_TEXTURE_SHADOW2D:
2910 case TGSI_TEXTURE_RECT:
2911 case TGSI_TEXTURE_SHADOWRECT:
2912 case TGSI_TEXTURE_2D_ARRAY:
2913 case TGSI_TEXTURE_SHADOW2D_ARRAY:
2914 address[1] =
2915 lp_build_add(uint_bld, address[1],
2916 bld->immediates[off->Index][off->SwizzleY]);
2917 /* fall through */
2918 case TGSI_TEXTURE_1D:
2919 case TGSI_TEXTURE_SHADOW1D:
2920 case TGSI_TEXTURE_1D_ARRAY:
2921 case TGSI_TEXTURE_SHADOW1D_ARRAY:
2922 address[0] =
2923 lp_build_add(uint_bld, address[0],
2924 bld->immediates[off->Index][off->SwizzleX]);
2925 break;
2926 /* texture offsets do not apply to other texture targets */
2927 }
2928 }
2929 }
2930
2931 if (opcode == TGSI_OPCODE_TG4) {
2932 unsigned gather_comp = 0;
2933
2934 /* DMASK was repurposed for GATHER4. 4 components are always
2935 * returned and DMASK works like a swizzle - it selects
2936 * the component to fetch. The only valid DMASK values are
2937 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2938 * (red,red,red,red) etc.) The ISA document doesn't mention
2939 * this.
2940 */
2941
2942 /* Get the component index from src1.x for Gather4. */
2943 if (!tgsi_is_shadow_target(target)) {
2944 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
2945 LLVMValueRef comp_imm;
2946 struct tgsi_src_register src1 = inst->Src[1].Register;
2947
2948 assert(src1.File == TGSI_FILE_IMMEDIATE);
2949
2950 comp_imm = imms[src1.Index][src1.SwizzleX];
2951 gather_comp = LLVMConstIntGetZExtValue(comp_imm);
2952 gather_comp = CLAMP(gather_comp, 0, 3);
2953 }
2954
2955 dmask = 1 << gather_comp;
2956 }
2957
2958 set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
2959 samp_ptr, address, count, dmask);
2960 }
2961
2962 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
2963 struct lp_build_tgsi_context *bld_base,
2964 struct lp_build_emit_data *emit_data)
2965 {
2966 struct lp_build_context *base = &bld_base->base;
2967 unsigned opcode = emit_data->inst->Instruction.Opcode;
2968 unsigned target = emit_data->inst->Texture.Texture;
2969 char intr_name[127];
2970 bool has_offset = emit_data->inst->Texture.NumOffsets > 0;
2971 bool is_shadow = tgsi_is_shadow_target(target);
2972 char type[64];
2973 const char *name = "llvm.SI.image.sample";
2974 const char *infix = "";
2975
2976 if (opcode == TGSI_OPCODE_TXQ && target == TGSI_TEXTURE_BUFFER) {
2977 /* Just return the buffer size. */
2978 emit_data->output[emit_data->chan] = emit_data->args[0];
2979 return;
2980 }
2981
2982 if (target == TGSI_TEXTURE_BUFFER) {
2983 emit_data->output[emit_data->chan] = lp_build_intrinsic(
2984 base->gallivm->builder,
2985 "llvm.SI.vs.load.input", emit_data->dst_type,
2986 emit_data->args, emit_data->arg_count,
2987 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
2988 return;
2989 }
2990
2991 switch (opcode) {
2992 case TGSI_OPCODE_TXF:
2993 name = target == TGSI_TEXTURE_2D_MSAA ||
2994 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
2995 "llvm.SI.image.load" :
2996 "llvm.SI.image.load.mip";
2997 is_shadow = false;
2998 has_offset = false;
2999 break;
3000 case TGSI_OPCODE_TXQ:
3001 name = "llvm.SI.getresinfo";
3002 is_shadow = false;
3003 has_offset = false;
3004 break;
3005 case TGSI_OPCODE_LODQ:
3006 name = "llvm.SI.getlod";
3007 is_shadow = false;
3008 has_offset = false;
3009 break;
3010 case TGSI_OPCODE_TEX:
3011 case TGSI_OPCODE_TEX2:
3012 case TGSI_OPCODE_TXP:
3013 break;
3014 case TGSI_OPCODE_TXB:
3015 case TGSI_OPCODE_TXB2:
3016 infix = ".b";
3017 break;
3018 case TGSI_OPCODE_TXL:
3019 case TGSI_OPCODE_TXL2:
3020 infix = ".l";
3021 break;
3022 case TGSI_OPCODE_TXD:
3023 infix = ".d";
3024 break;
3025 case TGSI_OPCODE_TG4:
3026 name = "llvm.SI.gather4";
3027 break;
3028 default:
3029 assert(0);
3030 return;
3031 }
3032
3033 if (LLVMGetTypeKind(LLVMTypeOf(emit_data->args[0])) == LLVMVectorTypeKind)
3034 sprintf(type, ".v%ui32",
3035 LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
3036 else
3037 strcpy(type, ".i32");
3038
3039 /* Add the type and suffixes .c, .o if needed. */
3040 sprintf(intr_name, "%s%s%s%s%s",
3041 name, is_shadow ? ".c" : "", infix,
3042 has_offset ? ".o" : "", type);
3043
3044 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3045 base->gallivm->builder, intr_name, emit_data->dst_type,
3046 emit_data->args, emit_data->arg_count,
3047 LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
3048
3049 /* Divide the number of layers by 6 to get the number of cubes. */
3050 if (opcode == TGSI_OPCODE_TXQ &&
3051 (target == TGSI_TEXTURE_CUBE_ARRAY ||
3052 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)) {
3053 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3054 LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
3055 LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);
3056
3057 LLVMValueRef v4 = emit_data->output[emit_data->chan];
3058 LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
3059 z = LLVMBuildSDiv(builder, z, six, "");
3060
3061 emit_data->output[emit_data->chan] =
3062 LLVMBuildInsertElement(builder, v4, z, two, "");
3063 }
3064 }
3065
3066 static void si_llvm_emit_txqs(
3067 const struct lp_build_tgsi_action *action,
3068 struct lp_build_tgsi_context *bld_base,
3069 struct lp_build_emit_data *emit_data)
3070 {
3071 struct si_shader_context *ctx = si_shader_context(bld_base);
3072 struct gallivm_state *gallivm = bld_base->base.gallivm;
3073 LLVMBuilderRef builder = gallivm->builder;
3074 LLVMValueRef res, samples;
3075 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
3076
3077 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
3078
3079
3080 /* Read the samples from the descriptor directly. */
3081 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
3082 samples = LLVMBuildExtractElement(
3083 builder, res,
3084 lp_build_const_int32(gallivm, 3), "");
3085 samples = LLVMBuildLShr(builder, samples,
3086 lp_build_const_int32(gallivm, 16), "");
3087 samples = LLVMBuildAnd(builder, samples,
3088 lp_build_const_int32(gallivm, 0xf), "");
3089 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
3090 samples, "");
3091
3092 emit_data->output[emit_data->chan] = samples;
3093 }
3094
3095 /*
3096 * SI implements derivatives using the local data store (LDS)
3097 * All writes to the LDS happen in all executing threads at
3098 * the same time. TID is the Thread ID for the current
3099 * thread and is a value between 0 and 63, representing
3100 * the thread's position in the wavefront.
3101 *
 * For the pixel shader, threads are grouped into quads of four pixels.
3103 * The TIDs of the pixels of a quad are:
3104 *
3105 * +------+------+
3106 * |4n + 0|4n + 1|
3107 * +------+------+
3108 * |4n + 2|4n + 3|
3109 * +------+------+
3110 *
3111 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
3112 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
3113 * the current pixel's column, and masking with 0xfffffffe yields the TID
3114 * of the left pixel of the current pixel's row.
3115 *
3116 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
3117 * adding 2 yields the TID of the pixel below the top pixel.
3118 */
3119 /* masks for thread ID. */
3120 #define TID_MASK_TOP_LEFT 0xfffffffc
3121 #define TID_MASK_TOP 0xfffffffd
3122 #define TID_MASK_LEFT 0xfffffffe
3123
/* Emit DDX/DDY (coarse, or the _FINE variants) for a fragment shader.
 *
 * Each thread stores the source value into its own LDS slot (indexed by
 * thread ID), then reads back the slots of two pixels of its 2x2 quad
 * (see the TID layout comment above) and returns their difference.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned opcode = inst->Instruction.Opcode;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr0, load_ptr1;
	LLVMValueRef tl, trbl, result[4];
	unsigned swizzle[4];
	unsigned c;
	int idx;
	unsigned mask;

	/* indices = {0, tid}: this thread's own LDS slot. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid", ctx->i32,
				  NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* Pick the "base" pixel of the difference: _FINE derivatives use
	 * the pixel's own row/column, coarse ones use the quad's top-left
	 * pixel (see the TID_MASK_* definitions above). */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	indices[1] = LLVMBuildAnd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, mask), "");
	load_ptr0 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* for DDX we want the next X pixel, DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	indices[1] = LLVMBuildAdd(gallivm->builder, indices[1],
				  lp_build_const_int32(gallivm, idx), "");
	load_ptr1 = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	for (c = 0; c < 4; ++c) {
		unsigned i;

		/* If an earlier channel used the same source swizzle, its
		 * derivative is identical — reuse it instead of re-emitting
		 * the store/load/sub sequence. */
		swizzle[c] = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], c);
		for (i = 0; i < c; ++i) {
			if (swizzle[i] == swizzle[c]) {
				result[c] = result[i];
				break;
			}
		}
		if (i != c)
			continue;

		/* Publish this thread's value; all threads in the quad do
		 * this at the same time (see the LDS comment above). */
		LLVMBuildStore(gallivm->builder,
			       LLVMBuildBitCast(gallivm->builder,
						lp_build_emit_fetch(bld_base, inst, 0, c),
						ctx->i32, ""),
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
		trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");

		/* derivative = neighbor - base */
		result[c] = LLVMBuildFSub(gallivm->builder, trbl, tl, "");
	}

	emit_data->output[0] = lp_build_gather_values(gallivm, result, 4);
}
3196
/*
 * this takes an I,J coordinate pair,
 * and works out the X and Y derivatives.
 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
 *
 * Uses the same LDS-based quad exchange as si_llvm_emit_ddxy: each
 * thread stores its value, then reads the left/top pixel of its quad
 * row/column and the pixel one to the right / one below it.
 */
static LLVMValueRef si_llvm_emit_ddxy_interp(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef interp_ij)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef indices[2];
	LLVMValueRef store_ptr, load_ptr_x, load_ptr_y, load_ptr_ddx, load_ptr_ddy, temp, temp2;
	LLVMValueRef tl, tr, bl, result[4];
	unsigned c;

	/* indices = {0, tid}: this thread's own LDS slot. */
	indices[0] = bld_base->uint_bld.zero;
	indices[1] = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid", ctx->i32,
					NULL, 0, LLVMReadNoneAttribute);
	store_ptr = LLVMBuildGEP(gallivm->builder, ctx->lds,
				 indices, 2, "");

	/* temp  = TID of the left pixel of this row (for DDX),
	 * temp2 = TID of the top pixel of this column (for DDY). */
	temp = LLVMBuildAnd(gallivm->builder, indices[1],
			    lp_build_const_int32(gallivm, TID_MASK_LEFT), "");

	temp2 = LLVMBuildAnd(gallivm->builder, indices[1],
			     lp_build_const_int32(gallivm, TID_MASK_TOP), "");

	indices[1] = temp;
	load_ptr_x = LLVMBuildGEP(gallivm->builder, ctx->lds,
				  indices, 2, "");

	indices[1] = temp2;
	load_ptr_y = LLVMBuildGEP(gallivm->builder, ctx->lds,
				  indices, 2, "");

	/* +1 = pixel to the right of the left pixel (DDX neighbor). */
	indices[1] = LLVMBuildAdd(gallivm->builder, temp,
				  lp_build_const_int32(gallivm, 1), "");
	load_ptr_ddx = LLVMBuildGEP(gallivm->builder, ctx->lds,
				   indices, 2, "");

	/* +2 = pixel below the top pixel (DDY neighbor). */
	indices[1] = LLVMBuildAdd(gallivm->builder, temp2,
				  lp_build_const_int32(gallivm, 2), "");
	load_ptr_ddy = LLVMBuildGEP(gallivm->builder, ctx->lds,
				   indices, 2, "");

	/* c = 0 processes I, c = 1 processes J. */
	for (c = 0; c < 2; ++c) {
		LLVMValueRef store_val;
		LLVMValueRef c_ll = lp_build_const_int32(gallivm, c);

		store_val = LLVMBuildExtractElement(gallivm->builder,
						    interp_ij, c_ll, "");
		LLVMBuildStore(gallivm->builder,
			       store_val,
			       store_ptr);

		tl = LLVMBuildLoad(gallivm->builder, load_ptr_x, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		tr = LLVMBuildLoad(gallivm->builder, load_ptr_ddx, "");
		tr = LLVMBuildBitCast(gallivm->builder, tr, ctx->f32, "");

		/* DDX goes into result[0..1] ... */
		result[c] = LLVMBuildFSub(gallivm->builder, tr, tl, "");

		tl = LLVMBuildLoad(gallivm->builder, load_ptr_y, "");
		tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");

		bl = LLVMBuildLoad(gallivm->builder, load_ptr_ddy, "");
		bl = LLVMBuildBitCast(gallivm->builder, bl, ctx->f32, "");

		/* ... and DDY into result[2..3]. */
		result[c + 2] = LLVMBuildFSub(gallivm->builder, bl, tl, "");
	}

	return lp_build_gather_values(gallivm, result, 4);
}
3272
3273 static void interp_fetch_args(
3274 struct lp_build_tgsi_context *bld_base,
3275 struct lp_build_emit_data *emit_data)
3276 {
3277 struct si_shader_context *ctx = si_shader_context(bld_base);
3278 struct gallivm_state *gallivm = bld_base->base.gallivm;
3279 const struct tgsi_full_instruction *inst = emit_data->inst;
3280
3281 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
3282 /* offset is in second src, first two channels */
3283 emit_data->args[0] = lp_build_emit_fetch(bld_base,
3284 emit_data->inst, 1,
3285 TGSI_CHAN_X);
3286 emit_data->args[1] = lp_build_emit_fetch(bld_base,
3287 emit_data->inst, 1,
3288 TGSI_CHAN_Y);
3289 emit_data->arg_count = 2;
3290 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
3291 LLVMValueRef sample_position;
3292 LLVMValueRef sample_id;
3293 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
3294
3295 /* fetch sample ID, then fetch its sample position,
3296 * and place into first two channels.
3297 */
3298 sample_id = lp_build_emit_fetch(bld_base,
3299 emit_data->inst, 1, TGSI_CHAN_X);
3300 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
3301 ctx->i32, "");
3302 sample_position = load_sample_position(&ctx->radeon_bld, sample_id);
3303
3304 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
3305 sample_position,
3306 lp_build_const_int32(gallivm, 0), "");
3307
3308 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
3309 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
3310 sample_position,
3311 lp_build_const_int32(gallivm, 1), "");
3312 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
3313 emit_data->arg_count = 2;
3314 }
3315 }
3316
/* Emit the INTERP_CENTROID/OFFSET/SAMPLE opcodes.
 *
 * Chooses the interpolation parameter (I,J barycentrics) for the input's
 * interpolation mode, optionally adjusts it by an offset or a sample
 * position using the derivatives of I and J, and then interpolates two
 * channels of the input with llvm.SI.fs.interp (or llvm.SI.fs.constant
 * for flat inputs).
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const char *intr_name;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET/SAMPLE start from the center barycentrics and adjust them
	 * below; CENTROID uses the centroid barycentrics directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* -1 = error, 0 = flat (no interp param), >0 = function parameter
	 * index holding the (I,J) pair. */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = LLVMGetParam(ctx->radeon_bld.main_fn, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = lp_build_const_int32(gallivm, input_index);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
			LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");

			/* Bitcast back to i32 so the pair can be gathered
			 * into one vector. */
			ij_out[i] = LLVMBuildBitCast(gallivm->builder,
						     temp2, ctx->i32, "");
		}
		interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
	}

	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
	/* NOTE(review): only channels 0 and 1 of the destination are
	 * produced here — TODO confirm callers only consume .xy. */
	for (chan = 0; chan < 2; chan++) {
		LLVMValueRef args[4];
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = lp_build_const_int32(gallivm, schan);

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;

		/* fs.constant takes only 3 args (no interp param). */
		emit_data->output[chan] =
			lp_build_intrinsic(gallivm->builder, intr_name,
					   ctx->f32, args, args[3] ? 4 : 3,
					   LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
	}
}
3415
3416 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
3417 struct lp_build_emit_data *emit_data)
3418 {
3419 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
3420 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
3421 unsigned stream;
3422
3423 assert(src0.File == TGSI_FILE_IMMEDIATE);
3424
3425 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
3426 return stream;
3427 }
3428
3429 /* Emit one vertex from the geometry shader */
3430 static void si_llvm_emit_vertex(
3431 const struct lp_build_tgsi_action *action,
3432 struct lp_build_tgsi_context *bld_base,
3433 struct lp_build_emit_data *emit_data)
3434 {
3435 struct si_shader_context *ctx = si_shader_context(bld_base);
3436 struct lp_build_context *uint = &bld_base->uint_bld;
3437 struct si_shader *shader = ctx->shader;
3438 struct tgsi_shader_info *info = &shader->selector->info;
3439 struct gallivm_state *gallivm = bld_base->base.gallivm;
3440 LLVMValueRef soffset = LLVMGetParam(ctx->radeon_bld.main_fn,
3441 SI_PARAM_GS2VS_OFFSET);
3442 LLVMValueRef gs_next_vertex;
3443 LLVMValueRef can_emit, kill;
3444 LLVMValueRef args[2];
3445 unsigned chan;
3446 int i;
3447 unsigned stream;
3448
3449 stream = si_llvm_get_stream(bld_base, emit_data);
3450
3451 /* Write vertex attribute values to GSVS ring */
3452 gs_next_vertex = LLVMBuildLoad(gallivm->builder,
3453 ctx->gs_next_vertex[stream],
3454 "");
3455
3456 /* If this thread has already emitted the declared maximum number of
3457 * vertices, kill it: excessive vertex emissions are not supposed to
3458 * have any effect, and GS threads have no externally observable
3459 * effects other than emitting vertices.
3460 */
3461 can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
3462 lp_build_const_int32(gallivm,
3463 shader->selector->gs_max_out_vertices), "");
3464 kill = lp_build_select(&bld_base->base, can_emit,
3465 lp_build_const_float(gallivm, 1.0f),
3466 lp_build_const_float(gallivm, -1.0f));
3467
3468 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
3469 ctx->voidt, &kill, 1, 0);
3470
3471 for (i = 0; i < info->num_outputs; i++) {
3472 LLVMValueRef *out_ptr =
3473 ctx->radeon_bld.soa.outputs[i];
3474
3475 for (chan = 0; chan < 4; chan++) {
3476 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
3477 LLVMValueRef voffset =
3478 lp_build_const_int32(gallivm, (i * 4 + chan) *
3479 shader->selector->gs_max_out_vertices);
3480
3481 voffset = lp_build_add(uint, voffset, gs_next_vertex);
3482 voffset = lp_build_mul_imm(uint, voffset, 4);
3483
3484 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
3485
3486 build_tbuffer_store(ctx,
3487 ctx->gsvs_ring[stream],
3488 out_val, 1,
3489 voffset, soffset, 0,
3490 V_008F0C_BUF_DATA_FORMAT_32,
3491 V_008F0C_BUF_NUM_FORMAT_UINT,
3492 1, 0, 1, 1, 0);
3493 }
3494 }
3495 gs_next_vertex = lp_build_add(uint, gs_next_vertex,
3496 lp_build_const_int32(gallivm, 1));
3497
3498 LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
3499
3500 /* Signal vertex emission */
3501 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
3502 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
3503 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
3504 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
3505 }
3506
3507 /* Cut one primitive from the geometry shader */
3508 static void si_llvm_emit_primitive(
3509 const struct lp_build_tgsi_action *action,
3510 struct lp_build_tgsi_context *bld_base,
3511 struct lp_build_emit_data *emit_data)
3512 {
3513 struct si_shader_context *ctx = si_shader_context(bld_base);
3514 struct gallivm_state *gallivm = bld_base->base.gallivm;
3515 LLVMValueRef args[2];
3516 unsigned stream;
3517
3518 /* Signal primitive cut */
3519 stream = si_llvm_get_stream(bld_base, emit_data);
3520 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
3521 args[1] = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
3522 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
3523 ctx->voidt, args, 2, LLVMNoUnwindAttribute);
3524 }
3525
3526 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
3527 struct lp_build_tgsi_context *bld_base,
3528 struct lp_build_emit_data *emit_data)
3529 {
3530 struct si_shader_context *ctx = si_shader_context(bld_base);
3531 struct gallivm_state *gallivm = bld_base->base.gallivm;
3532
3533 lp_build_intrinsic(gallivm->builder,
3534 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
3535 : "llvm.AMDGPU.barrier.local",
3536 ctx->voidt, NULL, 0, LLVMNoUnwindAttribute);
3537 }
3538
/* Action table entry for the TGSI texturing opcodes: tex_fetch_args
 * packs the address vector, build_tex_intrinsic emits the call. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
3543
/* Action table entry for the TGSI INTERP_* opcodes. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
3548
/* Build the metadata node stored in ctx->const_md, used to tag loads
 * from constant memory. */
static void create_meta_data(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = ctx->radeon_bld.soa.bld_base.base.gallivm;
	LLVMValueRef args[3];

	/* Node operands: the string "const", a NULL operand, and i32 1.
	 * NOTE(review): args[1] = 0 deliberately leaves the middle operand
	 * NULL — confirm this matches the layout the backend expects. */
	args[0] = LLVMMDStringInContext(gallivm->context, "const", 5);
	args[1] = 0;
	args[2] = lp_build_const_int32(gallivm, 1);

	ctx->const_md = LLVMMDNodeInContext(gallivm->context, args, 3);
}
3560
3561 static void declare_streamout_params(struct si_shader_context *ctx,
3562 struct pipe_stream_output_info *so,
3563 LLVMTypeRef *params, LLVMTypeRef i32,
3564 unsigned *num_params)
3565 {
3566 int i;
3567
3568 /* Streamout SGPRs. */
3569 if (so->num_outputs) {
3570 params[ctx->param_streamout_config = (*num_params)++] = i32;
3571 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
3572 }
3573 /* A streamout buffer offset is loaded if the stride is non-zero. */
3574 for (i = 0; i < 4; i++) {
3575 if (!so->stride[i])
3576 continue;
3577
3578 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
3579 }
3580 }
3581
/* Declare the LLVM function for the current shader stage.
 *
 * Builds the parameter list (the hardware input ABI: SGPR descriptors
 * and constants first, then VGPR system values), creates the function,
 * tags SGPR inputs, and allocates the LDS globals needed by derivative
 * opcodes and tessellation stages.
 */
static void create_function(struct si_shader_context *ctx)
{
	struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef params[SI_NUM_PARAMS], v2i32, v3i32;
	unsigned i, last_array_pointer, last_sgpr, num_params;

	v2i32 = LLVMVectorType(ctx->i32, 2);
	v3i32 = LLVMVectorType(ctx->i32, 3);

	/* Parameters shared by all stages: descriptor arrays. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
	params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
	params[SI_PARAM_UNUSED] = LLVMPointerType(ctx->i32, CONST_ADDR_SPACE);
	last_array_pointer = SI_PARAM_UNUSED;

	/* Stage-specific parameters. */
	switch (ctx->type) {
	case TGSI_PROCESSOR_VERTEX:
		params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
		last_array_pointer = SI_PARAM_VERTEX_BUFFERS;
		params[SI_PARAM_BASE_VERTEX] = ctx->i32;
		params[SI_PARAM_START_INSTANCE] = ctx->i32;
		num_params = SI_PARAM_START_INSTANCE+1;

		/* The trailing SGPRs depend on how the VS output is
		 * consumed: ES (feeding a GS), LS (feeding tessellation),
		 * or a regular VS with optional streamout. */
		if (shader->key.vs.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else if (shader->key.vs.as_ls) {
			params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
			num_params = SI_PARAM_LS_OUT_LAYOUT+1;
		} else {
			if (ctx->is_gs_copy_shader) {
				last_array_pointer = SI_PARAM_CONST_BUFFERS;
				num_params = SI_PARAM_CONST_BUFFERS+1;
			} else {
				params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
				num_params = SI_PARAM_VS_STATE_BITS+1;
			}

			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}

		last_sgpr = num_params-1;

		/* VGPRs */
		params[ctx->param_vertex_id = num_params++] = ctx->i32;
		params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
		params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
		params[ctx->param_instance_id = num_params++] = ctx->i32;
		break;

	case TGSI_PROCESSOR_TESS_CTRL:
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
		params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
		last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;

		/* VGPRs */
		params[SI_PARAM_PATCH_ID] = ctx->i32;
		params[SI_PARAM_REL_IDS] = ctx->i32;
		num_params = SI_PARAM_REL_IDS+1;
		break;

	case TGSI_PROCESSOR_TESS_EVAL:
		params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
		params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
		num_params = SI_PARAM_TCS_OUT_LAYOUT+1;

		/* TES as ES (feeding a GS) vs. regular TES with streamout. */
		if (shader->key.tes.as_es) {
			params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
		} else {
			declare_streamout_params(ctx, &shader->selector->so,
						 params, ctx->i32, &num_params);
		}
		last_sgpr = num_params - 1;

		/* VGPRs */
		params[ctx->param_tes_u = num_params++] = ctx->f32;
		params[ctx->param_tes_v = num_params++] = ctx->f32;
		params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
		params[ctx->param_tes_patch_id = num_params++] = ctx->i32;
		break;

	case TGSI_PROCESSOR_GEOMETRY:
		params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
		last_sgpr = SI_PARAM_GS_WAVE_ID;

		/* VGPRs */
		params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
		params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
		params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
		params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
		params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
		num_params = SI_PARAM_GS_INSTANCE_ID+1;
		break;

	case TGSI_PROCESSOR_FRAGMENT:
		params[SI_PARAM_ALPHA_REF] = ctx->f32;
		params[SI_PARAM_PRIM_MASK] = ctx->i32;
		last_sgpr = SI_PARAM_PRIM_MASK;
		/* VGPRs: interpolants, positions and system values. */
		params[SI_PARAM_PERSP_SAMPLE] = v2i32;
		params[SI_PARAM_PERSP_CENTER] = v2i32;
		params[SI_PARAM_PERSP_CENTROID] = v2i32;
		params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
		params[SI_PARAM_LINEAR_SAMPLE] = v2i32;
		params[SI_PARAM_LINEAR_CENTER] = v2i32;
		params[SI_PARAM_LINEAR_CENTROID] = v2i32;
		params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
		params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
		params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
		params[SI_PARAM_FRONT_FACE] = ctx->i32;
		params[SI_PARAM_ANCILLARY] = ctx->i32;
		params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
		params[SI_PARAM_POS_FIXED_PT] = ctx->f32;
		num_params = SI_PARAM_POS_FIXED_PT+1;
		break;

	default:
		assert(0 && "unimplemented shader");
		return;
	}

	assert(num_params <= Elements(params));
	radeon_llvm_create_func(&ctx->radeon_bld, params, num_params);
	radeon_llvm_shader_type(ctx->radeon_bld.main_fn, ctx->type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->radeon_bld.main_fn, i);

		/* We tell llvm that array inputs are passed by value to allow Sinking pass
		 * to move load. Inputs are constant so this is fine. */
		if (i <= last_array_pointer)
			LLVMAddAttribute(P, LLVMByValAttribute);
		else
			LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	/* Derivative opcodes exchange values between the pixels of a quad
	 * through this 64-dword LDS array (one slot per thread). */
	if (bld_base->info &&
	    (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
	     bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, 64),
						    "ddxy_lds",
						    LOCAL_ADDR_SPACE);

	if ((ctx->type == TGSI_PROCESSOR_VERTEX && shader->key.vs.as_ls) ||
	    ctx->type == TGSI_PROCESSOR_TESS_CTRL ||
	    ctx->type == TGSI_PROCESSOR_TESS_EVAL) {
		/* This is the upper bound, maximum is 32 inputs times 32 vertices */
		unsigned vertex_data_dw_size = 32*32*4;
		unsigned patch_data_dw_size = 32*4;
		/* The formula is: TCS inputs + TCS outputs + TCS patch outputs. */
		unsigned patch_dw_size = vertex_data_dw_size*2 + patch_data_dw_size;
		unsigned lds_dwords = patch_dw_size;

		/* The actual size is computed outside of the shader to reduce
		 * the number of shader variants. */
		ctx->lds =
			LLVMAddGlobalInAddressSpace(gallivm->module,
						    LLVMArrayType(ctx->i32, lds_dwords),
						    "tess_lds",
						    LOCAL_ADDR_SPACE);
	}
}
3760
3761 static void preload_constants(struct si_shader_context *ctx)
3762 {
3763 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
3764 struct gallivm_state *gallivm = bld_base->base.gallivm;
3765 const struct tgsi_shader_info *info = bld_base->info;
3766 unsigned buf;
3767 LLVMValueRef ptr = LLVMGetParam(ctx->radeon_bld.main_fn, SI_PARAM_CONST_BUFFERS);
3768
3769 for (buf = 0; buf < SI_NUM_CONST_BUFFERS; buf++) {
3770 unsigned i, num_const = info->const_file_max[buf] + 1;
3771
3772 if (num_const == 0)
3773 continue;
3774
3775 /* Allocate space for the constant values */
3776 ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));
3777
3778 /* Load the resource descriptor */
3779 ctx->const_buffers[buf] =
3780 build_indexed_load_const(ctx, ptr, lp_build_const_int32(gallivm, buf));
3781
3782 /* Load the constants, we rely on the code sinking to do the rest */
3783 for (i = 0; i < num_const * 4; ++i) {
3784 ctx->constants[buf][i] =
3785 buffer_load_const(gallivm->builder,
3786 ctx->const_buffers[buf],
3787 lp_build_const_int32(gallivm, i * 4),
3788 ctx->f32);
3789 }
3790 }
3791 }
3792
3793 static void preload_samplers(struct si_shader_context *ctx)
3794 {
3795 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
3796 struct gallivm_state *gallivm = bld_base->base.gallivm;
3797 const struct tgsi_shader_info *info = bld_base->info;
3798 unsigned i, num_samplers = info->file_max[TGSI_FILE_SAMPLER] + 1;
3799 LLVMValueRef offset;
3800
3801 if (num_samplers == 0)
3802 return;
3803
3804 /* Load the resources and samplers, we rely on the code sinking to do the rest */
3805 for (i = 0; i < num_samplers; ++i) {
3806 /* Resource */
3807 offset = lp_build_const_int32(gallivm, i);
3808 ctx->sampler_views[i] =
3809 get_sampler_desc(ctx, offset, DESC_IMAGE);
3810
3811 /* FMASK resource */
3812 if (info->is_msaa_sampler[i])
3813 ctx->fmasks[i] =
3814 get_sampler_desc(ctx, offset, DESC_FMASK);
3815 else
3816 ctx->sampler_states[i] =
3817 get_sampler_desc(ctx, offset, DESC_SAMPLER);
3818 }
3819 }
3820
3821 static void preload_streamout_buffers(struct si_shader_context *ctx)
3822 {
3823 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
3824 struct gallivm_state *gallivm = bld_base->base.gallivm;
3825 unsigned i;
3826
3827 /* Streamout can only be used if the shader is compiled as VS. */
3828 if (!ctx->shader->selector->so.num_outputs ||
3829 (ctx->type == TGSI_PROCESSOR_VERTEX &&
3830 (ctx->shader->key.vs.as_es ||
3831 ctx->shader->key.vs.as_ls)) ||
3832 (ctx->type == TGSI_PROCESSOR_TESS_EVAL &&
3833 ctx->shader->key.tes.as_es))
3834 return;
3835
3836 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
3837 SI_PARAM_RW_BUFFERS);
3838
3839 /* Load the resources, we rely on the code sinking to do the rest */
3840 for (i = 0; i < 4; ++i) {
3841 if (ctx->shader->selector->so.stride[i]) {
3842 LLVMValueRef offset = lp_build_const_int32(gallivm,
3843 SI_SO_BUF_OFFSET + i);
3844
3845 ctx->so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
3846 }
3847 }
3848 }
3849
3850 /**
3851 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
3852 * for later use.
3853 */
3854 static void preload_ring_buffers(struct si_shader_context *ctx)
3855 {
3856 struct gallivm_state *gallivm =
3857 ctx->radeon_bld.soa.bld_base.base.gallivm;
3858
3859 LLVMValueRef buf_ptr = LLVMGetParam(ctx->radeon_bld.main_fn,
3860 SI_PARAM_RW_BUFFERS);
3861
3862 if ((ctx->type == TGSI_PROCESSOR_VERTEX &&
3863 ctx->shader->key.vs.as_es) ||
3864 (ctx->type == TGSI_PROCESSOR_TESS_EVAL &&
3865 ctx->shader->key.tes.as_es) ||
3866 ctx->type == TGSI_PROCESSOR_GEOMETRY) {
3867 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_ESGS);
3868
3869 ctx->esgs_ring =
3870 build_indexed_load_const(ctx, buf_ptr, offset);
3871 }
3872
3873 if (ctx->is_gs_copy_shader) {
3874 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS);
3875
3876 ctx->gsvs_ring[0] =
3877 build_indexed_load_const(ctx, buf_ptr, offset);
3878 }
3879 if (ctx->type == TGSI_PROCESSOR_GEOMETRY) {
3880 int i;
3881 for (i = 0; i < 4; i++) {
3882 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS + i);
3883
3884 ctx->gsvs_ring[i] =
3885 build_indexed_load_const(ctx, buf_ptr, offset);
3886 }
3887 }
3888 }
3889
3890 void si_shader_binary_read_config(struct radeon_shader_binary *binary,
3891 struct si_shader_config *conf,
3892 unsigned symbol_offset)
3893 {
3894 unsigned i;
3895 const unsigned char *config =
3896 radeon_shader_binary_config_start(binary, symbol_offset);
3897
3898 /* XXX: We may be able to emit some of these values directly rather than
3899 * extracting fields to be emitted later.
3900 */
3901
3902 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
3903 unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
3904 unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
3905 switch (reg) {
3906 case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
3907 case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
3908 case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
3909 case R_00B848_COMPUTE_PGM_RSRC1:
3910 conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
3911 conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
3912 conf->float_mode = G_00B028_FLOAT_MODE(value);
3913 conf->rsrc1 = value;
3914 break;
3915 case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
3916 conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
3917 break;
3918 case R_00B84C_COMPUTE_PGM_RSRC2:
3919 conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
3920 conf->rsrc2 = value;
3921 break;
3922 case R_0286CC_SPI_PS_INPUT_ENA:
3923 conf->spi_ps_input_ena = value;
3924 break;
3925 case R_0286D0_SPI_PS_INPUT_ADDR:
3926 conf->spi_ps_input_addr = value;
3927 break;
3928 case R_0286E8_SPI_TMPRING_SIZE:
3929 case R_00B860_COMPUTE_TMPRING_SIZE:
3930 /* WAVESIZE is in units of 256 dwords. */
3931 conf->scratch_bytes_per_wave =
3932 G_00B860_WAVESIZE(value) * 256 * 4 * 1;
3933 break;
3934 default:
3935 {
3936 static bool printed;
3937
3938 if (!printed) {
3939 fprintf(stderr, "Warning: LLVM emitted unknown "
3940 "config register: 0x%x\n", reg);
3941 printed = true;
3942 }
3943 }
3944 break;
3945 }
3946
3947 if (!conf->spi_ps_input_addr)
3948 conf->spi_ps_input_addr = conf->spi_ps_input_ena;
3949 }
3950 }
3951
3952 void si_shader_apply_scratch_relocs(struct si_context *sctx,
3953 struct si_shader *shader,
3954 uint64_t scratch_va)
3955 {
3956 unsigned i;
3957 uint32_t scratch_rsrc_dword0 = scratch_va;
3958 uint32_t scratch_rsrc_dword1 =
3959 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32)
3960 | S_008F04_STRIDE(shader->config.scratch_bytes_per_wave / 64);
3961
3962 for (i = 0 ; i < shader->binary.reloc_count; i++) {
3963 const struct radeon_shader_reloc *reloc =
3964 &shader->binary.relocs[i];
3965 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
3966 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
3967 &scratch_rsrc_dword0, 4);
3968 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
3969 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
3970 &scratch_rsrc_dword1, 4);
3971 }
3972 }
3973 }
3974
3975 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
3976 {
3977 const struct radeon_shader_binary *binary = &shader->binary;
3978 unsigned code_size = binary->code_size + binary->rodata_size;
3979 unsigned char *ptr;
3980
3981 r600_resource_reference(&shader->bo, NULL);
3982 shader->bo = si_resource_create_custom(&sscreen->b.b,
3983 PIPE_USAGE_IMMUTABLE,
3984 code_size);
3985 if (!shader->bo)
3986 return -ENOMEM;
3987
3988 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
3989 PIPE_TRANSFER_READ_WRITE);
3990 util_memcpy_cpu_to_le32(ptr, binary->code, binary->code_size);
3991 if (binary->rodata_size > 0) {
3992 ptr += binary->code_size;
3993 util_memcpy_cpu_to_le32(ptr, binary->rodata,
3994 binary->rodata_size);
3995 }
3996
3997 sscreen->b.ws->buffer_unmap(shader->bo->buf);
3998 return 0;
3999 }
4000
4001 static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
4002 struct pipe_debug_callback *debug)
4003 {
4004 char *line, *p;
4005 unsigned i, count;
4006
4007 if (binary->disasm_string) {
4008 fprintf(stderr, "\nShader Disassembly:\n\n");
4009 fprintf(stderr, "%s\n", binary->disasm_string);
4010
4011 if (debug && debug->debug_message) {
4012 /* Very long debug messages are cut off, so send the
4013 * disassembly one line at a time. This causes more
4014 * overhead, but on the plus side it simplifies
4015 * parsing of resulting logs.
4016 */
4017 pipe_debug_message(debug, SHADER_INFO,
4018 "Shader Disassembly Begin");
4019
4020 line = binary->disasm_string;
4021 while (*line) {
4022 p = strchrnul(line, '\n');
4023 count = p - line;
4024
4025 if (count) {
4026 pipe_debug_message(debug, SHADER_INFO,
4027 "%.*s", count, line);
4028 }
4029
4030 if (!*p)
4031 break;
4032 line = p + 1;
4033 }
4034
4035 pipe_debug_message(debug, SHADER_INFO,
4036 "Shader Disassembly End");
4037 }
4038 } else {
4039 fprintf(stderr, "SI CODE:\n");
4040 for (i = 0; i < binary->code_size; i += 4) {
4041 fprintf(stderr, "@0x%x: %02x%02x%02x%02x\n", i,
4042 binary->code[i + 3], binary->code[i + 2],
4043 binary->code[i + 1], binary->code[i]);
4044 }
4045 }
4046 }
4047
4048 static void si_shader_dump_stats(struct si_screen *sscreen,
4049 struct si_shader_config *conf,
4050 unsigned num_inputs,
4051 unsigned code_size,
4052 struct pipe_debug_callback *debug,
4053 unsigned processor)
4054 {
4055 unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
4056 unsigned lds_per_wave = 0;
4057 unsigned max_simd_waves = 10;
4058
4059 /* Compute LDS usage for PS. */
4060 if (processor == TGSI_PROCESSOR_FRAGMENT) {
4061 /* The minimum usage per wave is (num_inputs * 36). The maximum
4062 * usage is (num_inputs * 36 * 16).
4063 * We can get anything in between and it varies between waves.
4064 *
4065 * Other stages don't know the size at compile time or don't
4066 * allocate LDS per wave, but instead they do it per thread group.
4067 */
4068 lds_per_wave = conf->lds_size * lds_increment +
4069 align(num_inputs * 36, lds_increment);
4070 }
4071
4072 /* Compute the per-SIMD wave counts. */
4073 if (conf->num_sgprs) {
4074 if (sscreen->b.chip_class >= VI)
4075 max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
4076 else
4077 max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
4078 }
4079
4080 if (conf->num_vgprs)
4081 max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);
4082
4083 /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
4084 * that PS can use.
4085 */
4086 if (lds_per_wave)
4087 max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
4088
4089 if (r600_can_dump_shader(&sscreen->b, processor)) {
4090 if (processor == TGSI_PROCESSOR_FRAGMENT) {
4091 fprintf(stderr, "*** SHADER CONFIG ***\n"
4092 "SPI_PS_INPUT_ADDR = 0x%04x\n"
4093 "SPI_PS_INPUT_ENA = 0x%04x\n",
4094 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
4095 }
4096
4097 fprintf(stderr, "*** SHADER STATS ***\n"
4098 "SGPRS: %d\n"
4099 "VGPRS: %d\n"
4100 "Code Size: %d bytes\n"
4101 "LDS: %d blocks\n"
4102 "Scratch: %d bytes per wave\n"
4103 "Max Waves: %d\n"
4104 "********************\n",
4105 conf->num_sgprs, conf->num_vgprs, code_size,
4106 conf->lds_size, conf->scratch_bytes_per_wave,
4107 max_simd_waves);
4108 }
4109
4110 pipe_debug_message(debug, SHADER_INFO,
4111 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
4112 "LDS: %d Scratch: %d Max Waves: %d",
4113 conf->num_sgprs, conf->num_vgprs, code_size,
4114 conf->lds_size, conf->scratch_bytes_per_wave,
4115 max_simd_waves);
4116 }
4117
4118 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
4119 struct pipe_debug_callback *debug, unsigned processor)
4120 {
4121 if (r600_can_dump_shader(&sscreen->b, processor))
4122 if (!(sscreen->b.debug_flags & DBG_NO_ASM))
4123 si_shader_dump_disassembly(&shader->binary, debug);
4124
4125 si_shader_dump_stats(sscreen, &shader->config,
4126 shader->selector ? shader->selector->info.num_inputs : 0,
4127 shader->binary.code_size, debug, processor);
4128 }
4129
/**
 * Compile an LLVM module to machine code and parse its config registers.
 *
 * Optionally dumps the IR first (gated by debug flags), compiles the
 * module into \p binary unless si_replace_shader() substitutes a
 * pre-built binary for this compilation index, then fills \p conf from
 * the emitted config blob and forces FP64/FP16 denormals on.
 *
 * Returns 0 on success or the nonzero result of radeon_llvm_compile().
 * NOTE(review): on compile failure this returns early without freeing
 * binary->config / binary->global_symbol_offsets — presumably they are
 * not populated on that path; verify in radeon_llvm_compile().
 */
int si_compile_llvm(struct si_screen *sscreen,
		    struct radeon_shader_binary *binary,
		    struct si_shader_config *conf,
		    LLVMTargetMachineRef tm,
		    LLVMModuleRef mod,
		    struct pipe_debug_callback *debug,
		    unsigned processor,
		    const char *name)
{
	int r = 0;
	/* Global compilation counter; also used as the key for
	 * si_replace_shader() below. */
	unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);

	if (r600_can_dump_shader(&sscreen->b, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		/* DBG_PREOPT_IR dumps the IR elsewhere, before optimization;
		 * avoid printing it twice. */
		if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			LLVMDumpModule(mod);
			fprintf(stderr, "\n");
		}
	}

	/* si_replace_shader() may load a replacement binary from disk for
	 * debugging; only invoke LLVM when it doesn't. */
	if (!si_replace_shader(count, binary)) {
		r = radeon_llvm_compile(mod, binary,
			r600_get_llvm_processor_name(sscreen->b.family), tm,
			debug);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The raw config blob has been parsed into *conf; release it. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;
	return r;
}
4182
4183 /* Generate code for the hardware VS shader stage to go with a geometry shader */
4184 static int si_generate_gs_copy_shader(struct si_screen *sscreen,
4185 struct si_shader_context *ctx,
4186 struct si_shader *gs,
4187 struct pipe_debug_callback *debug)
4188 {
4189 struct gallivm_state *gallivm = &ctx->radeon_bld.gallivm;
4190 struct lp_build_tgsi_context *bld_base = &ctx->radeon_bld.soa.bld_base;
4191 struct lp_build_context *uint = &bld_base->uint_bld;
4192 struct si_shader_output_values *outputs;
4193 struct tgsi_shader_info *gsinfo = &gs->selector->info;
4194 LLVMValueRef args[9];
4195 int i, r;
4196
4197 outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));
4198
4199 si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm, gsinfo);
4200 ctx->type = TGSI_PROCESSOR_VERTEX;
4201 ctx->is_gs_copy_shader = true;
4202
4203 create_meta_data(ctx);
4204 create_function(ctx);
4205 preload_streamout_buffers(ctx);
4206 preload_ring_buffers(ctx);
4207
4208 args[0] = ctx->gsvs_ring[0];
4209 args[1] = lp_build_mul_imm(uint,
4210 LLVMGetParam(ctx->radeon_bld.main_fn,
4211 ctx->param_vertex_id),
4212 4);
4213 args[3] = uint->zero;
4214 args[4] = uint->one; /* OFFEN */
4215 args[5] = uint->zero; /* IDXEN */
4216 args[6] = uint->one; /* GLC */
4217 args[7] = uint->one; /* SLC */
4218 args[8] = uint->zero; /* TFE */
4219
4220 /* Fetch vertex data from GSVS ring */
4221 for (i = 0; i < gsinfo->num_outputs; ++i) {
4222 unsigned chan;
4223
4224 outputs[i].name = gsinfo->output_semantic_name[i];
4225 outputs[i].sid = gsinfo->output_semantic_index[i];
4226
4227 for (chan = 0; chan < 4; chan++) {
4228 args[2] = lp_build_const_int32(gallivm,
4229 (i * 4 + chan) *
4230 gs->selector->gs_max_out_vertices * 16 * 4);
4231
4232 outputs[i].values[chan] =
4233 LLVMBuildBitCast(gallivm->builder,
4234 lp_build_intrinsic(gallivm->builder,
4235 "llvm.SI.buffer.load.dword.i32.i32",
4236 ctx->i32, args, 9,
4237 LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
4238 ctx->f32, "");
4239 }
4240 }
4241
4242 si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
4243
4244 LLVMBuildRetVoid(bld_base->base.gallivm->builder);
4245
4246 /* Dump LLVM IR before any optimization passes */
4247 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
4248 r600_can_dump_shader(&sscreen->b, TGSI_PROCESSOR_GEOMETRY))
4249 LLVMDumpModule(bld_base->base.gallivm->module);
4250
4251 radeon_llvm_finalize_module(&ctx->radeon_bld);
4252
4253 r = si_compile_llvm(sscreen, &ctx->shader->binary,
4254 &ctx->shader->config, ctx->tm,
4255 bld_base->base.gallivm->module,
4256 debug, TGSI_PROCESSOR_GEOMETRY,
4257 "GS Copy Shader");
4258 if (!r) {
4259 if (r600_can_dump_shader(&sscreen->b, TGSI_PROCESSOR_GEOMETRY))
4260 fprintf(stderr, "GS Copy Shader:\n");
4261 si_shader_dump(sscreen, ctx->shader, debug,
4262 TGSI_PROCESSOR_GEOMETRY);
4263 r = si_shader_binary_upload(sscreen, ctx->shader);
4264 }
4265
4266 radeon_llvm_dispose(&ctx->radeon_bld);
4267
4268 FREE(outputs);
4269 return r;
4270 }
4271
4272 void si_dump_shader_key(unsigned shader, union si_shader_key *key, FILE *f)
4273 {
4274 int i;
4275
4276 fprintf(f, "SHADER KEY\n");
4277
4278 switch (shader) {
4279 case PIPE_SHADER_VERTEX:
4280 fprintf(f, " instance_divisors = {");
4281 for (i = 0; i < Elements(key->vs.instance_divisors); i++)
4282 fprintf(f, !i ? "%u" : ", %u",
4283 key->vs.instance_divisors[i]);
4284 fprintf(f, "}\n");
4285 fprintf(f, " as_es = %u\n", key->vs.as_es);
4286 fprintf(f, " as_ls = %u\n", key->vs.as_ls);
4287 fprintf(f, " export_prim_id = %u\n", key->vs.export_prim_id);
4288 break;
4289
4290 case PIPE_SHADER_TESS_CTRL:
4291 fprintf(f, " prim_mode = %u\n", key->tcs.prim_mode);
4292 break;
4293
4294 case PIPE_SHADER_TESS_EVAL:
4295 fprintf(f, " as_es = %u\n", key->tes.as_es);
4296 fprintf(f, " export_prim_id = %u\n", key->tes.export_prim_id);
4297 break;
4298
4299 case PIPE_SHADER_GEOMETRY:
4300 break;
4301
4302 case PIPE_SHADER_FRAGMENT:
4303 fprintf(f, " spi_shader_col_format = 0x%x\n", key->ps.spi_shader_col_format);
4304 fprintf(f, " last_cbuf = %u\n", key->ps.last_cbuf);
4305 fprintf(f, " color_two_side = %u\n", key->ps.color_two_side);
4306 fprintf(f, " alpha_func = %u\n", key->ps.alpha_func);
4307 fprintf(f, " alpha_to_one = %u\n", key->ps.alpha_to_one);
4308 fprintf(f, " poly_stipple = %u\n", key->ps.poly_stipple);
4309 fprintf(f, " clamp_color = %u\n", key->ps.clamp_color);
4310 break;
4311
4312 default:
4313 assert(0);
4314 }
4315 }
4316
4317 static void si_init_shader_ctx(struct si_shader_context *ctx,
4318 struct si_screen *sscreen,
4319 struct si_shader *shader,
4320 LLVMTargetMachineRef tm,
4321 struct tgsi_shader_info *info)
4322 {
4323 struct lp_build_tgsi_context *bld_base;
4324
4325 memset(ctx, 0, sizeof(*ctx));
4326 radeon_llvm_context_init(&ctx->radeon_bld, "amdgcn--");
4327 ctx->tm = tm;
4328 ctx->screen = sscreen;
4329 if (shader && shader->selector)
4330 ctx->type = shader->selector->info.processor;
4331 else
4332 ctx->type = -1;
4333 ctx->shader = shader;
4334
4335 ctx->voidt = LLVMVoidTypeInContext(ctx->radeon_bld.gallivm.context);
4336 ctx->i1 = LLVMInt1TypeInContext(ctx->radeon_bld.gallivm.context);
4337 ctx->i8 = LLVMInt8TypeInContext(ctx->radeon_bld.gallivm.context);
4338 ctx->i32 = LLVMInt32TypeInContext(ctx->radeon_bld.gallivm.context);
4339 ctx->i128 = LLVMIntTypeInContext(ctx->radeon_bld.gallivm.context, 128);
4340 ctx->f32 = LLVMFloatTypeInContext(ctx->radeon_bld.gallivm.context);
4341 ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
4342 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
4343 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
4344 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
4345
4346 bld_base = &ctx->radeon_bld.soa.bld_base;
4347 bld_base->info = info;
4348 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
4349
4350 bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
4351 bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
4352 bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;
4353
4354 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
4355 bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
4356 bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
4357 bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
4358 bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
4359 bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
4360 bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
4361 bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
4362 bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
4363 bld_base->op_actions[TGSI_OPCODE_TXQ] = tex_action;
4364 bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
4365 bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
4366 bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;
4367
4368 bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
4369 bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
4370 bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
4371 bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;
4372
4373 bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
4374 bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
4375 bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
4376
4377 bld_base->op_actions[TGSI_OPCODE_MAX].emit = build_tgsi_intrinsic_nomem;
4378 bld_base->op_actions[TGSI_OPCODE_MAX].intr_name = "llvm.maxnum.f32";
4379 bld_base->op_actions[TGSI_OPCODE_MIN].emit = build_tgsi_intrinsic_nomem;
4380 bld_base->op_actions[TGSI_OPCODE_MIN].intr_name = "llvm.minnum.f32";
4381 }
4382
4383 int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
4384 struct si_shader *shader,
4385 struct pipe_debug_callback *debug)
4386 {
4387 struct si_shader_selector *sel = shader->selector;
4388 struct tgsi_token *tokens = sel->tokens;
4389 struct si_shader_context ctx;
4390 struct lp_build_tgsi_context *bld_base;
4391 struct tgsi_shader_info stipple_shader_info;
4392 LLVMModuleRef mod;
4393 int r = 0;
4394 bool poly_stipple = sel->type == PIPE_SHADER_FRAGMENT &&
4395 shader->key.ps.poly_stipple;
4396
4397 if (poly_stipple) {
4398 tokens = util_pstipple_create_fragment_shader(tokens, NULL,
4399 SI_POLY_STIPPLE_SAMPLER,
4400 TGSI_FILE_SYSTEM_VALUE);
4401 tgsi_scan_shader(tokens, &stipple_shader_info);
4402 }
4403
4404 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
4405 * conversion fails. */
4406 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
4407 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
4408 si_dump_shader_key(sel->type, &shader->key, stderr);
4409 tgsi_dump(tokens, 0);
4410 si_dump_streamout(&sel->so);
4411 }
4412
4413 si_init_shader_ctx(&ctx, sscreen, shader, tm,
4414 poly_stipple ? &stipple_shader_info : &sel->info);
4415
4416 shader->uses_instanceid = sel->info.uses_instanceid;
4417
4418 bld_base = &ctx.radeon_bld.soa.bld_base;
4419 ctx.radeon_bld.load_system_value = declare_system_value;
4420
4421 switch (ctx.type) {
4422 case TGSI_PROCESSOR_VERTEX:
4423 ctx.radeon_bld.load_input = declare_input_vs;
4424 if (shader->key.vs.as_ls)
4425 bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
4426 else if (shader->key.vs.as_es)
4427 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
4428 else
4429 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
4430 break;
4431 case TGSI_PROCESSOR_TESS_CTRL:
4432 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
4433 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
4434 bld_base->emit_store = store_output_tcs;
4435 bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
4436 break;
4437 case TGSI_PROCESSOR_TESS_EVAL:
4438 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
4439 if (shader->key.tes.as_es)
4440 bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
4441 else
4442 bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
4443 break;
4444 case TGSI_PROCESSOR_GEOMETRY:
4445 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
4446 bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
4447 break;
4448 case TGSI_PROCESSOR_FRAGMENT:
4449 ctx.radeon_bld.load_input = declare_input_fs;
4450 bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
4451 break;
4452 default:
4453 assert(!"Unsupported shader type");
4454 return -1;
4455 }
4456
4457 create_meta_data(&ctx);
4458 create_function(&ctx);
4459 preload_constants(&ctx);
4460 preload_samplers(&ctx);
4461 preload_streamout_buffers(&ctx);
4462 preload_ring_buffers(&ctx);
4463
4464 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
4465 int i;
4466 for (i = 0; i < 4; i++) {
4467 ctx.gs_next_vertex[i] =
4468 lp_build_alloca(bld_base->base.gallivm,
4469 ctx.i32, "");
4470 }
4471 }
4472
4473 if (!lp_build_tgsi_llvm(bld_base, tokens)) {
4474 fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
4475 goto out;
4476 }
4477
4478 LLVMBuildRetVoid(bld_base->base.gallivm->builder);
4479 mod = bld_base->base.gallivm->module;
4480
4481 /* Dump LLVM IR before any optimization passes */
4482 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
4483 r600_can_dump_shader(&sscreen->b, ctx.type))
4484 LLVMDumpModule(mod);
4485
4486 radeon_llvm_finalize_module(&ctx.radeon_bld);
4487
4488 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
4489 mod, debug, ctx.type, "TGSI shader");
4490 if (r) {
4491 fprintf(stderr, "LLVM failed to compile shader\n");
4492 goto out;
4493 }
4494
4495 si_shader_dump(sscreen, shader, debug, ctx.type);
4496
4497 r = si_shader_binary_upload(sscreen, shader);
4498 if (r) {
4499 fprintf(stderr, "LLVM failed to upload shader\n");
4500 goto out;
4501 }
4502
4503 radeon_llvm_dispose(&ctx.radeon_bld);
4504
4505 if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
4506 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
4507 shader->gs_copy_shader->selector = shader->selector;
4508 ctx.shader = shader->gs_copy_shader;
4509 if ((r = si_generate_gs_copy_shader(sscreen, &ctx,
4510 shader, debug))) {
4511 free(shader->gs_copy_shader);
4512 shader->gs_copy_shader = NULL;
4513 goto out;
4514 }
4515 }
4516
4517 out:
4518 for (int i = 0; i < SI_NUM_CONST_BUFFERS; i++)
4519 FREE(ctx.constants[i]);
4520 if (poly_stipple)
4521 tgsi_free_tokens(tokens);
4522 return r;
4523 }
4524
4525 void si_shader_destroy(struct si_shader *shader)
4526 {
4527 if (shader->gs_copy_shader) {
4528 si_shader_destroy(shader->gs_copy_shader);
4529 FREE(shader->gs_copy_shader);
4530 }
4531
4532 if (shader->scratch_bo)
4533 r600_resource_reference(&shader->scratch_bo, NULL);
4534
4535 r600_resource_reference(&shader->bo, NULL);
4536
4537 radeon_shader_binary_clean(&shader->binary);
4538 }