6ae63c8a164ff26676c5d8a5140b780e910af888
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "radeon/radeon_elf_util.h"
37 #include "util/u_memory.h"
38 #include "util/u_string.h"
39 #include "tgsi/tgsi_build.h"
40 #include "tgsi/tgsi_util.h"
41 #include "tgsi/tgsi_dump.h"
42
43 #include "si_shader_internal.h"
44 #include "si_pipe.h"
45 #include "sid.h"
46
47
/* ELF symbol names for the two dwords of the scratch buffer resource
 * descriptor; presumably patched with the real scratch address after
 * compilation — NOTE(review): confirm against the relocation code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
53
/* One shader output: one LLVM value per component, plus the TGSI
 * semantic name/index identifying which output this is. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* X, Y, Z, W components */
	unsigned name;		/* TGSI_SEMANTIC_* */
	unsigned sid;		/* semantic index */
};
60
/* Forward declarations for functions defined later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
			       FILE *f);

/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* The VS location of the PrimitiveID input is the same in the epilog,
 * so that the main shader part doesn't have to move it.
 */
#define VS_EPILOG_PRIMID_LOC 2

/* LLVM address spaces used when building pointers. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};

/* s_sendmsg message types and GS opcodes (op goes in bits 4+). */
#define SENDMSG_GS 2
#define SENDMSG_GS_DONE 3

#define SENDMSG_GS_OP_NOP (0 << 4)
#define SENDMSG_GS_OP_CUT (1 << 4)
#define SENDMSG_GS_OP_EMIT (2 << 4)
#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
95
96 /**
97 * Returns a unique index for a semantic name and index. The index must be
98 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
99 * calculated.
100 */
101 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
102 {
103 switch (semantic_name) {
104 case TGSI_SEMANTIC_POSITION:
105 return 0;
106 case TGSI_SEMANTIC_PSIZE:
107 return 1;
108 case TGSI_SEMANTIC_CLIPDIST:
109 assert(index <= 1);
110 return 2 + index;
111 case TGSI_SEMANTIC_GENERIC:
112 if (index <= 63-4)
113 return 4 + index;
114 else
115 /* same explanation as in the default statement,
116 * the only user hitting this is st/nine.
117 */
118 return 0;
119
120 /* patch indices are completely separate and thus start from 0 */
121 case TGSI_SEMANTIC_TESSOUTER:
122 return 0;
123 case TGSI_SEMANTIC_TESSINNER:
124 return 1;
125 case TGSI_SEMANTIC_PATCH:
126 return 2 + index;
127
128 default:
129 /* Don't fail here. The result of this function is only used
130 * for LS, TCS, TES, and GS, where legacy GL semantics can't
131 * occur, but this function is called for all vertex shaders
132 * before it's known whether LS will be compiled or not.
133 */
134 return 0;
135 }
136 }
137
138 /**
139 * Get the value of a shader input parameter and extract a bitfield.
140 */
141 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
142 unsigned param, unsigned rshift,
143 unsigned bitwidth)
144 {
145 struct gallivm_state *gallivm = &ctx->gallivm;
146 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
147 param);
148
149 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
150 value = bitcast(&ctx->soa.bld_base,
151 TGSI_TYPE_UNSIGNED, value);
152
153 if (rshift)
154 value = LLVMBuildLShr(gallivm->builder, value,
155 lp_build_const_int32(gallivm, rshift), "");
156
157 if (rshift + bitwidth < 32) {
158 unsigned mask = (1 << bitwidth) - 1;
159 value = LLVMBuildAnd(gallivm->builder, value,
160 lp_build_const_int32(gallivm, mask), "");
161 }
162
163 return value;
164 }
165
166 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
167 {
168 switch (ctx->type) {
169 case PIPE_SHADER_TESS_CTRL:
170 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
171
172 case PIPE_SHADER_TESS_EVAL:
173 return LLVMGetParam(ctx->main_fn,
174 ctx->param_tes_rel_patch_id);
175
176 default:
177 assert(0);
178 return NULL;
179 }
180 }
181
182 /* Tessellation shaders pass outputs to the next shader using LDS.
183 *
184 * LS outputs = TCS inputs
185 * TCS outputs = TES inputs
186 *
187 * The LDS layout is:
188 * - TCS inputs for patch 0
189 * - TCS inputs for patch 1
190 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
191 * - ...
192 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
193 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
194 * - TCS outputs for patch 1
195 * - Per-patch TCS outputs for patch 1
196 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
197 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
198 * - ...
199 *
200 * All three shaders VS(LS), TCS, TES share the same LDS space.
201 */
202
203 static LLVMValueRef
204 get_tcs_in_patch_stride(struct si_shader_context *ctx)
205 {
206 if (ctx->type == PIPE_SHADER_VERTEX)
207 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
208 else if (ctx->type == PIPE_SHADER_TESS_CTRL)
209 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
210 else {
211 assert(0);
212 return NULL;
213 }
214 }
215
/* Dword stride of one TCS output patch in LDS
 * (bits 0..12 of SI_PARAM_TCS_OUT_LAYOUT). */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
}
221
/* LDS offset of the TCS outputs of patch 0.
 * Bits 0..15 of SI_PARAM_TCS_OUT_OFFSETS hold the offset in 4-dword units,
 * hence the multiply by 4 to get dwords. */
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
				unpack_param(ctx,
					     SI_PARAM_TCS_OUT_OFFSETS,
					     0, 16),
				4);
}
231
/* LDS offset of the per-patch TCS outputs of patch 0.
 * Bits 16..31 of SI_PARAM_TCS_OUT_OFFSETS hold the offset in 4-dword units,
 * hence the multiply by 4 to get dwords. */
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
				unpack_param(ctx,
					     SI_PARAM_TCS_OUT_OFFSETS,
					     16, 16),
				4);
}
241
242 static LLVMValueRef
243 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
244 {
245 struct gallivm_state *gallivm = &ctx->gallivm;
246 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
247 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
248
249 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
250 }
251
252 static LLVMValueRef
253 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
254 {
255 struct gallivm_state *gallivm = &ctx->gallivm;
256 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
257 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
258 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
259
260 return LLVMBuildAdd(gallivm->builder, patch0_offset,
261 LLVMBuildMul(gallivm->builder, patch_stride,
262 rel_patch_id, ""),
263 "");
264 }
265
266 static LLVMValueRef
267 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
268 {
269 struct gallivm_state *gallivm = &ctx->gallivm;
270 LLVMValueRef patch0_patch_data_offset =
271 get_tcs_out_patch0_patch_data_offset(ctx);
272 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
273 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
274
275 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
276 LLVMBuildMul(gallivm->builder, patch_stride,
277 rel_patch_id, ""),
278 "");
279 }
280
281 static LLVMValueRef build_gep0(struct si_shader_context *ctx,
282 LLVMValueRef base_ptr, LLVMValueRef index)
283 {
284 LLVMValueRef indices[2] = {
285 LLVMConstInt(ctx->i32, 0, 0),
286 index,
287 };
288 return LLVMBuildGEP(ctx->gallivm.builder, base_ptr,
289 indices, 2, "");
290 }
291
292 static void build_indexed_store(struct si_shader_context *ctx,
293 LLVMValueRef base_ptr, LLVMValueRef index,
294 LLVMValueRef value)
295 {
296 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
297 struct gallivm_state *gallivm = bld_base->base.gallivm;
298
299 LLVMBuildStore(gallivm->builder, value,
300 build_gep0(ctx, base_ptr, index));
301 }
302
303 /**
304 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
305 * It's equivalent to doing a load from &base_ptr[index].
306 *
307 * \param base_ptr Where the array starts.
308 * \param index The element index into the array.
309 * \param uniform Whether the base_ptr and index can be assumed to be
310 * dynamically uniform
311 */
312 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
313 LLVMValueRef base_ptr, LLVMValueRef index,
314 bool uniform)
315 {
316 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
317 struct gallivm_state *gallivm = bld_base->base.gallivm;
318 LLVMValueRef pointer;
319
320 pointer = build_gep0(ctx, base_ptr, index);
321 if (uniform)
322 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
323 return LLVMBuildLoad(gallivm->builder, pointer, "");
324 }
325
326 /**
327 * Do a load from &base_ptr[index], but also add a flag that it's loading
328 * a constant from a dynamically uniform index.
329 */
330 static LLVMValueRef build_indexed_load_const(
331 struct si_shader_context *ctx,
332 LLVMValueRef base_ptr, LLVMValueRef index)
333 {
334 LLVMValueRef result = build_indexed_load(ctx, base_ptr, index, true);
335 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
336 return result;
337 }
338
339 static LLVMValueRef get_instance_index_for_fetch(
340 struct si_shader_context *radeon_bld,
341 unsigned param_start_instance, unsigned divisor)
342 {
343 struct si_shader_context *ctx =
344 si_shader_context(&radeon_bld->soa.bld_base);
345 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
346
347 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
348 ctx->param_instance_id);
349
350 /* The division must be done before START_INSTANCE is added. */
351 if (divisor > 1)
352 result = LLVMBuildUDiv(gallivm->builder, result,
353 lp_build_const_int32(gallivm, divisor), "");
354
355 return LLVMBuildAdd(gallivm->builder, result,
356 LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
357 }
358
/**
 * Declare a vertex shader input: load its vertex-buffer descriptor,
 * compute the buffer index, fetch the attribute with
 * llvm.SI.vs.load.input, and write one value per channel to out[0..3].
 */
static void declare_input_vs(
	struct si_shader_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	unsigned divisor =
		ctx->shader->key.vs.prolog.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list (vertex-buffer descriptor array) */
	t_list_ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	if (!ctx->is_monolithic) {
		/* Non-monolithic shader: the prolog already computed the
		 * per-attribute vertex index and passes it as a parameter. */
		buffer_index = LLVMGetParam(radeon_bld->main_fn,
					    ctx->param_vertex_index0 +
					    input_index);
	} else if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->info.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(ctx,
							    SI_PARAM_START_INSTANCE,
							    divisor);
	} else {
		/* Load the buffer index for vertices. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
				   LLVMReadNoneAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input, llvm_chan, "");
	}
}
425
426 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
427 unsigned swizzle)
428 {
429 struct si_shader_context *ctx = si_shader_context(bld_base);
430
431 if (swizzle > 0)
432 return bld_base->uint_bld.zero;
433
434 switch (ctx->type) {
435 case PIPE_SHADER_VERTEX:
436 return LLVMGetParam(ctx->main_fn,
437 ctx->param_vs_prim_id);
438 case PIPE_SHADER_TESS_CTRL:
439 return LLVMGetParam(ctx->main_fn,
440 SI_PARAM_PATCH_ID);
441 case PIPE_SHADER_TESS_EVAL:
442 return LLVMGetParam(ctx->main_fn,
443 ctx->param_tes_patch_id);
444 case PIPE_SHADER_GEOMETRY:
445 return LLVMGetParam(ctx->main_fn,
446 SI_PARAM_PRIMITIVE_ID);
447 default:
448 assert(0);
449 return bld_base->uint_bld.zero;
450 }
451 }
452
453 /**
454 * Return the value of tgsi_ind_register for indexing.
455 * This is the indirect index with the constant offset added to it.
456 */
457 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
458 const struct tgsi_ind_register *ind,
459 int rel_index)
460 {
461 struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
462 LLVMValueRef result;
463
464 result = ctx->soa.addr[ind->Index][ind->Swizzle];
465 result = LLVMBuildLoad(gallivm->builder, result, "");
466 result = LLVMBuildAdd(gallivm->builder, result,
467 lp_build_const_int32(gallivm, rel_index), "");
468 return result;
469 }
470
471 /**
472 * Like get_indirect_index, but restricts the return value to a (possibly
473 * undefined) value inside [0..num).
474 */
475 static LLVMValueRef get_bounded_indirect_index(struct si_shader_context *ctx,
476 const struct tgsi_ind_register *ind,
477 int rel_index, unsigned num)
478 {
479 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
480
481 /* LLVM 3.8: If indirect resource indexing is used:
482 * - SI & CIK hang
483 * - VI crashes
484 */
485 if (HAVE_LLVM <= 0x0308)
486 return LLVMGetUndef(ctx->i32);
487
488 return si_llvm_bound_index(ctx, result, num);
489 }
490
491
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst               destination register; used when \p src is NULL
 * \param src               source register; takes precedence when non-NULL
 * \param vertex_dw_stride  dword stride between vertices, for 2-D registers
 * \param base_addr         starting dword address (e.g. a patch offset)
 * \return base_addr advanced to the register's dword slot, or NULL on an
 *         unexpected register file (assert)
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = lp_build_const_int32(gallivm, reg.Dimension.Index);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* For array accesses, the index is relative to the first
		 * element of the array. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* Each slot occupies 4 dwords (one vec4). */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      lp_build_const_int32(gallivm, 4), ""), "");

		param = si_shader_io_get_unique_index(name[first], index[first]);
	} else {
		param = si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    lp_build_const_int32(gallivm, param * 4), "");
}
576
577 /* The offchip buffer layout for TCS->TES is
578 *
579 * - attribute 0 of patch 0 vertex 0
580 * - attribute 0 of patch 0 vertex 1
581 * - attribute 0 of patch 0 vertex 2
582 * ...
583 * - attribute 0 of patch 1 vertex 0
584 * - attribute 0 of patch 1 vertex 1
585 * ...
586 * - attribute 1 of patch 0 vertex 0
587 * - attribute 1 of patch 0 vertex 1
588 * ...
589 * - per patch attribute 0 of patch 0
590 * - per patch attribute 0 of patch 1
591 * ...
592 *
593 * Note that every attribute has 4 components.
594 */
/**
 * Compute the address of an attribute in the TCS->TES off-chip buffer
 * (layout described in the comment above).
 *
 * The result is base_addr * 16, i.e. one slot per vec4 attribute
 * (4 components * 4 bytes).
 *
 * \param vertex_index  vertex within the patch, or NULL to address a
 *                      per-patch attribute
 * \param param_index   attribute slot index
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* Patch geometry comes packed in SI_PARAM_TCS_OFFCHIP_LAYOUT. */
	vertices_per_patch = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 6);
	num_patches = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 0, 9);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
				      num_patches, "");

	constant16 = lp_build_const_int32(gallivm, 16);
	if (vertex_index) {
		/* Per-vertex: index by global vertex number; attributes are
		 * strided by the total vertex count. */
		base_addr = LLVMBuildMul(gallivm->builder, get_rel_patch_id(ctx),
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch: index by patch number; attributes are strided
		 * by the patch count. */
		base_addr = get_rel_patch_id(ctx);
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
				 LLVMBuildMul(gallivm->builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes
		 * (offset packed in bits 16..31 of the layout word). */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 16, 16);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
637
/**
 * Compute the off-chip buffer address of a TGSI register, resolving
 * (possibly indirect) vertex and parameter indices, then delegate to
 * get_tcs_tes_buffer_address.
 *
 * \param dst  destination register; used when \p src is NULL
 * \param src  source register; takes precedence when non-NULL
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
					struct si_shader_context *ctx,
					const struct tgsi_full_dst_register *dst,
					const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* 2-D register: the second dimension selects the vertex. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
							  reg.Dimension.Index);
		else
			vertex_index = lp_build_const_int32(gallivm,
							    reg.Dimension.Index);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Indirect access: index relative to the array start (or
		 * the register itself when there is no ArrayID). */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
						 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = lp_build_const_int32(gallivm, 0);
	}

	/* Translate the semantic into a buffer slot and add the relative
	 * index on top. */
	param_index_base = si_shader_io_get_unique_index(name[param_base],
							 index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
				   lp_build_const_int32(gallivm, param_index_base),
				   "");

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
700
701 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
702 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
703 * or v4i32 (num_channels=3,4). */
704 static void build_tbuffer_store(struct si_shader_context *ctx,
705 LLVMValueRef rsrc,
706 LLVMValueRef vdata,
707 unsigned num_channels,
708 LLVMValueRef vaddr,
709 LLVMValueRef soffset,
710 unsigned inst_offset,
711 unsigned dfmt,
712 unsigned nfmt,
713 unsigned offen,
714 unsigned idxen,
715 unsigned glc,
716 unsigned slc,
717 unsigned tfe)
718 {
719 struct gallivm_state *gallivm = &ctx->gallivm;
720 LLVMValueRef args[] = {
721 rsrc,
722 vdata,
723 LLVMConstInt(ctx->i32, num_channels, 0),
724 vaddr,
725 soffset,
726 LLVMConstInt(ctx->i32, inst_offset, 0),
727 LLVMConstInt(ctx->i32, dfmt, 0),
728 LLVMConstInt(ctx->i32, nfmt, 0),
729 LLVMConstInt(ctx->i32, offen, 0),
730 LLVMConstInt(ctx->i32, idxen, 0),
731 LLVMConstInt(ctx->i32, glc, 0),
732 LLVMConstInt(ctx->i32, slc, 0),
733 LLVMConstInt(ctx->i32, tfe, 0)
734 };
735
736 /* The instruction offset field has 12 bits */
737 assert(offen || inst_offset < (1 << 12));
738
739 /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
740 unsigned func = CLAMP(num_channels, 1, 3) - 1;
741 const char *types[] = {"i32", "v2i32", "v4i32"};
742 char name[256];
743 snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
744
745 lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
746 args, ARRAY_SIZE(args), 0);
747 }
748
749 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
750 LLVMValueRef rsrc,
751 LLVMValueRef vdata,
752 unsigned num_channels,
753 LLVMValueRef vaddr,
754 LLVMValueRef soffset,
755 unsigned inst_offset)
756 {
757 static unsigned dfmt[] = {
758 V_008F0C_BUF_DATA_FORMAT_32,
759 V_008F0C_BUF_DATA_FORMAT_32_32,
760 V_008F0C_BUF_DATA_FORMAT_32_32_32,
761 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
762 };
763 assert(num_channels >= 1 && num_channels <= 4);
764
765 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
766 inst_offset, dfmt[num_channels-1],
767 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
768 }
769
/**
 * Load 1, 2 or 4 dwords from a buffer resource.
 *
 * With LLVM >= 3.9 this emits llvm.amdgcn.buffer.load.{f32,v2f32,v4f32};
 * older LLVM uses llvm.SI.buffer.load.dword.*. num_channels == 3 is
 * rounded up to 4 by the CLAMP below.
 *
 * \param vindex   buffer element index, or NULL
 * \param voffset  byte offset, or NULL
 * \param soffset  scalar byte offset; NOTE(review): the pre-3.9 path passes
 *                 it to the intrinsic unconditionally — confirm callers
 *                 never pass NULL there.
 */
static LLVMValueRef build_buffer_load(struct si_shader_context *ctx,
				      LLVMValueRef rsrc,
				      int num_channels,
				      LLVMValueRef vindex,
				      LLVMValueRef voffset,
				      LLVMValueRef soffset,
				      unsigned inst_offset,
				      unsigned glc,
				      unsigned slc)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	if (HAVE_LLVM >= 0x309) {
		/* New-style intrinsic: a single combined byte offset in
		 * args[2]; glc/slc are i1 flags. */
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, ""),
			vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0)
		};

		LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
				       ctx->v4f32};
		const char *type_names[] = {"f32", "v2f32", "v4f32"};
		char name[256];

		/* Fold voffset/soffset into the single offset argument. */
		if (voffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], voffset,
					       "");
		}

		if (soffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], soffset,
					       "");
		}

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
			 type_names[func]);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	} else {
		/* Legacy intrinsic: separate vaddr/soffset plus explicit
		 * offen/idxen/glc/slc/tfe i32 flags. */
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v16i8, ""),
			voffset ? voffset : vindex,
			soffset,
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i32, voffset ? 1 : 0, 0), // offen
			LLVMConstInt(ctx->i32, vindex ? 1 : 0, 0), //idxen
			LLVMConstInt(ctx->i32, glc, 0),
			LLVMConstInt(ctx->i32, slc, 0),
			LLVMConstInt(ctx->i32, 0, 0), // TFE
		};

		LLVMTypeRef types[] = {ctx->i32, LLVMVectorType(ctx->i32, 2),
				       ctx->v4i32};
		const char *type_names[] = {"i32", "v2i32", "v4i32"};
		const char *arg_type = "i32";
		char name[256];

		/* Both an index and an offset: pack them into a v2i32 vaddr. */
		if (voffset && vindex) {
			LLVMValueRef vaddr[] = {vindex, voffset};

			arg_type = "v2i32";
			args[1] = lp_build_gather_values(gallivm, vaddr, 2);
		}

		snprintf(name, sizeof(name), "llvm.SI.buffer.load.dword.%s.%s",
			 type_names[func], arg_type);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	}
}
845
846 static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
847 enum tgsi_opcode_type type, unsigned swizzle,
848 LLVMValueRef buffer, LLVMValueRef offset,
849 LLVMValueRef base)
850 {
851 struct si_shader_context *ctx = si_shader_context(bld_base);
852 struct gallivm_state *gallivm = bld_base->base.gallivm;
853 LLVMValueRef value, value2;
854 LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
855 LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);
856
857 if (swizzle == ~0) {
858 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
859 0, 1, 0);
860
861 return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
862 }
863
864 if (!tgsi_type_is_64bit(type)) {
865 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
866 0, 1, 0);
867
868 value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
869 return LLVMBuildExtractElement(gallivm->builder, value,
870 lp_build_const_int32(gallivm, swizzle), "");
871 }
872
873 value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
874 swizzle * 4, 1, 0);
875
876 value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
877 swizzle * 4 + 4, 1, 0);
878
879 return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
880 }
881
/**
 * Load from LDS.
 *
 * \param type	  output value type
 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
 *                by recursing once per channel
 * \param dw_addr address in dwords
 */
static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
			     enum tgsi_opcode_type type, unsigned swizzle,
			     LLVMValueRef dw_addr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef value;

	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];

		for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
			values[chan] = lds_load(bld_base, type, chan, dw_addr);

		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
			       lp_build_const_int32(gallivm, swizzle));

	value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
	/* 64-bit values occupy two consecutive dwords. */
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef value2;
		dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
				       lp_build_const_int32(gallivm, 1));
		value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
		return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
	}

	return LLVMBuildBitCast(gallivm->builder, value,
				tgsi2llvmtype(bld_base, type), "");
}
922
923 /**
924 * Store to LDS.
925 *
926 * \param swizzle offset (typically 0..3)
927 * \param dw_addr address in dwords
928 * \param value value to store
929 */
930 static void lds_store(struct lp_build_tgsi_context *bld_base,
931 unsigned swizzle, LLVMValueRef dw_addr,
932 LLVMValueRef value)
933 {
934 struct si_shader_context *ctx = si_shader_context(bld_base);
935 struct gallivm_state *gallivm = bld_base->base.gallivm;
936
937 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
938 lp_build_const_int32(gallivm, swizzle));
939
940 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
941 build_indexed_store(ctx, ctx->lds,
942 dw_addr, value);
943 }
944
945 static LLVMValueRef fetch_input_tcs(
946 struct lp_build_tgsi_context *bld_base,
947 const struct tgsi_full_src_register *reg,
948 enum tgsi_opcode_type type, unsigned swizzle)
949 {
950 struct si_shader_context *ctx = si_shader_context(bld_base);
951 LLVMValueRef dw_addr, stride;
952
953 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
954 dw_addr = get_tcs_in_current_patch_offset(ctx);
955 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
956
957 return lds_load(bld_base, type, swizzle, dw_addr);
958 }
959
/* Fetch a TCS output from LDS (TCS may read back its own outputs). */
static LLVMValueRef fetch_output_tcs(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_src_register *reg,
		enum tgsi_opcode_type type, unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef dw_addr, stride;

	if (reg->Register.Dimension) {
		/* Per-vertex output: address within the current patch
		 * using the per-vertex dword stride. */
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
	} else {
		/* Per-patch output: no vertex dimension. */
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
	}

	return lds_load(bld_base, type, swizzle, dw_addr);
}
979
/* Fetch a TES input (a TCS output) from the off-chip TESS ring buffer. */
static LLVMValueRef fetch_input_tes(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type, unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef rw_buffers, buffer, base, addr;

	/* Get the TESS_OFFCHIP ring buffer descriptor. */
	rw_buffers = LLVMGetParam(ctx->main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
	addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);

	return buffer_load(bld_base, type, swizzle, buffer, base, addr);
}
999
/**
 * Store a TCS output.
 *
 * The value is written twice: to LDS (so later TCS reads via
 * fetch_output_tcs can see it) and to the off-chip tess ring buffer
 * (so the TES can read it). Anything that is not a scalar per-patch or
 * per-vertex OUTPUT store is forwarded to the generic store path.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
		             const struct tgsi_full_instruction *inst,
		             const struct tgsi_opcode_info *info,
		             LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef rw_buffers, buffer, base, buf_addr;
	LLVMValueRef values[4];

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	/* Compute the LDS dword address: per-vertex outputs need the
	 * output vertex stride, per-patch outputs do not. */
	if (reg->Register.Dimension) {
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
	}

	/* Descriptor of the off-chip tess ring for the buffer stores. */
	rw_buffers = LLVMGetParam(ctx->main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = si_llvm_saturate(bld_base, value);

		/* Always store to LDS for later TCS-side reads. */
		lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: emit one dword store per channel. */
		if (inst->Dst[0].Register.WriteMask != 0xF) {
			build_tbuffer_store_dwords(ctx, buffer, value, 1,
					           buf_addr, base,
						   4 * chan_index);
		}
	}

	/* Full writemask: emit a single vec4 store instead. */
	if (inst->Dst[0].Register.WriteMask == 0xF) {
		LLVMValueRef value = lp_build_gather_values(bld_base->base.gallivm,
		                                            values, 4);
		build_tbuffer_store_dwords(ctx, buffer, value, 4, buf_addr,
		                           base, 0);
	}
}
1065
/**
 * Fetch a GS input: per-vertex inputs are loaded from the ESGS ring,
 * PRIMID is returned directly.
 *
 * For swizzle == ~0 all four channels are fetched recursively and
 * gathered into a vector. 64-bit types load two dwords and combine them.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint =	&ctx->soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	/* GS inputs are always per-vertex (2D); bail out otherwise. */
	if (!reg->Register.Dimension)
		return NULL;

	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	/* Vertex offsets are in dwords; convert to a byte offset. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	/* Constant offset into the ESGS ring; each attribute slot appears
	 * to occupy 256 bytes here — derived from the ring layout set up
	 * elsewhere (NOTE(review): verify against the ESGS ring setup). */
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one;  /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one;  /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute);
	if (tgsi_type_is_64bit(type)) {
		LLVMValueRef value2;
		/* Load the second dword of the 64-bit value from the
		 * next attribute slot. */
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1144
1145 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1146 {
1147 switch (interpolate) {
1148 case TGSI_INTERPOLATE_CONSTANT:
1149 return 0;
1150
1151 case TGSI_INTERPOLATE_LINEAR:
1152 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1153 return SI_PARAM_LINEAR_SAMPLE;
1154 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1155 return SI_PARAM_LINEAR_CENTROID;
1156 else
1157 return SI_PARAM_LINEAR_CENTER;
1158 break;
1159 case TGSI_INTERPOLATE_COLOR:
1160 case TGSI_INTERPOLATE_PERSPECTIVE:
1161 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1162 return SI_PARAM_PERSP_SAMPLE;
1163 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1164 return SI_PARAM_PERSP_CENTROID;
1165 else
1166 return SI_PARAM_PERSP_CENTER;
1167 break;
1168 default:
1169 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1170 return -1;
1171 }
1172 }
1173
1174 /* This shouldn't be used by explicit INTERP opcodes. */
1175 static unsigned select_interp_param(struct si_shader_context *ctx,
1176 unsigned param)
1177 {
1178 if (!ctx->is_monolithic)
1179 return param;
1180
1181 if (ctx->shader->key.ps.prolog.force_persp_sample_interp) {
1182 switch (param) {
1183 case SI_PARAM_PERSP_CENTROID:
1184 case SI_PARAM_PERSP_CENTER:
1185 return SI_PARAM_PERSP_SAMPLE;
1186 }
1187 }
1188 if (ctx->shader->key.ps.prolog.force_linear_sample_interp) {
1189 switch (param) {
1190 case SI_PARAM_LINEAR_CENTROID:
1191 case SI_PARAM_LINEAR_CENTER:
1192 return SI_PARAM_LINEAR_SAMPLE;
1193 }
1194 }
1195 if (ctx->shader->key.ps.prolog.force_persp_center_interp) {
1196 switch (param) {
1197 case SI_PARAM_PERSP_CENTROID:
1198 case SI_PARAM_PERSP_SAMPLE:
1199 return SI_PARAM_PERSP_CENTER;
1200 }
1201 }
1202 if (ctx->shader->key.ps.prolog.force_linear_center_interp) {
1203 switch (param) {
1204 case SI_PARAM_LINEAR_CENTROID:
1205 case SI_PARAM_LINEAR_SAMPLE:
1206 return SI_PARAM_LINEAR_CENTER;
1207 }
1208 }
1209
1210 return param;
1211 }
1212
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->soa.bld_base.base;
	struct lp_build_context *uint =	&ctx->soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and the
		 * back color attribute and select per-fragment by face. */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		/* fs.interp takes 4 args; fs.constant omits the weights,
		 * hence the "args[3] ? 4 : 3" argument counts below. */
		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			args[0] = llvm_chan;
			args[1] = attr_number;
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						is_face_positive,
						front,
						back,
						"");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only the X channel is interpolated;
		 * the result is (f, 0, 0, 1). */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					ctx->f32, args, args[3] ? 4 : 3,
					LLVMReadNoneAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Common case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
						ctx->f32, args, args[3] ? 4 : 3,
						LLVMReadNoneAttribute);
		}
	}
}
1329
/* LLVMGetParam with bc_optimize resolved.
 *
 * Returns the interpolation-weight input for \p interp_param_idx,
 * substituting CENTER for CENTROID at runtime when the bc_optimize bit
 * (PRIM_MASK[31]) is set for a fully-covered wave.
 */
static LLVMValueRef get_interp_param(struct si_shader_context *ctx,
				     int interp_param_idx)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef param = NULL;

	/* Handle PRIM_MASK[31] (bc_optimize). */
	if (ctx->is_monolithic &&
	    ((ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
	      interp_param_idx == SI_PARAM_PERSP_CENTROID) ||
	     (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
	      interp_param_idx == SI_PARAM_LINEAR_CENTROID))) {
		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 */
		/* Extract bit 31 as an i1 condition. */
		LLVMValueRef bc_optimize =
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK);
		bc_optimize = LLVMBuildLShr(builder,
					    bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(builder, bc_optimize, ctx->i1, "");

		if (ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
		    interp_param_idx == SI_PARAM_PERSP_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTROID),
						"");
		}
		if (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
		    interp_param_idx == SI_PARAM_LINEAR_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTROID),
						"");
		}
	}

	/* No override applied: return the parameter directly. */
	if (!param)
		param = LLVMGetParam(main_fn, interp_param_idx);
	return param;
}
1379
/**
 * Declare a fragment shader input: compute its four channel values in
 * \p out, either from prolog-provided color VGPRs or by emitting
 * interpolation intrinsics via interp_fs_input().
 */
static void declare_input_fs(
	struct si_shader_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = radeon_bld->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (!ctx->is_monolithic &&
	    decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (i * 4);
		/* Color VGPRs follow SI_PARAM_POS_FIXED_PT; color 1's
		 * components come after the read components of color 0. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		/* Nonzero index: fetch the (i,j) weights, honoring the
		 * monolithic-shader interpolation overrides. */
		interp_param_idx = select_interp_param(ctx,
						       interp_param_idx);
		interp_param = get_interp_param(ctx, interp_param_idx);
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    decl->Interp.Interpolate == TGSI_INTERPOLATE_COLOR &&
	    ctx->shader->key.ps.prolog.flatshade_colors)
		interp_param = NULL; /* load the constant color */

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1432
1433 static LLVMValueRef get_sample_id(struct si_shader_context *radeon_bld)
1434 {
1435 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1436 SI_PARAM_ANCILLARY, 8, 4);
1437 }
1438
1439 /**
1440 * Set range metadata on an instruction. This can only be used on load and
1441 * call instructions. If you know an instruction can only produce the values
1442 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1443 * \p lo is the minimum value inclusive.
1444 * \p hi is the maximum value exclusive.
1445 */
1446 static void set_range_metadata(struct si_shader_context *ctx,
1447 LLVMValueRef value, unsigned lo, unsigned hi)
1448 {
1449 LLVMValueRef range_md, md_args[2];
1450 LLVMTypeRef type = LLVMTypeOf(value);
1451 LLVMContextRef context = LLVMGetTypeContext(type);
1452
1453 md_args[0] = LLVMConstInt(type, lo, false);
1454 md_args[1] = LLVMConstInt(type, hi, false);
1455 range_md = LLVMMDNodeInContext(context, md_args, 2);
1456 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1457 }
1458
1459 static LLVMValueRef get_thread_id(struct si_shader_context *ctx)
1460 {
1461 struct gallivm_state *gallivm = &ctx->gallivm;
1462 LLVMValueRef tid;
1463
1464 if (HAVE_LLVM < 0x0308) {
1465 tid = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid",
1466 ctx->i32, NULL, 0, LLVMReadNoneAttribute);
1467 } else {
1468 LLVMValueRef tid_args[2];
1469 tid_args[0] = lp_build_const_int32(gallivm, 0xffffffff);
1470 tid_args[1] = lp_build_const_int32(gallivm, 0);
1471 tid_args[1] = lp_build_intrinsic(gallivm->builder,
1472 "llvm.amdgcn.mbcnt.lo", ctx->i32,
1473 tid_args, 2, LLVMReadNoneAttribute);
1474
1475 tid = lp_build_intrinsic(gallivm->builder,
1476 "llvm.amdgcn.mbcnt.hi", ctx->i32,
1477 tid_args, 2, LLVMReadNoneAttribute);
1478 }
1479 set_range_metadata(ctx, tid, 0, 64);
1480 return tid;
1481 }
1482
1483 /**
1484 * Load a dword from a constant buffer.
1485 */
1486 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1487 LLVMValueRef resource,
1488 LLVMValueRef offset)
1489 {
1490 LLVMBuilderRef builder = ctx->gallivm.builder;
1491 LLVMValueRef args[2] = {resource, offset};
1492
1493 return lp_build_intrinsic(builder, "llvm.SI.load.const", ctx->f32, args, 2,
1494 LLVMReadNoneAttribute);
1495 }
1496
1497 static LLVMValueRef load_sample_position(struct si_shader_context *radeon_bld, LLVMValueRef sample_id)
1498 {
1499 struct si_shader_context *ctx =
1500 si_shader_context(&radeon_bld->soa.bld_base);
1501 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1502 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1503 LLVMBuilderRef builder = gallivm->builder;
1504 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
1505 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_PS_CONST_SAMPLE_POSITIONS);
1506 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1507
1508 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1509 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1510 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1511
1512 LLVMValueRef pos[4] = {
1513 buffer_load_const(ctx, resource, offset0),
1514 buffer_load_const(ctx, resource, offset1),
1515 lp_build_const_float(gallivm, 0),
1516 lp_build_const_float(gallivm, 0)
1517 };
1518
1519 return lp_build_gather_values(gallivm, pos, 4);
1520 }
1521
/**
 * Declare a TGSI system value: compute the LLVM value that supplies it
 * and store it in radeon_bld->system_values[index].
 */
static void declare_system_value(
	struct si_shader_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID = raw vertex id + base vertex. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_START_INSTANCE);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_DRAWID);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* Source differs per stage: TCS unpacks it from REL_IDS,
		 * GS reads it from a dedicated input. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* gl_FragCoord: (x, y, z, 1/w). */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position within the pixel = fractional part of
		 * the fragment position; z and w are 0. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count lives in different layout SGPRs
		 * depending on the stage. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 7);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess levels are read back from the off-chip tess ring. */
		LLVMValueRef rw_buffers, buffer, base, addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		rw_buffers = LLVMGetParam(ctx->main_fn,
					SI_PARAM_RW_BUFFERS);
		buffer = build_indexed_load_const(ctx, rw_buffers,
		        lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

		base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
		addr = get_tcs_tes_buffer_address(ctx, NULL,
		                          lp_build_const_int32(gallivm, param));

		value = buffer_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
		                    ~0, buffer, base, addr);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from a driver constant buffer:
		 * outer levels at dwords 0-3, inner levels at dwords 4-7. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = lp_build_const_int32(gallivm, SI_HS_CONST_DEFAULT_TESS_LEVELS);
		buf = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
		buf = build_indexed_load_const(ctx, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   lp_build_const_int32(gallivm, (offset + i) * 4));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_GRID_SIZE);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		/* Prefer the compile-time fixed block size when the shader
		 * declares one; otherwise read it from a user SGPR. */
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = lp_build_const_int32(gallivm, sizes[i]);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_SIZE);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_ID);
		break;

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_THREAD_ID);
		break;

#if HAVE_LLVM >= 0x0309
	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* A helper invocation is a lane that is NOT live;
		 * sign-extend the inverted i1 to the usual 0/-1 bool. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LLVMReadNoneAttribute);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;
#endif

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1745
1746 static void declare_compute_memory(struct si_shader_context *radeon_bld,
1747 const struct tgsi_full_declaration *decl)
1748 {
1749 struct si_shader_context *ctx =
1750 si_shader_context(&radeon_bld->soa.bld_base);
1751 struct si_shader_selector *sel = ctx->shader->selector;
1752 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1753
1754 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1755 LLVMValueRef var;
1756
1757 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1758 assert(decl->Range.First == decl->Range.Last);
1759 assert(!ctx->shared_memory);
1760
1761 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1762 LLVMArrayType(ctx->i8, sel->local_size),
1763 "compute_lds",
1764 LOCAL_ADDR_SPACE);
1765 LLVMSetAlignment(var, 4);
1766
1767 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1768 }
1769
1770 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1771 {
1772 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1773 SI_PARAM_CONST_BUFFERS);
1774
1775 return build_indexed_load_const(ctx, list_ptr,
1776 LLVMConstInt(ctx->i32, i, 0));
1777 }
1778
/**
 * Fetch one channel (or, for LP_CHAN_ALL, a vec4) of a constant-buffer
 * source register, handling indirect buffer and element addressing and
 * 64-bit types (loaded as two dwords).
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	/* LP_CHAN_ALL: fetch all four channels and gather into a vector. */
	if (swizzle == LP_CHAN_ALL) {
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	/* Select the constant buffer descriptor, possibly indirectly. */
	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_CONST_BUFFERS);
		LLVMValueRef index;
		index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
						   reg->Dimension.Index,
						   SI_NUM_CONST_BUFFERS);
		bufp = build_indexed_load_const(ctx, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	/* Byte offset: indirect index * 16 (vec4 stride) + idx * 4. */
	if (reg->Register.Indirect) {
		addr = ctx->soa.addr[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    lp_build_const_int32(base->gallivm, idx * 4));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit: load the adjacent dword and combine. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1841
1842 /* Upper 16 bits must be zero. */
1843 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1844 LLVMValueRef val[2])
1845 {
1846 return LLVMBuildOr(gallivm->builder, val[0],
1847 LLVMBuildShl(gallivm->builder, val[1],
1848 lp_build_const_int32(gallivm, 16),
1849 ""), "");
1850 }
1851
1852 /* Upper 16 bits are ignored and will be dropped. */
1853 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1854 LLVMValueRef val[2])
1855 {
1856 LLVMValueRef v[2] = {
1857 LLVMBuildAnd(gallivm->builder, val[0],
1858 lp_build_const_int32(gallivm, 0xffff), ""),
1859 val[1],
1860 };
1861 return si_llvm_pack_two_int16(gallivm, v);
1862 }
1863
1864 /* Initialize arguments for the shader export intrinsic */
1865 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1866 LLVMValueRef *values,
1867 unsigned target,
1868 LLVMValueRef *args)
1869 {
1870 struct si_shader_context *ctx = si_shader_context(bld_base);
1871 struct lp_build_context *uint =
1872 &ctx->soa.bld_base.uint_bld;
1873 struct lp_build_context *base = &bld_base->base;
1874 struct gallivm_state *gallivm = base->gallivm;
1875 LLVMBuilderRef builder = base->gallivm->builder;
1876 LLVMValueRef val[4];
1877 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1878 unsigned chan;
1879 bool is_int8;
1880
1881 /* Default is 0xf. Adjusted below depending on the format. */
1882 args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1883
1884 /* Specify whether the EXEC mask represents the valid mask */
1885 args[1] = uint->zero;
1886
1887 /* Specify whether this is the last export */
1888 args[2] = uint->zero;
1889
1890 /* Specify the target we are exporting */
1891 args[3] = lp_build_const_int32(base->gallivm, target);
1892
1893 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1894 const union si_shader_key *key = &ctx->shader->key;
1895 unsigned col_formats = key->ps.epilog.spi_shader_col_format;
1896 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1897
1898 assert(cbuf >= 0 && cbuf < 8);
1899 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1900 is_int8 = (key->ps.epilog.color_is_int8 >> cbuf) & 0x1;
1901 }
1902
1903 args[4] = uint->zero; /* COMPR flag */
1904 args[5] = base->undef;
1905 args[6] = base->undef;
1906 args[7] = base->undef;
1907 args[8] = base->undef;
1908
1909 switch (spi_shader_col_format) {
1910 case V_028714_SPI_SHADER_ZERO:
1911 args[0] = uint->zero; /* writemask */
1912 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
1913 break;
1914
1915 case V_028714_SPI_SHADER_32_R:
1916 args[0] = uint->one; /* writemask */
1917 args[5] = values[0];
1918 break;
1919
1920 case V_028714_SPI_SHADER_32_GR:
1921 args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
1922 args[5] = values[0];
1923 args[6] = values[1];
1924 break;
1925
1926 case V_028714_SPI_SHADER_32_AR:
1927 args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
1928 args[5] = values[0];
1929 args[8] = values[3];
1930 break;
1931
1932 case V_028714_SPI_SHADER_FP16_ABGR:
1933 args[4] = uint->one; /* COMPR flag */
1934
1935 for (chan = 0; chan < 2; chan++) {
1936 LLVMValueRef pack_args[2] = {
1937 values[2 * chan],
1938 values[2 * chan + 1]
1939 };
1940 LLVMValueRef packed;
1941
1942 packed = lp_build_intrinsic(base->gallivm->builder,
1943 "llvm.SI.packf16",
1944 ctx->i32, pack_args, 2,
1945 LLVMReadNoneAttribute);
1946 args[chan + 5] =
1947 LLVMBuildBitCast(base->gallivm->builder,
1948 packed, ctx->f32, "");
1949 }
1950 break;
1951
1952 case V_028714_SPI_SHADER_UNORM16_ABGR:
1953 for (chan = 0; chan < 4; chan++) {
1954 val[chan] = si_llvm_saturate(bld_base, values[chan]);
1955 val[chan] = LLVMBuildFMul(builder, val[chan],
1956 lp_build_const_float(gallivm, 65535), "");
1957 val[chan] = LLVMBuildFAdd(builder, val[chan],
1958 lp_build_const_float(gallivm, 0.5), "");
1959 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1960 ctx->i32, "");
1961 }
1962
1963 args[4] = uint->one; /* COMPR flag */
1964 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1965 si_llvm_pack_two_int16(gallivm, val));
1966 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1967 si_llvm_pack_two_int16(gallivm, val+2));
1968 break;
1969
1970 case V_028714_SPI_SHADER_SNORM16_ABGR:
1971 for (chan = 0; chan < 4; chan++) {
1972 /* Clamp between [-1, 1]. */
1973 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1974 values[chan],
1975 lp_build_const_float(gallivm, 1));
1976 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1977 val[chan],
1978 lp_build_const_float(gallivm, -1));
1979 /* Convert to a signed integer in [-32767, 32767]. */
1980 val[chan] = LLVMBuildFMul(builder, val[chan],
1981 lp_build_const_float(gallivm, 32767), "");
1982 /* If positive, add 0.5, else add -0.5. */
1983 val[chan] = LLVMBuildFAdd(builder, val[chan],
1984 LLVMBuildSelect(builder,
1985 LLVMBuildFCmp(builder, LLVMRealOGE,
1986 val[chan], base->zero, ""),
1987 lp_build_const_float(gallivm, 0.5),
1988 lp_build_const_float(gallivm, -0.5), ""), "");
1989 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1990 }
1991
1992 args[4] = uint->one; /* COMPR flag */
1993 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1994 si_llvm_pack_two_int32_as_int16(gallivm, val));
1995 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1996 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
1997 break;
1998
1999 case V_028714_SPI_SHADER_UINT16_ABGR: {
2000 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2001 255 : 65535);
2002 /* Clamp. */
2003 for (chan = 0; chan < 4; chan++) {
2004 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2005 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
2006 val[chan], max);
2007 }
2008
2009 args[4] = uint->one; /* COMPR flag */
2010 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2011 si_llvm_pack_two_int16(gallivm, val));
2012 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2013 si_llvm_pack_two_int16(gallivm, val+2));
2014 break;
2015 }
2016
2017 case V_028714_SPI_SHADER_SINT16_ABGR: {
2018 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2019 127 : 32767);
2020 LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
2021 -128 : -32768);
2022 /* Clamp. */
2023 for (chan = 0; chan < 4; chan++) {
2024 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2025 val[chan] = lp_build_emit_llvm_binary(bld_base,
2026 TGSI_OPCODE_IMIN,
2027 val[chan], max);
2028 val[chan] = lp_build_emit_llvm_binary(bld_base,
2029 TGSI_OPCODE_IMAX,
2030 val[chan], min);
2031 }
2032
2033 args[4] = uint->one; /* COMPR flag */
2034 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2035 si_llvm_pack_two_int32_as_int16(gallivm, val));
2036 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2037 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2038 break;
2039 }
2040
2041 case V_028714_SPI_SHADER_32_ABGR:
2042 memcpy(&args[5], values, sizeof(values[0]) * 4);
2043 break;
2044 }
2045 }
2046
2047 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2048 LLVMValueRef alpha)
2049 {
2050 struct si_shader_context *ctx = si_shader_context(bld_base);
2051 struct gallivm_state *gallivm = bld_base->base.gallivm;
2052
2053 if (ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2054 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2055 SI_PARAM_ALPHA_REF);
2056
2057 LLVMValueRef alpha_pass =
2058 lp_build_cmp(&bld_base->base,
2059 ctx->shader->key.ps.epilog.alpha_func,
2060 alpha, alpha_ref);
2061 LLVMValueRef arg =
2062 lp_build_select(&bld_base->base,
2063 alpha_pass,
2064 lp_build_const_float(gallivm, 1.0f),
2065 lp_build_const_float(gallivm, -1.0f));
2066
2067 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
2068 ctx->voidt, &arg, 1, 0);
2069 } else {
2070 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
2071 ctx->voidt, NULL, 0, 0);
2072 }
2073 }
2074
2075 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2076 LLVMValueRef alpha,
2077 unsigned samplemask_param)
2078 {
2079 struct si_shader_context *ctx = si_shader_context(bld_base);
2080 struct gallivm_state *gallivm = bld_base->base.gallivm;
2081 LLVMValueRef coverage;
2082
2083 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2084 coverage = LLVMGetParam(ctx->main_fn,
2085 samplemask_param);
2086 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2087
2088 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2089 ctx->i32,
2090 &coverage, 1, LLVMReadNoneAttribute);
2091
2092 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2093 ctx->f32, "");
2094
2095 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2096 lp_build_const_float(gallivm,
2097 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2098
2099 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2100 }
2101
/* Compute clip distances from TGSI_SEMANTIC_CLIPVERTEX and fill in the
 * two clip-distance position export argument arrays (pos[2] and pos[3]).
 *
 * \param pos       array of 9-element export argument lists; entries
 *                  [2] and [3] (V_008DFC_SQ_EXP_POS + 2/3) are written
 * \param out_elts  the 4 components of the CLIPVERTEX output
 *
 * Each exported component is the dot product of the clip vertex with one
 * user clip plane read from the SI_VS_CONST_CLIP_PLANES constant buffer.
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm,
							   SI_VS_CONST_CLIP_PLANES);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start all four dot-product accumulators (export data
		 * slots args[5..8]) at zero. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* args[1] is temporarily reused as the byte
				 * offset of the plane coefficient; it is
				 * overwritten with the real EXEC-mask flag
				 * after the loops. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(ctx, const_resource,
							     args[1]);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Fill in the remaining export arguments. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		args[1] = uint->zero; /* EXEC mask */
		args[2] = uint->zero; /* last export? */
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero; /* COMPR flag */
	}
}
2148
2149 static void si_dump_streamout(struct pipe_stream_output_info *so)
2150 {
2151 unsigned i;
2152
2153 if (so->num_outputs)
2154 fprintf(stderr, "STREAMOUT\n");
2155
2156 for (i = 0; i < so->num_outputs; i++) {
2157 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2158 so->output[i].start_component;
2159 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2160 i, so->output[i].output_buffer,
2161 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2162 so->output[i].register_index,
2163 mask & 1 ? "x" : "",
2164 mask & 2 ? "y" : "",
2165 mask & 4 ? "z" : "",
2166 mask & 8 ? "w" : "");
2167 }
2168 }
2169
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Emits code that stores the shader outputs listed in the selector's
 * pipe_stream_output_info into the streamout buffers, guarded so that
 * only threads allowed by the hw (so_vtx_count) and matching the
 * currently selected stream actually store.
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;
	LLVMValueRef so_buffers[4];
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    SI_PARAM_RW_BUFFERS);

	/* Load the descriptors (only for buffers with a non-zero stride). */
	for (i = 0; i < 4; ++i) {
		if (ctx->shader->selector->so.stride[i]) {
			LLVMValueRef offset = lp_build_const_int32(gallivm,
								   SI_VS_STREAMOUT_BUF0 + i);

			so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
		}
	}

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = get_thread_id(ctx);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Stream selector: bits [25:24] of the streamout config SGPR. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
                 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			/* streamout_offset is in dwords; convert to bytes. */
			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Skip outputs this shader variant didn't produce. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output into a scalar or vector suited for
			 * a single buffer_store. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Store only if this output belongs to the stream
			 * currently selected by the hw. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
2299
2300
/* Generate export instructions for hardware VS shader stage.
 *
 * Walks the output list, classifies each output by TGSI semantic, and
 * emits llvm.SI.export calls: position-family exports (POS, PSIZE,
 * CLIPDIST, the misc vector) are buffered in pos_args and emitted last
 * so the final one can carry the "done" bit; everything else goes out
 * as a PARAM export immediately. Also runs streamout first if the
 * selector has streamout outputs, and records nr_pos_exports /
 * nr_param_exports / vs_output_param_offset in shader->info.
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
				&ctx->soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	/* Write streamout data before the exports. */
	if (outputs && ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(ctx, outputs, noutput);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].sid;

handle_semantic:
		/* Select the correct target.
		 * Some semantics are re-dispatched: they jump back here
		 * after rewriting semantic_name (LAYER/VIEWPORT_INDEX are
		 * also exported as GENERIC params, CLIPDIST twice). */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc position vector below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc position vector below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			/* Fills pos_args[2]/[3] with computed clip distances. */
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			/* Position exports are buffered and emitted last. */
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export", ctx->voidt,
					   args, 9, 0);
		}

		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			/* Also export clip distances as a GENERIC param. */
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->selector->info.writes_psize |
						      (shader->selector->info.writes_edgeflag << 1) |
						      (shader->selector->info.writes_layer << 2) |
						      (shader->selector->info.writes_viewport_index << 3));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1][5] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  ctx->f32, "");
		}

		if (shader->selector->info.writes_layer)
			pos_args[1][7] = layer_value;

		if (shader->selector->info.writes_viewport_index)
			pos_args[1][8] = viewport_index_value;
	}

	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->info.nr_pos_exports++;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, pos_args[i], 9, 0);
	}
}
2475
/* Copy the TCS inputs selected by key.tcs.epilog.inputs_to_copy from LDS
 * (where the LS stage wrote them) to the off-chip tess buffer, so the
 * TES can read them. One 4-dword store per selected input slot.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef invocation_id, rw_buffers, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);

	/* Descriptor of the off-chip tess ring. */
	rw_buffers = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
	                lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);

	/* LDS address of this invocation's vertex inputs:
	 * current patch base + invocation_id * vertex stride (in dwords). */
	lds_vertex_stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
	                                 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	/* One bit per input slot to copy. */
	inputs = ctx->shader->key.tcs.epilog.inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
		                        lp_build_const_int32(gallivm, 4 * i),
		                         "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
		                              invocation_id,
		                              lp_build_const_int32(gallivm, i));

		/* Load all 4 components from LDS... */
		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
		                              lds_ptr);

		/* ...and store them to the off-chip buffer. */
		build_tbuffer_store_dwords(ctx, buffer, value, 4, buffer_addr,
		                           buffer_offset, 0);
	}
}
2517
/* Read the tess factors (TESSINNER/TESSOUTER) from LDS and store them to
 * the tess-factor ring buffer, including the dynamic HS control word.
 * Only invocation 0 of each patch does the work, and only rel_patch_id 0
 * writes the control word.
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations have written their outputs to LDS
	 * before invocation 0 reads the tess levels back. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* Buffer layout: outer components first, then inner. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, bld_base->uint_bld.zero, ""));

	/* Store the dynamic HS control word. */
	build_tbuffer_store_dwords(ctx, buffer,
				   lp_build_const_int32(gallivm, 0x80000000),
				   1, lp_build_const_int32(gallivm, 0), tf_base, 0);

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors. */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 4);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 20);
	lp_build_endif(&if_ctx);
}
2623
/* This only writes the tessellation factor levels.
 *
 * In the non-monolithic case the factors aren't written here; instead,
 * the epilog parameters (RW_BUFFERS as two SGPRs, the tess-factor
 * soffset, and the three VGPRs) are packed into the function return
 * value for a separately-compiled epilog to consume.
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (!ctx->is_monolithic) {
		/* Return epilog parameters from this function. */
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef ret = ctx->return_value;
		LLVMValueRef rw_buffers, rw0, rw1, tf_soffset;
		unsigned vgpr;

		/* RW_BUFFERS pointer, split into two i32s for the SGPR
		 * return slots 0 and 1. */
		rw_buffers = LLVMGetParam(ctx->main_fn,
					  SI_PARAM_RW_BUFFERS);
		rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
		rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
		rw0 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.zero, "");
		rw1 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.one, "");
		ret = LLVMBuildInsertValue(builder, ret, rw0, 0, "");
		ret = LLVMBuildInsertValue(builder, ret, rw1, 1, "");

		/* Tess factor buffer soffset is after user SGPRs. */
		tf_soffset = LLVMGetParam(ctx->main_fn,
					  SI_PARAM_TESS_FACTOR_OFFSET);
		ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
					   SI_TCS_NUM_USER_SGPR + 1, "");

		/* VGPRs go in the slots after the SGPRs; they must be
		 * bitcast to float since the return VGPRs are floats. */
		rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
		invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
		tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

		vgpr = SI_TCS_NUM_USER_SGPR + 2;
		ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
		ctx->return_value = ret;
		return;
	}

	/* Monolithic shader: emit the epilog inline. */
	si_copy_tcs_inputs(bld_base);
	si_write_tess_factors(bld_base, rel_patch_id, invocation_id, tf_lds_offset);
}
2675
2676 static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
2677 {
2678 struct si_shader_context *ctx = si_shader_context(bld_base);
2679 struct si_shader *shader = ctx->shader;
2680 struct tgsi_shader_info *info = &shader->selector->info;
2681 struct gallivm_state *gallivm = bld_base->base.gallivm;
2682 unsigned i, chan;
2683 LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
2684 ctx->param_rel_auto_id);
2685 LLVMValueRef vertex_dw_stride =
2686 unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
2687 LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
2688 vertex_dw_stride, "");
2689
2690 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2691 * its inputs from it. */
2692 for (i = 0; i < info->num_outputs; i++) {
2693 LLVMValueRef *out_ptr = ctx->soa.outputs[i];
2694 unsigned name = info->output_semantic_name[i];
2695 unsigned index = info->output_semantic_index[i];
2696 int param = si_shader_io_get_unique_index(name, index);
2697 LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
2698 lp_build_const_int32(gallivm, param * 4), "");
2699
2700 for (chan = 0; chan < 4; chan++) {
2701 lds_store(bld_base, chan, dw_addr,
2702 LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
2703 }
2704 }
2705 }
2706
2707 static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
2708 {
2709 struct si_shader_context *ctx = si_shader_context(bld_base);
2710 struct gallivm_state *gallivm = bld_base->base.gallivm;
2711 struct si_shader *es = ctx->shader;
2712 struct tgsi_shader_info *info = &es->selector->info;
2713 LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
2714 ctx->param_es2gs_offset);
2715 unsigned chan;
2716 int i;
2717
2718 for (i = 0; i < info->num_outputs; i++) {
2719 LLVMValueRef *out_ptr =
2720 ctx->soa.outputs[i];
2721 int param_index;
2722
2723 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2724 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2725 continue;
2726
2727 param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
2728 info->output_semantic_index[i]);
2729
2730 for (chan = 0; chan < 4; chan++) {
2731 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
2732 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
2733
2734 build_tbuffer_store(ctx,
2735 ctx->esgs_ring,
2736 out_val, 1,
2737 LLVMGetUndef(ctx->i32), soffset,
2738 (4 * param_index + chan) * 4,
2739 V_008F0C_BUF_DATA_FORMAT_32,
2740 V_008F0C_BUF_NUM_FORMAT_UINT,
2741 0, 0, 1, 1, 0);
2742 }
2743 }
2744 }
2745
2746 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2747 {
2748 struct si_shader_context *ctx = si_shader_context(bld_base);
2749 struct gallivm_state *gallivm = bld_base->base.gallivm;
2750 LLVMValueRef args[2];
2751
2752 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2753 args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
2754 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2755 ctx->voidt, args, 2, 0);
2756 }
2757
2758 static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
2759 {
2760 struct si_shader_context *ctx = si_shader_context(bld_base);
2761 struct gallivm_state *gallivm = bld_base->base.gallivm;
2762 struct tgsi_shader_info *info = &ctx->shader->selector->info;
2763 struct si_shader_output_values *outputs = NULL;
2764 int i,j;
2765
2766 assert(!ctx->is_gs_copy_shader);
2767
2768 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2769
2770 /* Vertex color clamping.
2771 *
2772 * This uses a state constant loaded in a user data SGPR and
2773 * an IF statement is added that clamps all colors if the constant
2774 * is true.
2775 */
2776 if (ctx->type == PIPE_SHADER_VERTEX) {
2777 struct lp_build_if_state if_ctx;
2778 LLVMValueRef cond = NULL;
2779 LLVMValueRef addr, val;
2780
2781 for (i = 0; i < info->num_outputs; i++) {
2782 if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
2783 info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
2784 continue;
2785
2786 /* We've found a color. */
2787 if (!cond) {
2788 /* The state is in the first bit of the user SGPR. */
2789 cond = LLVMGetParam(ctx->main_fn,
2790 SI_PARAM_VS_STATE_BITS);
2791 cond = LLVMBuildTrunc(gallivm->builder, cond,
2792 ctx->i1, "");
2793 lp_build_if(&if_ctx, gallivm, cond);
2794 }
2795
2796 for (j = 0; j < 4; j++) {
2797 addr = ctx->soa.outputs[i][j];
2798 val = LLVMBuildLoad(gallivm->builder, addr, "");
2799 val = si_llvm_saturate(bld_base, val);
2800 LLVMBuildStore(gallivm->builder, val, addr);
2801 }
2802 }
2803
2804 if (cond)
2805 lp_build_endif(&if_ctx);
2806 }
2807
2808 for (i = 0; i < info->num_outputs; i++) {
2809 outputs[i].name = info->output_semantic_name[i];
2810 outputs[i].sid = info->output_semantic_index[i];
2811
2812 for (j = 0; j < 4; j++)
2813 outputs[i].values[j] =
2814 LLVMBuildLoad(gallivm->builder,
2815 ctx->soa.outputs[i][j],
2816 "");
2817 }
2818
2819 if (ctx->is_monolithic) {
2820 /* Export PrimitiveID when PS needs it. */
2821 if (si_vs_exports_prim_id(ctx->shader)) {
2822 outputs[i].name = TGSI_SEMANTIC_PRIMID;
2823 outputs[i].sid = 0;
2824 outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2825 get_primitive_id(bld_base, 0));
2826 outputs[i].values[1] = bld_base->base.undef;
2827 outputs[i].values[2] = bld_base->base.undef;
2828 outputs[i].values[3] = bld_base->base.undef;
2829 i++;
2830 }
2831 } else {
2832 /* Return the primitive ID from the LLVM function. */
2833 ctx->return_value =
2834 LLVMBuildInsertValue(gallivm->builder,
2835 ctx->return_value,
2836 bitcast(bld_base, TGSI_TYPE_FLOAT,
2837 get_primitive_id(bld_base, 0)),
2838 VS_EPILOG_PRIMID_LOC, "");
2839 }
2840
2841 si_llvm_export_vs(bld_base, outputs, i);
2842 FREE(outputs);
2843 }
2844
/* Queue of pending "llvm.SI.export" calls for the pixel shader epilogue;
 * filled by si_export_mrt_color/si_export_mrt_z and flushed all at once
 * by si_emit_ps_exports.
 */
struct si_ps_exports {
	unsigned num;		  /* number of queued exports */
	LLVMValueRef args[10][9]; /* 9 intrinsic arguments per export */
};
2849
2850 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
2851 bool writes_samplemask)
2852 {
2853 if (writes_z) {
2854 /* Z needs 32 bits. */
2855 if (writes_samplemask)
2856 return V_028710_SPI_SHADER_32_ABGR;
2857 else if (writes_stencil)
2858 return V_028710_SPI_SHADER_32_GR;
2859 else
2860 return V_028710_SPI_SHADER_32_R;
2861 } else if (writes_stencil || writes_samplemask) {
2862 /* Both stencil and sample mask need only 16 bits. */
2863 return V_028710_SPI_SHADER_UINT16_ABGR;
2864 } else {
2865 return V_028710_SPI_SHADER_ZERO;
2866 }
2867 }
2868
/**
 * Build the MRTZ export carrying depth, stencil and/or the sample mask,
 * and queue it in \p exp (the intrinsic is emitted later by
 * si_emit_ps_exports). At least one of the three values must be non-NULL.
 *
 * The argument layout and writemask depend on the format chosen by
 * si_get_spi_shader_z_format.
 */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	LLVMValueRef args[9];
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args[1] = uint->one; /* whether the EXEC mask is valid */
	args[2] = uint->one; /* DONE bit */

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

	args[4] = uint->zero; /* COMP flag */
	args[5] = base->undef; /* R, depth */
	args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args[7] = base->undef; /* B, sample mask */
	args[8] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* Compressed 16-bit export: stencil and sample mask are
		 * packed into the first two channels. */
		assert(!depth);
		args[4] = uint->one; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(base->gallivm->builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args[6] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit formats: one value per channel. */
		if (depth) {
			args[5] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args[6] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args[7] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND)
		mask |= 0x1;

	/* Specify which components to enable */
	args[0] = lp_build_const_int32(base->gallivm, mask);

	memcpy(exp->args[exp->num++], args, sizeof(args));
}
2939
/**
 * Apply the PS epilog key states (color clamping, alpha-to-one, alpha test,
 * line/polygon smoothing) to one COLOR output and queue its export(s)
 * in \p exp.
 *
 * \param color            the 4 channels of the output (may be modified)
 * \param index            color buffer index (semantic index of the output)
 * \param samplemask_param function parameter holding the coverage mask
 * \param is_last          whether this is the shader's last color export
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = si_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test: only applied to color buffer 0. */
	if (index == 0 &&
	    ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.ps.epilog.last_cbuf > 0) {
		/* Broadcast color0 to all enabled color buffers. */
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			memcpy(exp->args[exp->num++], args[c], sizeof(args[c]));
		}
	} else {
		LLVMValueRef args[9];

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		memcpy(exp->args[exp->num++], args, sizeof(args));
	}
}
3006
3007 static void si_emit_ps_exports(struct si_shader_context *ctx,
3008 struct si_ps_exports *exp)
3009 {
3010 for (unsigned i = 0; i < exp->num; i++)
3011 lp_build_intrinsic(ctx->gallivm.builder,
3012 "llvm.SI.export", ctx->voidt,
3013 exp->args[i], 9, 0);
3014 }
3015
3016 static void si_export_null(struct lp_build_tgsi_context *bld_base)
3017 {
3018 struct si_shader_context *ctx = si_shader_context(bld_base);
3019 struct lp_build_context *base = &bld_base->base;
3020 struct lp_build_context *uint = &bld_base->uint_bld;
3021 LLVMValueRef args[9];
3022
3023 args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
3024 args[1] = uint->one; /* whether the EXEC mask is valid */
3025 args[2] = uint->one; /* DONE bit */
3026 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
3027 args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
3028 args[5] = base->undef; /* R */
3029 args[6] = base->undef; /* G */
3030 args[7] = base->undef; /* B */
3031 args[8] = base->undef; /* A */
3032
3033 lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
3034 ctx->voidt, args, 9, 0);
3035 }
3036
/**
 * Monolithic pixel shader epilogue: read all shader outputs and emit the
 * color and MRTZ exports directly (as opposed to si_llvm_return_fs_outputs,
 * which hands the values to a separately compiled epilog part).
 */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;
	struct si_ps_exports exp = {};

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.epilog.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.epilog.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.epilog.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* 4 format bits per color buffer. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	/* Read the outputs and queue the exports. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			/* gl_FragDepth: only the Z channel is used. */
			depth = LLVMBuildLoad(builder,
					      ctx->soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    SI_PARAM_SAMPLE_COVERAGE,
					    last_color_export == i, &exp);
			break;
		default:
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* The MRTZ export, if any, always comes last. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);

	si_emit_ps_exports(ctx, &exp);
}
3128
3129 /**
3130 * Return PS outputs in this order:
3131 *
3132 * v[0:3] = color0.xyzw
3133 * v[4:7] = color1.xyzw
3134 * ...
3135 * vN+0 = Depth
3136 * vN+1 = Stencil
3137 * vN+2 = SampleMask
3138 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3139 *
3140 * The alpha-ref SGPR is returned via its original location.
3141 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			/* Colors are gathered per semantic index so that they
			 * end up in the return value in cbuf order. */
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->soa.outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* gl_FragDepth: only the Z channel is used. */
			depth = LLVMBuildLoad(builder,
					      ctx->soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->soa.outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: written colors first, packed densely (unwritten color
	 * buffers occupy no slots). */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	/* Depth, stencil and sample mask follow the colors, in this order. */
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3222
3223 /**
3224 * Given a v8i32 resource descriptor for a buffer, extract the size of the
3225 * buffer in number of elements and return it as an i32.
3226 */
3227 static LLVMValueRef get_buffer_size(
3228 struct lp_build_tgsi_context *bld_base,
3229 LLVMValueRef descriptor)
3230 {
3231 struct si_shader_context *ctx = si_shader_context(bld_base);
3232 struct gallivm_state *gallivm = bld_base->base.gallivm;
3233 LLVMBuilderRef builder = gallivm->builder;
3234 LLVMValueRef size =
3235 LLVMBuildExtractElement(builder, descriptor,
3236 lp_build_const_int32(gallivm, 6), "");
3237
3238 if (ctx->screen->b.chip_class >= VI) {
3239 /* On VI, the descriptor contains the size in bytes,
3240 * but TXQ must return the size in elements.
3241 * The stride is always non-zero for resources using TXQ.
3242 */
3243 LLVMValueRef stride =
3244 LLVMBuildExtractElement(builder, descriptor,
3245 lp_build_const_int32(gallivm, 5), "");
3246 stride = LLVMBuildLShr(builder, stride,
3247 lp_build_const_int32(gallivm, 16), "");
3248 stride = LLVMBuildAnd(builder, stride,
3249 lp_build_const_int32(gallivm, 0x3FFF), "");
3250
3251 size = LLVMBuildUDiv(builder, size, stride, "");
3252 }
3253
3254 return size;
3255 }
3256
3257 /**
3258 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
3259 * intrinsic names).
3260 */
3261 static void build_type_name_for_intr(
3262 LLVMTypeRef type,
3263 char *buf, unsigned bufsize)
3264 {
3265 LLVMTypeRef elem_type = type;
3266
3267 assert(bufsize >= 8);
3268
3269 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
3270 int ret = snprintf(buf, bufsize, "v%u",
3271 LLVMGetVectorSize(type));
3272 if (ret < 0) {
3273 char *type_name = LLVMPrintTypeToString(type);
3274 fprintf(stderr, "Error building type name for: %s\n",
3275 type_name);
3276 return;
3277 }
3278 elem_type = LLVMGetElementType(type);
3279 buf += ret;
3280 bufsize -= ret;
3281 }
3282 switch (LLVMGetTypeKind(elem_type)) {
3283 default: break;
3284 case LLVMIntegerTypeKind:
3285 snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
3286 break;
3287 case LLVMFloatTypeKind:
3288 snprintf(buf, bufsize, "f32");
3289 break;
3290 case LLVMDoubleTypeKind:
3291 snprintf(buf, bufsize, "f64");
3292 break;
3293 }
3294 }
3295
3296 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
3297 struct lp_build_tgsi_context *bld_base,
3298 struct lp_build_emit_data *emit_data);
3299
3300 /* Prevent optimizations (at least of memory accesses) across the current
3301 * point in the program by emitting empty inline assembly that is marked as
3302 * having side effects.
3303 */
3304 static void emit_optimization_barrier(struct si_shader_context *ctx)
3305 {
3306 LLVMBuilderRef builder = ctx->gallivm.builder;
3307 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
3308 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
3309 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
3310 }
3311
3312 static void emit_waitcnt(struct si_shader_context *ctx)
3313 {
3314 struct gallivm_state *gallivm = &ctx->gallivm;
3315 LLVMBuilderRef builder = gallivm->builder;
3316 LLVMValueRef args[1] = {
3317 lp_build_const_int32(gallivm, 0xf70)
3318 };
3319 lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
3320 ctx->voidt, args, 1, 0);
3321 }
3322
/* TGSI MEMBAR: wait for outstanding memory operations to complete. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	emit_waitcnt(si_shader_context(bld_base));
}
3332
3333 static LLVMValueRef
3334 shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
3335 const struct tgsi_full_src_register *reg)
3336 {
3337 LLVMValueRef index;
3338 LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
3339 SI_PARAM_SHADER_BUFFERS);
3340
3341 if (!reg->Register.Indirect)
3342 index = LLVMConstInt(ctx->i32, reg->Register.Index, 0);
3343 else
3344 index = get_bounded_indirect_index(ctx, &reg->Indirect,
3345 reg->Register.Index,
3346 SI_NUM_SHADER_BUFFERS);
3347
3348 return build_indexed_load_const(ctx, rsrc_ptr, index);
3349 }
3350
3351 static bool tgsi_is_array_sampler(unsigned target)
3352 {
3353 return target == TGSI_TEXTURE_1D_ARRAY ||
3354 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
3355 target == TGSI_TEXTURE_2D_ARRAY ||
3356 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
3357 target == TGSI_TEXTURE_CUBE_ARRAY ||
3358 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
3359 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3360 }
3361
3362 static bool tgsi_is_array_image(unsigned target)
3363 {
3364 return target == TGSI_TEXTURE_3D ||
3365 target == TGSI_TEXTURE_CUBE ||
3366 target == TGSI_TEXTURE_1D_ARRAY ||
3367 target == TGSI_TEXTURE_2D_ARRAY ||
3368 target == TGSI_TEXTURE_CUBE_ARRAY ||
3369 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3370 }
3371
3372 /**
3373 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
3374 *
3375 * At least on Tonga, executing image stores on images with DCC enabled and
3376 * non-trivial can eventually lead to lockups. This can occur when an
3377 * application binds an image as read-only but then uses a shader that writes
3378 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
3379 * program termination) in this case, but it doesn't cost much to be a bit
3380 * nicer: disabling DCC in the shader still leads to undefined results but
3381 * avoids the lockup.
3382 */
3383 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
3384 LLVMValueRef rsrc)
3385 {
3386 if (ctx->screen->b.chip_class <= CIK) {
3387 return rsrc;
3388 } else {
3389 LLVMBuilderRef builder = ctx->gallivm.builder;
3390 LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
3391 LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
3392 LLVMValueRef tmp;
3393
3394 tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
3395 tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
3396 return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
3397 }
3398 }
3399
3400 /**
3401 * Load the resource descriptor for \p image.
3402 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool dcc_off,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     SI_PARAM_IMAGES);
	LLVMValueRef index, tmp;

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;

		index = LLVMConstInt(ctx->i32, image->Register.Index, 0);

		/* Directly indexed, non-buffer images that the shader writes
		 * always get DCC disabled (see force_dcc_off). */
		if (info->images_writemask & (1 << image->Register.Index) &&
		    !(info->images_buffers & (1 << image->Register.Index)))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 * If a shader performs an image load, store, or atomic
		 * operation using an image variable declared as an array,
		 * and if the index used to select an individual element is
		 * negative or greater than or equal to the size of the
		 * array, the results of the operation are undefined but may
		 * not lead to termination.
		 */
		index = get_bounded_indirect_index(ctx, &image->Indirect,
						   image->Register.Index,
						   SI_NUM_IMAGES);
	}

	tmp = build_indexed_load_const(ctx, rsrc_ptr, index);
	if (dcc_off)
		tmp = force_dcc_off(ctx, tmp);
	*rsrc = tmp;
}
3445
3446 static LLVMValueRef image_fetch_coords(
3447 struct lp_build_tgsi_context *bld_base,
3448 const struct tgsi_full_instruction *inst,
3449 unsigned src)
3450 {
3451 struct gallivm_state *gallivm = bld_base->base.gallivm;
3452 LLVMBuilderRef builder = gallivm->builder;
3453 unsigned target = inst->Memory.Texture;
3454 unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
3455 LLVMValueRef coords[4];
3456 LLVMValueRef tmp;
3457 int chan;
3458
3459 for (chan = 0; chan < num_coords; ++chan) {
3460 tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
3461 tmp = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
3462 coords[chan] = tmp;
3463 }
3464
3465 if (num_coords == 1)
3466 return coords[0];
3467
3468 if (num_coords == 3) {
3469 /* LLVM has difficulties lowering 3-element vectors. */
3470 coords[3] = bld_base->uint_bld.undef;
3471 num_coords = 4;
3472 }
3473
3474 return lp_build_gather_values(gallivm, coords, num_coords);
3475 }
3476
3477 /**
3478 * Append the extra mode bits that are used by image load and store.
3479 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	/* da: the target addresses array layers (or cube faces / 3D slices). */
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	/* glc: set for coherent or volatile accesses. */
	LLVMValueRef glc =
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	/* The flag order differs between the old (LLVM <= 3.9, and atomics)
	 * and new image intrinsic signatures. */
	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic) {
			emit_data->args[emit_data->arg_count++] = glc;
		}
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}
3513
3514 /**
3515 * Given a 256 bit resource, extract the top half (which stores the buffer
3516 * resource in the case of textures and images).
3517 */
3518 static LLVMValueRef extract_rsrc_top_half(
3519 struct si_shader_context *ctx,
3520 LLVMValueRef rsrc)
3521 {
3522 struct gallivm_state *gallivm = &ctx->gallivm;
3523 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
3524 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
3525
3526 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, v2i128, "");
3527 rsrc = LLVMBuildExtractElement(gallivm->builder, rsrc, bld_base->uint_bld.one, "");
3528 rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, "");
3529
3530 return rsrc;
3531 }
3532
3533 /**
3534 * Append the resource and indexing arguments for buffer intrinsics.
3535 *
3536 * \param rsrc the v4i32 buffer resource
3537 * \param index index into the buffer (stride-based)
3538 * \param offset byte offset into the buffer
3539 */
3540 static void buffer_append_args(
3541 struct si_shader_context *ctx,
3542 struct lp_build_emit_data *emit_data,
3543 LLVMValueRef rsrc,
3544 LLVMValueRef index,
3545 LLVMValueRef offset,
3546 bool atomic)
3547 {
3548 const struct tgsi_full_instruction *inst = emit_data->inst;
3549 LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
3550 LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
3551
3552 emit_data->args[emit_data->arg_count++] = rsrc;
3553 emit_data->args[emit_data->arg_count++] = index; /* vindex */
3554 emit_data->args[emit_data->arg_count++] = offset; /* voffset */
3555 if (!atomic) {
3556 emit_data->args[emit_data->arg_count++] =
3557 inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
3558 i1true : i1false; /* glc */
3559 }
3560 emit_data->args[emit_data->arg_count++] = i1false; /* slc */
3561 }
3562
/* Gather the intrinsic arguments for a TGSI LOAD from a shader buffer or
 * image source; the intrinsic call itself is emitted in load_emit.
 */
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	/* All loads produce a 4-element vector. */
	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x supplies the offset (voffset). */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use the buffer intrinsics with the
			 * descriptor taken from the upper descriptor half. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3607
3608 static void load_emit_buffer(struct si_shader_context *ctx,
3609 struct lp_build_emit_data *emit_data)
3610 {
3611 const struct tgsi_full_instruction *inst = emit_data->inst;
3612 struct gallivm_state *gallivm = &ctx->gallivm;
3613 LLVMBuilderRef builder = gallivm->builder;
3614 uint writemask = inst->Dst[0].Register.WriteMask;
3615 uint count = util_last_bit(writemask);
3616 const char *intrinsic_name;
3617 LLVMTypeRef dst_type;
3618
3619 switch (count) {
3620 case 1:
3621 intrinsic_name = "llvm.amdgcn.buffer.load.f32";
3622 dst_type = ctx->f32;
3623 break;
3624 case 2:
3625 intrinsic_name = "llvm.amdgcn.buffer.load.v2f32";
3626 dst_type = LLVMVectorType(ctx->f32, 2);
3627 break;
3628 default: // 3 & 4
3629 intrinsic_name = "llvm.amdgcn.buffer.load.v4f32";
3630 dst_type = ctx->v4f32;
3631 count = 4;
3632 }
3633
3634 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3635 builder, intrinsic_name, dst_type,
3636 emit_data->args, emit_data->arg_count,
3637 LLVMReadOnlyAttribute);
3638 }
3639
3640 static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
3641 const struct tgsi_full_instruction *inst,
3642 LLVMTypeRef type, int arg)
3643 {
3644 struct gallivm_state *gallivm = &ctx->gallivm;
3645 LLVMBuilderRef builder = gallivm->builder;
3646 LLVMValueRef offset, ptr;
3647 int addr_space;
3648
3649 offset = lp_build_emit_fetch(&ctx->soa.bld_base, inst, arg, 0);
3650 offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");
3651
3652 ptr = ctx->shared_memory;
3653 ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
3654 addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
3655 ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");
3656
3657 return ptr;
3658 }
3659
3660 static void load_emit_memory(
3661 struct si_shader_context *ctx,
3662 struct lp_build_emit_data *emit_data)
3663 {
3664 const struct tgsi_full_instruction *inst = emit_data->inst;
3665 struct lp_build_context *base = &ctx->soa.bld_base.base;
3666 struct gallivm_state *gallivm = &ctx->gallivm;
3667 LLVMBuilderRef builder = gallivm->builder;
3668 unsigned writemask = inst->Dst[0].Register.WriteMask;
3669 LLVMValueRef channels[4], ptr, derived_ptr, index;
3670 int chan;
3671
3672 ptr = get_memory_ptr(ctx, inst, base->elem_type, 1);
3673
3674 for (chan = 0; chan < 4; ++chan) {
3675 if (!(writemask & (1 << chan))) {
3676 channels[chan] = LLVMGetUndef(base->elem_type);
3677 continue;
3678 }
3679
3680 index = lp_build_const_int32(gallivm, chan);
3681 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3682 channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
3683 }
3684 emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
3685 }
3686
3687 static void get_image_intr_name(const char *base_name,
3688 LLVMTypeRef data_type,
3689 LLVMTypeRef coords_type,
3690 LLVMTypeRef rsrc_type,
3691 char *out_name, unsigned out_len)
3692 {
3693 char coords_type_name[8];
3694
3695 build_type_name_for_intr(coords_type, coords_type_name,
3696 sizeof(coords_type_name));
3697
3698 if (HAVE_LLVM <= 0x0309) {
3699 snprintf(out_name, out_len, "%s.%s", base_name, coords_type_name);
3700 } else {
3701 char data_type_name[8];
3702 char rsrc_type_name[8];
3703
3704 build_type_name_for_intr(data_type, data_type_name,
3705 sizeof(data_type_name));
3706 build_type_name_for_intr(rsrc_type, rsrc_type_name,
3707 sizeof(rsrc_type_name));
3708 snprintf(out_name, out_len, "%s.%s.%s.%s", base_name,
3709 data_type_name, coords_type_name, rsrc_type_name);
3710 }
3711 }
3712
/* Emit a TGSI LOAD from shared memory, a shader buffer or an image,
 * using the arguments prepared by load_fetch_args.
 */
static void load_emit(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[64];

	/* Shared memory uses plain LLVM loads, no intrinsics. */
	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile loads wait for prior memory operations first. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	} else {
		/* The image intrinsic name is mangled with the argument types. */
		get_image_intr_name("llvm.amdgcn.image.load",
				    emit_data->dst_type, /* vdata */
				    LLVMTypeOf(emit_data->args[0]), /* coords */
				    LLVMTypeOf(emit_data->args[1]), /* rsrc */
				    intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	}
}
3757
/* Gather the intrinsic arguments for a TGSI STORE to a shader buffer or
 * image destination; the intrinsic itself is emitted by the store_emit*
 * functions.
 */
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	/* Src[1] holds the value to store; gather all 4 channels. */
	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	/* The destination register doubles as the resource operand. */
	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		/* Src[0].x supplies the offset (voffset). */
		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		coords = image_fetch_coords(bld_base, inst, 0);

		if (target == TGSI_TEXTURE_BUFFER) {
			image_fetch_rsrc(bld_base, &memory, false, &rsrc);

			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[1] = coords;
			/* Stores disable DCC on the image (see force_dcc_off). */
			image_fetch_rsrc(bld_base, &memory, true, &emit_data->args[2]);
			emit_data->args[3] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3816
3817 static void store_emit_buffer(
3818 struct si_shader_context *ctx,
3819 struct lp_build_emit_data *emit_data)
3820 {
3821 const struct tgsi_full_instruction *inst = emit_data->inst;
3822 struct gallivm_state *gallivm = &ctx->gallivm;
3823 LLVMBuilderRef builder = gallivm->builder;
3824 struct lp_build_context *uint_bld = &ctx->soa.bld_base.uint_bld;
3825 LLVMValueRef base_data = emit_data->args[0];
3826 LLVMValueRef base_offset = emit_data->args[3];
3827 unsigned writemask = inst->Dst[0].Register.WriteMask;
3828
3829 while (writemask) {
3830 int start, count;
3831 const char *intrinsic_name;
3832 LLVMValueRef data;
3833 LLVMValueRef offset;
3834 LLVMValueRef tmp;
3835
3836 u_bit_scan_consecutive_range(&writemask, &start, &count);
3837
3838 /* Due to an LLVM limitation, split 3-element writes
3839 * into a 2-element and a 1-element write. */
3840 if (count == 3) {
3841 writemask |= 1 << (start + 2);
3842 count = 2;
3843 }
3844
3845 if (count == 4) {
3846 data = base_data;
3847 intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
3848 } else if (count == 2) {
3849 LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);
3850
3851 tmp = LLVMBuildExtractElement(
3852 builder, base_data,
3853 lp_build_const_int32(gallivm, start), "");
3854 data = LLVMBuildInsertElement(
3855 builder, LLVMGetUndef(v2f32), tmp,
3856 uint_bld->zero, "");
3857
3858 tmp = LLVMBuildExtractElement(
3859 builder, base_data,
3860 lp_build_const_int32(gallivm, start + 1), "");
3861 data = LLVMBuildInsertElement(
3862 builder, data, tmp, uint_bld->one, "");
3863
3864 intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
3865 } else {
3866 assert(count == 1);
3867 data = LLVMBuildExtractElement(
3868 builder, base_data,
3869 lp_build_const_int32(gallivm, start), "");
3870 intrinsic_name = "llvm.amdgcn.buffer.store.f32";
3871 }
3872
3873 offset = base_offset;
3874 if (start != 0) {
3875 offset = LLVMBuildAdd(
3876 builder, offset,
3877 lp_build_const_int32(gallivm, start * 4), "");
3878 }
3879
3880 emit_data->args[0] = data;
3881 emit_data->args[3] = offset;
3882
3883 lp_build_intrinsic(
3884 builder, intrinsic_name, emit_data->dst_type,
3885 emit_data->args, emit_data->arg_count, 0);
3886 }
3887 }
3888
3889 static void store_emit_memory(
3890 struct si_shader_context *ctx,
3891 struct lp_build_emit_data *emit_data)
3892 {
3893 const struct tgsi_full_instruction *inst = emit_data->inst;
3894 struct gallivm_state *gallivm = &ctx->gallivm;
3895 struct lp_build_context *base = &ctx->soa.bld_base.base;
3896 LLVMBuilderRef builder = gallivm->builder;
3897 unsigned writemask = inst->Dst[0].Register.WriteMask;
3898 LLVMValueRef ptr, derived_ptr, data, index;
3899 int chan;
3900
3901 ptr = get_memory_ptr(ctx, inst, base->elem_type, 0);
3902
3903 for (chan = 0; chan < 4; ++chan) {
3904 if (!(writemask & (1 << chan))) {
3905 continue;
3906 }
3907 data = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 1, chan);
3908 index = lp_build_const_int32(gallivm, chan);
3909 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3910 LLVMBuildStore(builder, data, derived_ptr);
3911 }
3912 }
3913
3914 static void store_emit(
3915 const struct lp_build_tgsi_action *action,
3916 struct lp_build_tgsi_context *bld_base,
3917 struct lp_build_emit_data *emit_data)
3918 {
3919 struct si_shader_context *ctx = si_shader_context(bld_base);
3920 struct gallivm_state *gallivm = bld_base->base.gallivm;
3921 LLVMBuilderRef builder = gallivm->builder;
3922 const struct tgsi_full_instruction * inst = emit_data->inst;
3923 unsigned target = inst->Memory.Texture;
3924 char intrinsic_name[64];
3925
3926 if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
3927 store_emit_memory(ctx, emit_data);
3928 return;
3929 }
3930
3931 if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
3932 emit_waitcnt(ctx);
3933
3934 if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
3935 store_emit_buffer(ctx, emit_data);
3936 return;
3937 }
3938
3939 if (target == TGSI_TEXTURE_BUFFER) {
3940 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3941 builder, "llvm.amdgcn.buffer.store.format.v4f32",
3942 emit_data->dst_type, emit_data->args,
3943 emit_data->arg_count, 0);
3944 } else {
3945 get_image_intr_name("llvm.amdgcn.image.store",
3946 LLVMTypeOf(emit_data->args[0]), /* vdata */
3947 LLVMTypeOf(emit_data->args[1]), /* coords */
3948 LLVMTypeOf(emit_data->args[2]), /* rsrc */
3949 intrinsic_name, sizeof(intrinsic_name));
3950
3951 emit_data->output[emit_data->chan] =
3952 lp_build_intrinsic(
3953 builder, intrinsic_name, emit_data->dst_type,
3954 emit_data->args, emit_data->arg_count, 0);
3955 }
3956 }
3957
/**
 * Fetch the arguments for a TGSI atomic instruction.
 *
 * Source layout: src0 = resource, src1 = offset/coordinates,
 * src2 = data operand, src3 = comparison value (ATOMCAS only).
 * The data arguments are placed first in emit_data->args, then the
 * resource/addressing arguments are appended by the helpers.
 */
static void atomic_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	/* Atomics return a scalar. */
	emit_data->dst_type = bld_base->base.elem_type;

	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

	/* data2 is only set (and only used) for ATOMCAS. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* src1 is the offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, true);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0],
				 target != TGSI_TEXTURE_BUFFER, &rsrc);
		/* src1 holds the image coordinates. */
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, true);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true);
		}
	}
}
4017
4018 static void atomic_emit_memory(struct si_shader_context *ctx,
4019 struct lp_build_emit_data *emit_data) {
4020 struct gallivm_state *gallivm = &ctx->gallivm;
4021 LLVMBuilderRef builder = gallivm->builder;
4022 const struct tgsi_full_instruction * inst = emit_data->inst;
4023 LLVMValueRef ptr, result, arg;
4024
4025 ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
4026
4027 arg = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 2, 0);
4028 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4029
4030 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
4031 LLVMValueRef new_data;
4032 new_data = lp_build_emit_fetch(&ctx->soa.bld_base,
4033 inst, 3, 0);
4034
4035 new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");
4036
4037 #if HAVE_LLVM >= 0x309
4038 result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
4039 LLVMAtomicOrderingSequentiallyConsistent,
4040 LLVMAtomicOrderingSequentiallyConsistent,
4041 false);
4042 #endif
4043
4044 result = LLVMBuildExtractValue(builder, result, 0, "");
4045 } else {
4046 LLVMAtomicRMWBinOp op;
4047
4048 switch(inst->Instruction.Opcode) {
4049 case TGSI_OPCODE_ATOMUADD:
4050 op = LLVMAtomicRMWBinOpAdd;
4051 break;
4052 case TGSI_OPCODE_ATOMXCHG:
4053 op = LLVMAtomicRMWBinOpXchg;
4054 break;
4055 case TGSI_OPCODE_ATOMAND:
4056 op = LLVMAtomicRMWBinOpAnd;
4057 break;
4058 case TGSI_OPCODE_ATOMOR:
4059 op = LLVMAtomicRMWBinOpOr;
4060 break;
4061 case TGSI_OPCODE_ATOMXOR:
4062 op = LLVMAtomicRMWBinOpXor;
4063 break;
4064 case TGSI_OPCODE_ATOMUMIN:
4065 op = LLVMAtomicRMWBinOpUMin;
4066 break;
4067 case TGSI_OPCODE_ATOMUMAX:
4068 op = LLVMAtomicRMWBinOpUMax;
4069 break;
4070 case TGSI_OPCODE_ATOMIMIN:
4071 op = LLVMAtomicRMWBinOpMin;
4072 break;
4073 case TGSI_OPCODE_ATOMIMAX:
4074 op = LLVMAtomicRMWBinOpMax;
4075 break;
4076 default:
4077 unreachable("unknown atomic opcode");
4078 }
4079
4080 result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
4081 LLVMAtomicOrderingSequentiallyConsistent,
4082 false);
4083 }
4084 emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
4085 }
4086
4087 static void atomic_emit(
4088 const struct lp_build_tgsi_action *action,
4089 struct lp_build_tgsi_context *bld_base,
4090 struct lp_build_emit_data *emit_data)
4091 {
4092 struct si_shader_context *ctx = si_shader_context(bld_base);
4093 struct gallivm_state *gallivm = bld_base->base.gallivm;
4094 LLVMBuilderRef builder = gallivm->builder;
4095 const struct tgsi_full_instruction * inst = emit_data->inst;
4096 char intrinsic_name[40];
4097 LLVMValueRef tmp;
4098
4099 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
4100 atomic_emit_memory(ctx, emit_data);
4101 return;
4102 }
4103
4104 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
4105 inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
4106 snprintf(intrinsic_name, sizeof(intrinsic_name),
4107 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
4108 } else {
4109 LLVMValueRef coords;
4110 char coords_type[8];
4111
4112 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
4113 coords = emit_data->args[2];
4114 else
4115 coords = emit_data->args[1];
4116
4117 build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
4118 snprintf(intrinsic_name, sizeof(intrinsic_name),
4119 "llvm.amdgcn.image.atomic.%s.%s",
4120 action->intr_name, coords_type);
4121 }
4122
4123 tmp = lp_build_intrinsic(
4124 builder, intrinsic_name, bld_base->uint_bld.elem_type,
4125 emit_data->args, emit_data->arg_count, 0);
4126 emit_data->output[emit_data->chan] =
4127 LLVMBuildBitCast(builder, tmp, bld_base->base.elem_type, "");
4128 }
4129
/**
 * Fetch the arguments for a TGSI RESQ (resource size query).
 *
 * For buffer resources only the descriptor itself is needed; for real
 * images the full llvm.SI.getresinfo argument list is built here and
 * consumed by resq_emit.
 */
static void resq_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const struct tgsi_full_src_register *reg = &inst->Src[0];

	emit_data->dst_type = ctx->v4i32;

	if (reg->Register.File == TGSI_FILE_BUFFER) {
		/* Shader storage buffer: the descriptor is all we need. */
		emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
		emit_data->arg_count = 1;
	} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		/* Buffer image: size is read from the descriptor too. */
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[0]);
		emit_data->arg_count = 1;
	} else {
		emit_data->args[0] = bld_base->uint_bld.zero; /* mip level */
		image_fetch_rsrc(bld_base, reg, false, &emit_data->args[1]);
		emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
		emit_data->args[3] = bld_base->uint_bld.zero; /* unorm */
		emit_data->args[4] = bld_base->uint_bld.zero; /* r128 */
		emit_data->args[5] = tgsi_is_array_image(inst->Memory.Texture) ?
			bld_base->uint_bld.one : bld_base->uint_bld.zero; /* da */
		emit_data->args[6] = bld_base->uint_bld.zero; /* glc */
		emit_data->args[7] = bld_base->uint_bld.zero; /* slc */
		emit_data->args[8] = bld_base->uint_bld.zero; /* tfe */
		emit_data->args[9] = bld_base->uint_bld.zero; /* lwe */
		emit_data->arg_count = 10;
	}
}
4162
4163 static void resq_emit(
4164 const struct lp_build_tgsi_action *action,
4165 struct lp_build_tgsi_context *bld_base,
4166 struct lp_build_emit_data *emit_data)
4167 {
4168 struct gallivm_state *gallivm = bld_base->base.gallivm;
4169 LLVMBuilderRef builder = gallivm->builder;
4170 const struct tgsi_full_instruction *inst = emit_data->inst;
4171 LLVMValueRef out;
4172
4173 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
4174 out = LLVMBuildExtractElement(builder, emit_data->args[0],
4175 lp_build_const_int32(gallivm, 2), "");
4176 } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
4177 out = get_buffer_size(bld_base, emit_data->args[0]);
4178 } else {
4179 out = lp_build_intrinsic(
4180 builder, "llvm.SI.getresinfo.i32", emit_data->dst_type,
4181 emit_data->args, emit_data->arg_count,
4182 LLVMReadNoneAttribute);
4183
4184 /* Divide the number of layers by 6 to get the number of cubes. */
4185 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY) {
4186 LLVMValueRef imm2 = lp_build_const_int32(gallivm, 2);
4187 LLVMValueRef imm6 = lp_build_const_int32(gallivm, 6);
4188
4189 LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
4190 z = LLVMBuildSDiv(builder, z, imm6, "");
4191 out = LLVMBuildInsertElement(builder, out, z, imm2, "");
4192 }
4193 }
4194
4195 emit_data->output[emit_data->chan] = out;
4196 }
4197
/**
 * Build the common argument list for the SI texture intrinsics:
 * packed coordinates, resource, optional sampler, then the immediate
 * dmask/unorm/r128/da/glc/slc/tfe/lwe arguments, in that order.
 *
 * \param param  address components; padded in place to a power-of-two
 *               count, so the caller's array must have room for that
 * \param count  number of valid entries in \p param
 * \param dmask  which result components to enable
 */
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned opcode, unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned num_args;
	unsigned is_rect = target == TGSI_TEXTURE_RECT;

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	/* Texture coordinates. */
	if (count > 1)
		emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
	else
		emit_data->args[0] = param[0];

	/* Resource. */
	emit_data->args[1] = res_ptr;
	num_args = 2;

	/* TXF/TXQ return integer data and take no sampler. */
	if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
		emit_data->dst_type = ctx->v4i32;
	else {
		emit_data->dst_type = ctx->v4f32;

		emit_data->args[num_args++] = samp_ptr;
	}

	emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm,
					  tgsi_is_array_sampler(target)); /* da */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */

	emit_data->arg_count = num_args;
}
4243
4244 static const struct lp_build_tgsi_action tex_action;
4245
/* Which descriptor of a combined image+sampler slot to load. */
enum desc_type {
	DESC_IMAGE,	/* image view descriptor */
	DESC_FMASK,	/* MSAA FMASK descriptor */
	DESC_SAMPLER	/* sampler state descriptor */
};
4251
/* Return the type "elem_type[num_elements]*" in the constant address
 * space.  Callers pass num_elements == 0 for an unbounded array. */
static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
{
	return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
			       CONST_ADDR_SPACE);
}
4257
4258 /**
4259 * Load an image view, fmask view. or sampler state descriptor.
4260 */
4261 static LLVMValueRef load_sampler_desc_custom(struct si_shader_context *ctx,
4262 LLVMValueRef list, LLVMValueRef index,
4263 enum desc_type type)
4264 {
4265 struct gallivm_state *gallivm = &ctx->gallivm;
4266 LLVMBuilderRef builder = gallivm->builder;
4267
4268 switch (type) {
4269 case DESC_IMAGE:
4270 /* The image is at [0:7]. */
4271 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4272 break;
4273 case DESC_FMASK:
4274 /* The FMASK is at [8:15]. */
4275 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4276 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
4277 break;
4278 case DESC_SAMPLER:
4279 /* The sampler state is at [12:15]. */
4280 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
4281 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
4282 list = LLVMBuildPointerCast(builder, list,
4283 const_array(ctx->v4i32, 0), "");
4284 break;
4285 }
4286
4287 return build_indexed_load_const(ctx, list, index);
4288 }
4289
4290 static LLVMValueRef load_sampler_desc(struct si_shader_context *ctx,
4291 LLVMValueRef index, enum desc_type type)
4292 {
4293 LLVMValueRef list = LLVMGetParam(ctx->main_fn,
4294 SI_PARAM_SAMPLERS);
4295
4296 return load_sampler_desc_custom(ctx, list, index, type);
4297 }
4298
4299 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
4300 *
4301 * SI-CI:
4302 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
4303 * filtering manually. The driver sets img7 to a mask clearing
4304 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
4305 * s_and_b32 samp0, samp0, img7
4306 *
4307 * VI:
4308 * The ANISO_OVERRIDE sampler field enables this fix in TA.
4309 */
4310 static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
4311 LLVMValueRef res, LLVMValueRef samp)
4312 {
4313 LLVMBuilderRef builder = ctx->gallivm.builder;
4314 LLVMValueRef img7, samp0;
4315
4316 if (ctx->screen->b.chip_class >= VI)
4317 return samp;
4318
4319 img7 = LLVMBuildExtractElement(builder, res,
4320 LLVMConstInt(ctx->i32, 7, 0), "");
4321 samp0 = LLVMBuildExtractElement(builder, samp,
4322 LLVMConstInt(ctx->i32, 0, 0), "");
4323 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
4324 return LLVMBuildInsertElement(builder, samp, samp0,
4325 LLVMConstInt(ctx->i32, 0, 0), "");
4326 }
4327
/**
 * Load the resource, sampler, and FMASK descriptors for a texture
 * instruction; the sampler register is always the last source operand.
 *
 * MSAA targets get no sampler but may need an FMASK descriptor; all
 * other targets get a sampler (with the SI/CI anisotropy fix applied)
 * and no FMASK.  Any of samp_ptr/fmask_ptr may be NULL if unwanted.
 */
static void tex_fetch_ptrs(
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data,
	LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	const struct tgsi_full_instruction *inst = emit_data->inst;
	unsigned target = inst->Texture.Texture;
	unsigned sampler_src;
	unsigned sampler_index;
	LLVMValueRef index;

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
	sampler_index = emit_data->inst->Src[sampler_src].Register.Index;

	if (emit_data->inst->Src[sampler_src].Register.Indirect) {
		const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];

		/* Bound dynamic indices so they can't address outside the
		 * sampler descriptor array. */
		index = get_bounded_indirect_index(ctx,
						   &reg->Indirect,
						   reg->Register.Index,
						   SI_NUM_SAMPLERS);
	} else {
		index = LLVMConstInt(ctx->i32, sampler_index, 0);
	}

	*res_ptr = load_sampler_desc(ctx, index, DESC_IMAGE);

	if (target == TGSI_TEXTURE_2D_MSAA ||
	    target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
		if (samp_ptr)
			*samp_ptr = NULL;
		if (fmask_ptr)
			*fmask_ptr = load_sampler_desc(ctx, index, DESC_FMASK);
	} else {
		if (samp_ptr) {
			*samp_ptr = load_sampler_desc(ctx, index, DESC_SAMPLER);
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
		}
		if (fmask_ptr)
			*fmask_ptr = NULL;
	}
}
4371
4372 static void txq_fetch_args(
4373 struct lp_build_tgsi_context *bld_base,
4374 struct lp_build_emit_data *emit_data)
4375 {
4376 struct si_shader_context *ctx = si_shader_context(bld_base);
4377 struct gallivm_state *gallivm = bld_base->base.gallivm;
4378 LLVMBuilderRef builder = gallivm->builder;
4379 const struct tgsi_full_instruction *inst = emit_data->inst;
4380 unsigned target = inst->Texture.Texture;
4381 LLVMValueRef res_ptr;
4382 LLVMValueRef address;
4383
4384 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);
4385
4386 if (target == TGSI_TEXTURE_BUFFER) {
4387 /* Read the size from the buffer descriptor directly. */
4388 LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4389 emit_data->args[0] = get_buffer_size(bld_base, res);
4390 return;
4391 }
4392
4393 /* Textures - set the mip level. */
4394 address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
4395
4396 set_tex_fetch_args(ctx, emit_data, TGSI_OPCODE_TXQ, target, res_ptr,
4397 NULL, &address, 1, 0xf);
4398 }
4399
4400 static void txq_emit(const struct lp_build_tgsi_action *action,
4401 struct lp_build_tgsi_context *bld_base,
4402 struct lp_build_emit_data *emit_data)
4403 {
4404 struct lp_build_context *base = &bld_base->base;
4405 unsigned target = emit_data->inst->Texture.Texture;
4406
4407 if (target == TGSI_TEXTURE_BUFFER) {
4408 /* Just return the buffer size. */
4409 emit_data->output[emit_data->chan] = emit_data->args[0];
4410 return;
4411 }
4412
4413 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4414 base->gallivm->builder, "llvm.SI.getresinfo.i32",
4415 emit_data->dst_type, emit_data->args, emit_data->arg_count,
4416 LLVMReadNoneAttribute);
4417
4418 /* Divide the number of layers by 6 to get the number of cubes. */
4419 if (target == TGSI_TEXTURE_CUBE_ARRAY ||
4420 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4421 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
4422 LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
4423 LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);
4424
4425 LLVMValueRef v4 = emit_data->output[emit_data->chan];
4426 LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
4427 z = LLVMBuildSDiv(builder, z, six, "");
4428
4429 emit_data->output[emit_data->chan] =
4430 LLVMBuildInsertElement(builder, v4, z, two, "");
4431 }
4432 }
4433
4434 static void tex_fetch_args(
4435 struct lp_build_tgsi_context *bld_base,
4436 struct lp_build_emit_data *emit_data)
4437 {
4438 struct si_shader_context *ctx = si_shader_context(bld_base);
4439 struct gallivm_state *gallivm = bld_base->base.gallivm;
4440 const struct tgsi_full_instruction *inst = emit_data->inst;
4441 unsigned opcode = inst->Instruction.Opcode;
4442 unsigned target = inst->Texture.Texture;
4443 LLVMValueRef coords[5], derivs[6];
4444 LLVMValueRef address[16];
4445 unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
4446 int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
4447 unsigned count = 0;
4448 unsigned chan;
4449 unsigned num_deriv_channels = 0;
4450 bool has_offset = inst->Texture.NumOffsets > 0;
4451 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4452 unsigned dmask = 0xf;
4453
4454 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4455
4456 if (target == TGSI_TEXTURE_BUFFER) {
4457 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
4458
4459 /* Bitcast and truncate v8i32 to v16i8. */
4460 LLVMValueRef res = res_ptr;
4461 res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
4462 res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
4463 res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");
4464
4465 emit_data->dst_type = ctx->v4f32;
4466 emit_data->args[0] = res;
4467 emit_data->args[1] = bld_base->uint_bld.zero;
4468 emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4469 emit_data->arg_count = 3;
4470 return;
4471 }
4472
4473 /* Fetch and project texture coordinates */
4474 coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
4475 for (chan = 0; chan < 3; chan++ ) {
4476 coords[chan] = lp_build_emit_fetch(bld_base,
4477 emit_data->inst, 0,
4478 chan);
4479 if (opcode == TGSI_OPCODE_TXP)
4480 coords[chan] = lp_build_emit_llvm_binary(bld_base,
4481 TGSI_OPCODE_DIV,
4482 coords[chan],
4483 coords[3]);
4484 }
4485
4486 if (opcode == TGSI_OPCODE_TXP)
4487 coords[3] = bld_base->base.one;
4488
4489 /* Pack offsets. */
4490 if (has_offset && opcode != TGSI_OPCODE_TXF) {
4491 /* The offsets are six-bit signed integers packed like this:
4492 * X=[5:0], Y=[13:8], and Z=[21:16].
4493 */
4494 LLVMValueRef offset[3], pack;
4495
4496 assert(inst->Texture.NumOffsets == 1);
4497
4498 for (chan = 0; chan < 3; chan++) {
4499 offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
4500 emit_data->inst, 0, chan);
4501 offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
4502 lp_build_const_int32(gallivm, 0x3f), "");
4503 if (chan)
4504 offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
4505 lp_build_const_int32(gallivm, chan*8), "");
4506 }
4507
4508 pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
4509 pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
4510 address[count++] = pack;
4511 }
4512
4513 /* Pack LOD bias value */
4514 if (opcode == TGSI_OPCODE_TXB)
4515 address[count++] = coords[3];
4516 if (opcode == TGSI_OPCODE_TXB2)
4517 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4518
4519 /* Pack depth comparison value */
4520 if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
4521 LLVMValueRef z;
4522
4523 if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4524 z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4525 } else {
4526 assert(ref_pos >= 0);
4527 z = coords[ref_pos];
4528 }
4529
4530 /* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
4531 * so the depth comparison value isn't clamped for Z16 and
4532 * Z24 anymore. Do it manually here.
4533 *
4534 * It's unnecessary if the original texture format was
4535 * Z32_FLOAT, but we don't know that here.
4536 */
4537 if (ctx->screen->b.chip_class == VI)
4538 z = si_llvm_saturate(bld_base, z);
4539
4540 address[count++] = z;
4541 }
4542
4543 /* Pack user derivatives */
4544 if (opcode == TGSI_OPCODE_TXD) {
4545 int param, num_src_deriv_channels;
4546
4547 switch (target) {
4548 case TGSI_TEXTURE_3D:
4549 num_src_deriv_channels = 3;
4550 num_deriv_channels = 3;
4551 break;
4552 case TGSI_TEXTURE_2D:
4553 case TGSI_TEXTURE_SHADOW2D:
4554 case TGSI_TEXTURE_RECT:
4555 case TGSI_TEXTURE_SHADOWRECT:
4556 case TGSI_TEXTURE_2D_ARRAY:
4557 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4558 num_src_deriv_channels = 2;
4559 num_deriv_channels = 2;
4560 break;
4561 case TGSI_TEXTURE_CUBE:
4562 case TGSI_TEXTURE_SHADOWCUBE:
4563 case TGSI_TEXTURE_CUBE_ARRAY:
4564 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
4565 /* Cube derivatives will be converted to 2D. */
4566 num_src_deriv_channels = 3;
4567 num_deriv_channels = 2;
4568 break;
4569 case TGSI_TEXTURE_1D:
4570 case TGSI_TEXTURE_SHADOW1D:
4571 case TGSI_TEXTURE_1D_ARRAY:
4572 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4573 num_src_deriv_channels = 1;
4574 num_deriv_channels = 1;
4575 break;
4576 default:
4577 unreachable("invalid target");
4578 }
4579
4580 for (param = 0; param < 2; param++)
4581 for (chan = 0; chan < num_src_deriv_channels; chan++)
4582 derivs[param * num_src_deriv_channels + chan] =
4583 lp_build_emit_fetch(bld_base, inst, param+1, chan);
4584 }
4585
4586 if (target == TGSI_TEXTURE_CUBE ||
4587 target == TGSI_TEXTURE_CUBE_ARRAY ||
4588 target == TGSI_TEXTURE_SHADOWCUBE ||
4589 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
4590 si_prepare_cube_coords(bld_base, emit_data, coords, derivs);
4591
4592 if (opcode == TGSI_OPCODE_TXD)
4593 for (int i = 0; i < num_deriv_channels * 2; i++)
4594 address[count++] = derivs[i];
4595
4596 /* Pack texture coordinates */
4597 address[count++] = coords[0];
4598 if (num_coords > 1)
4599 address[count++] = coords[1];
4600 if (num_coords > 2)
4601 address[count++] = coords[2];
4602
4603 /* Pack LOD or sample index */
4604 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
4605 address[count++] = coords[3];
4606 else if (opcode == TGSI_OPCODE_TXL2)
4607 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4608
4609 if (count > 16) {
4610 assert(!"Cannot handle more than 16 texture address parameters");
4611 count = 16;
4612 }
4613
4614 for (chan = 0; chan < count; chan++ ) {
4615 address[chan] = LLVMBuildBitCast(gallivm->builder,
4616 address[chan], ctx->i32, "");
4617 }
4618
4619 /* Adjust the sample index according to FMASK.
4620 *
4621 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
4622 * which is the identity mapping. Each nibble says which physical sample
4623 * should be fetched to get that sample.
4624 *
4625 * For example, 0x11111100 means there are only 2 samples stored and
4626 * the second sample covers 3/4 of the pixel. When reading samples 0
4627 * and 1, return physical sample 0 (determined by the first two 0s
4628 * in FMASK), otherwise return physical sample 1.
4629 *
4630 * The sample index should be adjusted as follows:
4631 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
4632 */
4633 if (target == TGSI_TEXTURE_2D_MSAA ||
4634 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
4635 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4636 struct lp_build_emit_data txf_emit_data = *emit_data;
4637 LLVMValueRef txf_address[4];
4638 unsigned txf_count = count;
4639 struct tgsi_full_instruction inst = {};
4640
4641 memcpy(txf_address, address, sizeof(txf_address));
4642
4643 if (target == TGSI_TEXTURE_2D_MSAA) {
4644 txf_address[2] = bld_base->uint_bld.zero;
4645 }
4646 txf_address[3] = bld_base->uint_bld.zero;
4647
4648 /* Read FMASK using TXF. */
4649 inst.Instruction.Opcode = TGSI_OPCODE_TXF;
4650 inst.Texture.Texture = target;
4651 txf_emit_data.inst = &inst;
4652 txf_emit_data.chan = 0;
4653 set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
4654 target, fmask_ptr, NULL,
4655 txf_address, txf_count, 0xf);
4656 build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);
4657
4658 /* Initialize some constants. */
4659 LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
4660 LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);
4661
4662 /* Apply the formula. */
4663 LLVMValueRef fmask =
4664 LLVMBuildExtractElement(gallivm->builder,
4665 txf_emit_data.output[0],
4666 uint_bld->zero, "");
4667
4668 unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
4669
4670 LLVMValueRef sample_index4 =
4671 LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
4672
4673 LLVMValueRef shifted_fmask =
4674 LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");
4675
4676 LLVMValueRef final_sample =
4677 LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");
4678
4679 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
4680 * resource descriptor is 0 (invalid),
4681 */
4682 LLVMValueRef fmask_desc =
4683 LLVMBuildBitCast(gallivm->builder, fmask_ptr,
4684 ctx->v8i32, "");
4685
4686 LLVMValueRef fmask_word1 =
4687 LLVMBuildExtractElement(gallivm->builder, fmask_desc,
4688 uint_bld->one, "");
4689
4690 LLVMValueRef word1_is_nonzero =
4691 LLVMBuildICmp(gallivm->builder, LLVMIntNE,
4692 fmask_word1, uint_bld->zero, "");
4693
4694 /* Replace the MSAA sample index. */
4695 address[sample_chan] =
4696 LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
4697 final_sample, address[sample_chan], "");
4698 }
4699
4700 if (opcode == TGSI_OPCODE_TXF) {
4701 /* add tex offsets */
4702 if (inst->Texture.NumOffsets) {
4703 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4704 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
4705 const struct tgsi_texture_offset *off = inst->TexOffsets;
4706
4707 assert(inst->Texture.NumOffsets == 1);
4708
4709 switch (target) {
4710 case TGSI_TEXTURE_3D:
4711 address[2] = lp_build_add(uint_bld, address[2],
4712 bld->immediates[off->Index][off->SwizzleZ]);
4713 /* fall through */
4714 case TGSI_TEXTURE_2D:
4715 case TGSI_TEXTURE_SHADOW2D:
4716 case TGSI_TEXTURE_RECT:
4717 case TGSI_TEXTURE_SHADOWRECT:
4718 case TGSI_TEXTURE_2D_ARRAY:
4719 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4720 address[1] =
4721 lp_build_add(uint_bld, address[1],
4722 bld->immediates[off->Index][off->SwizzleY]);
4723 /* fall through */
4724 case TGSI_TEXTURE_1D:
4725 case TGSI_TEXTURE_SHADOW1D:
4726 case TGSI_TEXTURE_1D_ARRAY:
4727 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4728 address[0] =
4729 lp_build_add(uint_bld, address[0],
4730 bld->immediates[off->Index][off->SwizzleX]);
4731 break;
4732 /* texture offsets do not apply to other texture targets */
4733 }
4734 }
4735 }
4736
4737 if (opcode == TGSI_OPCODE_TG4) {
4738 unsigned gather_comp = 0;
4739
4740 /* DMASK was repurposed for GATHER4. 4 components are always
4741 * returned and DMASK works like a swizzle - it selects
4742 * the component to fetch. The only valid DMASK values are
4743 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4744 * (red,red,red,red) etc.) The ISA document doesn't mention
4745 * this.
4746 */
4747
4748 /* Get the component index from src1.x for Gather4. */
4749 if (!tgsi_is_shadow_target(target)) {
4750 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
4751 LLVMValueRef comp_imm;
4752 struct tgsi_src_register src1 = inst->Src[1].Register;
4753
4754 assert(src1.File == TGSI_FILE_IMMEDIATE);
4755
4756 comp_imm = imms[src1.Index][src1.SwizzleX];
4757 gather_comp = LLVMConstIntGetZExtValue(comp_imm);
4758 gather_comp = CLAMP(gather_comp, 0, 3);
4759 }
4760
4761 dmask = 1 << gather_comp;
4762 }
4763
4764 set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
4765 samp_ptr, address, count, dmask);
4766 }
4767
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 */
static void si_lower_gather4_integer(struct si_shader_context *ctx,
				     struct lp_build_emit_data *emit_data,
				     const char *intr_name,
				     unsigned coord_vgpr_index)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef coord = emit_data->args[0];
	LLVMValueRef half_texel[2];
	int c;

	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_RECT ||
	    emit_data->inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
		/* RECT targets use unnormalized coordinates, so the
		 * correction is simply -0.5 texel in both directions. */
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = emit_data->inst->Texture.Texture;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, TGSI_OPCODE_TXQ,
				   txq_inst.Texture.Texture,
				   emit_data->args[1], NULL,
				   &ctx->soa.bld_base.uint_bld.zero,
				   1, 0xf);
		txq_emit(NULL, &ctx->soa.bld_base, &txq_emit_data);

		/* Compute -0.5 / size for the width (c=0) and height (c=1). */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			/* 1 / size via the RCP opcode, then scale by -0.5. */
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->soa.bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	/* Add the correction to the X/Y coordinates, which start at
	 * coord_vgpr_index inside args[0] (offset/z-compare may precede
	 * them). Coordinates are stored as i32 bit patterns, hence the
	 * bitcasts around the float add. */
	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	/* Emit the gather with the corrected coordinates. */
	emit_data->args[0] = coord;
	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(builder, intr_name, emit_data->dst_type,
				   emit_data->args, emit_data->arg_count,
				   LLVMReadNoneAttribute);
}
4836
4837 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
4838 struct lp_build_tgsi_context *bld_base,
4839 struct lp_build_emit_data *emit_data)
4840 {
4841 struct si_shader_context *ctx = si_shader_context(bld_base);
4842 struct lp_build_context *base = &bld_base->base;
4843 const struct tgsi_full_instruction *inst = emit_data->inst;
4844 unsigned opcode = inst->Instruction.Opcode;
4845 unsigned target = inst->Texture.Texture;
4846 char intr_name[127];
4847 bool has_offset = inst->Texture.NumOffsets > 0;
4848 bool is_shadow = tgsi_is_shadow_target(target);
4849 char type[64];
4850 const char *name = "llvm.SI.image.sample";
4851 const char *infix = "";
4852
4853 if (target == TGSI_TEXTURE_BUFFER) {
4854 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4855 base->gallivm->builder,
4856 "llvm.SI.vs.load.input", emit_data->dst_type,
4857 emit_data->args, emit_data->arg_count,
4858 LLVMReadNoneAttribute);
4859 return;
4860 }
4861
4862 switch (opcode) {
4863 case TGSI_OPCODE_TXF:
4864 name = target == TGSI_TEXTURE_2D_MSAA ||
4865 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
4866 "llvm.SI.image.load" :
4867 "llvm.SI.image.load.mip";
4868 is_shadow = false;
4869 has_offset = false;
4870 break;
4871 case TGSI_OPCODE_LODQ:
4872 name = "llvm.SI.getlod";
4873 is_shadow = false;
4874 has_offset = false;
4875 break;
4876 case TGSI_OPCODE_TEX:
4877 case TGSI_OPCODE_TEX2:
4878 case TGSI_OPCODE_TXP:
4879 if (ctx->type != PIPE_SHADER_FRAGMENT)
4880 infix = ".lz";
4881 break;
4882 case TGSI_OPCODE_TXB:
4883 case TGSI_OPCODE_TXB2:
4884 assert(ctx->type == PIPE_SHADER_FRAGMENT);
4885 infix = ".b";
4886 break;
4887 case TGSI_OPCODE_TXL:
4888 case TGSI_OPCODE_TXL2:
4889 infix = ".l";
4890 break;
4891 case TGSI_OPCODE_TXD:
4892 infix = ".d";
4893 break;
4894 case TGSI_OPCODE_TG4:
4895 name = "llvm.SI.gather4";
4896 infix = ".lz";
4897 break;
4898 default:
4899 assert(0);
4900 return;
4901 }
4902
4903 /* Add the type and suffixes .c, .o if needed. */
4904 build_type_name_for_intr(LLVMTypeOf(emit_data->args[0]), type, sizeof(type));
4905 sprintf(intr_name, "%s%s%s%s.%s",
4906 name, is_shadow ? ".c" : "", infix,
4907 has_offset ? ".o" : "", type);
4908
4909 /* The hardware needs special lowering for Gather4 with integer formats. */
4910 if (opcode == TGSI_OPCODE_TG4) {
4911 struct tgsi_shader_info *info = &ctx->shader->selector->info;
4912 /* This will also work with non-constant indexing because of how
4913 * glsl_to_tgsi works and we intent to preserve that behavior.
4914 */
4915 const unsigned src_idx = 2;
4916 unsigned sampler = inst->Src[src_idx].Register.Index;
4917
4918 assert(inst->Src[src_idx].Register.File == TGSI_FILE_SAMPLER);
4919
4920 if (info->sampler_type[sampler] == TGSI_RETURN_TYPE_SINT ||
4921 info->sampler_type[sampler] == TGSI_RETURN_TYPE_UINT) {
4922 /* Texture coordinates start after:
4923 * {offset, bias, z-compare, derivatives}
4924 * Only the offset and z-compare can occur here.
4925 */
4926 si_lower_gather4_integer(ctx, emit_data, intr_name,
4927 (int)has_offset + (int)is_shadow);
4928 return;
4929 }
4930 }
4931
4932 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4933 base->gallivm->builder, intr_name, emit_data->dst_type,
4934 emit_data->args, emit_data->arg_count,
4935 LLVMReadNoneAttribute);
4936 }
4937
4938 static void si_llvm_emit_txqs(
4939 const struct lp_build_tgsi_action *action,
4940 struct lp_build_tgsi_context *bld_base,
4941 struct lp_build_emit_data *emit_data)
4942 {
4943 struct si_shader_context *ctx = si_shader_context(bld_base);
4944 struct gallivm_state *gallivm = bld_base->base.gallivm;
4945 LLVMBuilderRef builder = gallivm->builder;
4946 LLVMValueRef res, samples;
4947 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4948
4949 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4950
4951
4952 /* Read the samples from the descriptor directly. */
4953 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4954 samples = LLVMBuildExtractElement(
4955 builder, res,
4956 lp_build_const_int32(gallivm, 3), "");
4957 samples = LLVMBuildLShr(builder, samples,
4958 lp_build_const_int32(gallivm, 16), "");
4959 samples = LLVMBuildAnd(builder, samples,
4960 lp_build_const_int32(gallivm, 0xf), "");
4961 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
4962 samples, "");
4963
4964 emit_data->output[emit_data->chan] = samples;
4965 }
4966
4967 /*
4968 * SI implements derivatives using the local data store (LDS)
4969 * All writes to the LDS happen in all executing threads at
4970 * the same time. TID is the Thread ID for the current
4971 * thread and is a value between 0 and 63, representing
4972 * the thread's position in the wavefront.
4973 *
4974 * For the pixel shader threads are grouped into quads of four pixels.
4975 * The TIDs of the pixels of a quad are:
4976 *
4977 * +------+------+
4978 * |4n + 0|4n + 1|
4979 * +------+------+
4980 * |4n + 2|4n + 3|
4981 * +------+------+
4982 *
4983 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
4984 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
4985 * the current pixel's column, and masking with 0xfffffffe yields the TID
4986 * of the left pixel of the current pixel's row.
4987 *
4988 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
4989 * adding 2 yields the TID of the pixel below the top pixel.
4990 */
4991 /* masks for thread ID. */
4992 #define TID_MASK_TOP_LEFT 0xfffffffc
4993 #define TID_MASK_TOP 0xfffffffd
4994 #define TID_MASK_LEFT 0xfffffffe
4995
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	unsigned opcode = emit_data->info->opcode;
	LLVMValueRef thread_id, tl, trbl, tl_tid, trbl_tid, val, args[2];
	int idx;
	unsigned mask;

	thread_id = get_thread_id(ctx);

	/* Pick the reference ("top-left") pixel of the quad: fine
	 * derivatives use the pixel's own row/column, coarse derivatives
	 * always use the quad's top-left pixel (see masks above). */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	tl_tid = LLVMBuildAnd(gallivm->builder, thread_id,
			      lp_build_const_int32(gallivm, mask), "");

	/* For DDX we want the next X pixel, for DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	trbl_tid = LLVMBuildAdd(gallivm->builder, tl_tid,
				lp_build_const_int32(gallivm, idx), "");

	/* The source value as an i32 bit pattern, so it can go through
	 * LDS / lane permutes unchanged. */
	val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");

	if (ctx->screen->has_ds_bpermute) {
		/* Exchange values across lanes directly; ds_bpermute takes
		 * a byte address, hence tid * 4. */
		args[0] = LLVMBuildMul(gallivm->builder, tl_tid,
				       lp_build_const_int32(gallivm, 4), "");
		args[1] = val;
		tl = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.ds.bpermute", ctx->i32,
					args, 2, LLVMReadNoneAttribute);

		args[0] = LLVMBuildMul(gallivm->builder, trbl_tid,
				       lp_build_const_int32(gallivm, 4), "");
		trbl = lp_build_intrinsic(gallivm->builder,
					  "llvm.amdgcn.ds.bpermute", ctx->i32,
					  args, 2, LLVMReadNoneAttribute);
	} else {
		/* Fallback path: every thread writes its value to LDS, then
		 * reads back the two neighbors' values. */
		LLVMValueRef store_ptr, load_ptr0, load_ptr1;

		store_ptr = build_gep0(ctx, ctx->lds, thread_id);
		load_ptr0 = build_gep0(ctx, ctx->lds, tl_tid);
		load_ptr1 = build_gep0(ctx, ctx->lds, trbl_tid);

		LLVMBuildStore(gallivm->builder, val, store_ptr);
		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
	}

	tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
	trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");

	/* The derivative is neighbor minus reference. */
	emit_data->output[emit_data->chan] =
		LLVMBuildFSub(gallivm->builder, trbl, tl, "");
}
5058
5059 /*
5060 * this takes an I,J coordinate pair,
5061 * and works out the X and Y derivatives.
5062 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
5063 */
5064 static LLVMValueRef si_llvm_emit_ddxy_interp(
5065 struct lp_build_tgsi_context *bld_base,
5066 LLVMValueRef interp_ij)
5067 {
5068 struct si_shader_context *ctx = si_shader_context(bld_base);
5069 struct gallivm_state *gallivm = bld_base->base.gallivm;
5070 LLVMValueRef result[4], a;
5071 unsigned i;
5072
5073 for (i = 0; i < 2; i++) {
5074 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
5075 LLVMConstInt(ctx->i32, i, 0), "");
5076 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
5077 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
5078 }
5079
5080 return lp_build_gather_values(gallivm, result, 4);
5081 }
5082
5083 static void interp_fetch_args(
5084 struct lp_build_tgsi_context *bld_base,
5085 struct lp_build_emit_data *emit_data)
5086 {
5087 struct si_shader_context *ctx = si_shader_context(bld_base);
5088 struct gallivm_state *gallivm = bld_base->base.gallivm;
5089 const struct tgsi_full_instruction *inst = emit_data->inst;
5090
5091 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
5092 /* offset is in second src, first two channels */
5093 emit_data->args[0] = lp_build_emit_fetch(bld_base,
5094 emit_data->inst, 1,
5095 TGSI_CHAN_X);
5096 emit_data->args[1] = lp_build_emit_fetch(bld_base,
5097 emit_data->inst, 1,
5098 TGSI_CHAN_Y);
5099 emit_data->arg_count = 2;
5100 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5101 LLVMValueRef sample_position;
5102 LLVMValueRef sample_id;
5103 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
5104
5105 /* fetch sample ID, then fetch its sample position,
5106 * and place into first two channels.
5107 */
5108 sample_id = lp_build_emit_fetch(bld_base,
5109 emit_data->inst, 1, TGSI_CHAN_X);
5110 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
5111 ctx->i32, "");
5112 sample_position = load_sample_position(ctx, sample_id);
5113
5114 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
5115 sample_position,
5116 lp_build_const_int32(gallivm, 0), "");
5117
5118 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
5119 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
5120 sample_position,
5121 lp_build_const_int32(gallivm, 1), "");
5122 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
5123 emit_data->arg_count = 2;
5124 }
5125 }
5126
/* Emit the INTERP_CENTROID / INTERP_SAMPLE / INTERP_OFFSET opcodes.
 *
 * For INTERP_SAMPLE and INTERP_OFFSET, a new barycentric (I, J) pair is
 * derived from the center (I, J), its screen-space derivatives, and the
 * X/Y offset fetched by interp_fetch_args. Each output channel is then
 * produced by the fs.interp (or fs.constant for flat inputs) intrinsic.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				struct lp_build_tgsi_context *bld_base,
				struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const char *intr_name;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET/SAMPLE adjust from the pixel center; CENTROID uses the
	 * centroid (I, J) directly. */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	/* interp_param_idx: -1 = unsupported, 0 = flat (no param). */
	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = get_interp_param(ctx, interp_param_idx);
	else
		interp_param = NULL;

	attr_number = lp_build_const_int32(gallivm, input_index);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
			LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			/* The stored (I, J) are i32 bit patterns. */
			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");

			ij_out[i] = LLVMBuildBitCast(gallivm->builder,
						     temp2, ctx->i32, "");
		}
		interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
	}

	/* Flat inputs have no interp param and use fs.constant instead. */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef args[4];
		LLVMValueRef llvm_chan;
		unsigned schan;

		/* Honor the source swizzle when picking the channel. */
		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = lp_build_const_int32(gallivm, schan);

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;

		emit_data->output[chan] =
			lp_build_intrinsic(gallivm->builder, intr_name,
					   ctx->f32, args, args[3] ? 4 : 3,
					   LLVMReadNoneAttribute);
	}
}
5225
5226 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
5227 struct lp_build_emit_data *emit_data)
5228 {
5229 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
5230 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
5231 unsigned stream;
5232
5233 assert(src0.File == TGSI_FILE_IMMEDIATE);
5234
5235 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
5236 return stream;
5237 }
5238
5239 /* Emit one vertex from the geometry shader */
5240 static void si_llvm_emit_vertex(
5241 const struct lp_build_tgsi_action *action,
5242 struct lp_build_tgsi_context *bld_base,
5243 struct lp_build_emit_data *emit_data)
5244 {
5245 struct si_shader_context *ctx = si_shader_context(bld_base);
5246 struct lp_build_context *uint = &bld_base->uint_bld;
5247 struct si_shader *shader = ctx->shader;
5248 struct tgsi_shader_info *info = &shader->selector->info;
5249 struct gallivm_state *gallivm = bld_base->base.gallivm;
5250 LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
5251 SI_PARAM_GS2VS_OFFSET);
5252 LLVMValueRef gs_next_vertex;
5253 LLVMValueRef can_emit, kill;
5254 LLVMValueRef args[2];
5255 unsigned chan;
5256 int i;
5257 unsigned stream;
5258
5259 stream = si_llvm_get_stream(bld_base, emit_data);
5260
5261 /* Write vertex attribute values to GSVS ring */
5262 gs_next_vertex = LLVMBuildLoad(gallivm->builder,
5263 ctx->gs_next_vertex[stream],
5264 "");
5265
5266 /* If this thread has already emitted the declared maximum number of
5267 * vertices, kill it: excessive vertex emissions are not supposed to
5268 * have any effect, and GS threads have no externally observable
5269 * effects other than emitting vertices.
5270 */
5271 can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
5272 lp_build_const_int32(gallivm,
5273 shader->selector->gs_max_out_vertices), "");
5274 kill = lp_build_select(&bld_base->base, can_emit,
5275 lp_build_const_float(gallivm, 1.0f),
5276 lp_build_const_float(gallivm, -1.0f));
5277
5278 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
5279 ctx->voidt, &kill, 1, 0);
5280
5281 for (i = 0; i < info->num_outputs; i++) {
5282 LLVMValueRef *out_ptr =
5283 ctx->soa.outputs[i];
5284
5285 for (chan = 0; chan < 4; chan++) {
5286 LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
5287 LLVMValueRef voffset =
5288 lp_build_const_int32(gallivm, (i * 4 + chan) *
5289 shader->selector->gs_max_out_vertices);
5290
5291 voffset = lp_build_add(uint, voffset, gs_next_vertex);
5292 voffset = lp_build_mul_imm(uint, voffset, 4);
5293
5294 out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
5295
5296 build_tbuffer_store(ctx,
5297 ctx->gsvs_ring[stream],
5298 out_val, 1,
5299 voffset, soffset, 0,
5300 V_008F0C_BUF_DATA_FORMAT_32,
5301 V_008F0C_BUF_NUM_FORMAT_UINT,
5302 1, 0, 1, 1, 0);
5303 }
5304 }
5305 gs_next_vertex = lp_build_add(uint, gs_next_vertex,
5306 lp_build_const_int32(gallivm, 1));
5307
5308 LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
5309
5310 /* Signal vertex emission */
5311 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
5312 args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
5313 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5314 ctx->voidt, args, 2, 0);
5315 }
5316
5317 /* Cut one primitive from the geometry shader */
5318 static void si_llvm_emit_primitive(
5319 const struct lp_build_tgsi_action *action,
5320 struct lp_build_tgsi_context *bld_base,
5321 struct lp_build_emit_data *emit_data)
5322 {
5323 struct si_shader_context *ctx = si_shader_context(bld_base);
5324 struct gallivm_state *gallivm = bld_base->base.gallivm;
5325 LLVMValueRef args[2];
5326 unsigned stream;
5327
5328 /* Signal primitive cut */
5329 stream = si_llvm_get_stream(bld_base, emit_data);
5330 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
5331 args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
5332 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5333 ctx->voidt, args, 2, 0);
5334 }
5335
5336 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
5337 struct lp_build_tgsi_context *bld_base,
5338 struct lp_build_emit_data *emit_data)
5339 {
5340 struct si_shader_context *ctx = si_shader_context(bld_base);
5341 struct gallivm_state *gallivm = bld_base->base.gallivm;
5342
5343 /* The real barrier instruction isn’t needed, because an entire patch
5344 * always fits into a single wave.
5345 */
5346 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
5347 emit_optimization_barrier(ctx);
5348 return;
5349 }
5350
5351 lp_build_intrinsic(gallivm->builder,
5352 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
5353 : "llvm.AMDGPU.barrier.local",
5354 ctx->voidt, NULL, 0, 0);
5355 }
5356
/* TGSI action table entries: argument fetch + emit callbacks for the
 * texture-sampling and interpolation opcodes. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};

static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
5366
/* Create the main LLVM function for the shader and annotate its inputs.
 *
 * Parameters 0..last_sgpr are SGPR inputs; the remaining parameters are
 * VGPRs. The function's return value starts out undefined and is filled
 * in by the shader-specific epilog code.
 */
static void si_create_function(struct si_shader_context *ctx,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr)
{
	int i;

	si_llvm_create_func(ctx, returns, num_returns,
			    params, num_params);
	si_llvm_shader_type(ctx->main_fn, ctx->type);
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			LLVMAddAttribute(P, LLVMByValAttribute);
			lp_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			/* Non-pointer SGPR inputs are marked InReg. */
			LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	/* Opt-in fast-math function attributes for the unsafe-math debug
	 * mode. */
	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
5412
5413 static void create_meta_data(struct si_shader_context *ctx)
5414 {
5415 struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
5416
5417 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5418 "invariant.load", 14);
5419 ctx->range_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5420 "range", 5);
5421 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5422 "amdgpu.uniform", 14);
5423
5424 ctx->empty_md = LLVMMDNodeInContext(gallivm->context, NULL, 0);
5425 }
5426
/* Declare the SGPR inputs needed for transform feedback (streamout).
 *
 * Appends parameter types to *params, advances *num_params, and records
 * each parameter's index in ctx->param_streamout_*.
 */
static void declare_streamout_params(struct si_shader_context *ctx,
				     struct pipe_stream_output_info *so,
				     LLVMTypeRef *params, LLVMTypeRef i32,
				     unsigned *num_params)
{
	int i;

	/* Streamout SGPRs. */
	if (so->num_outputs) {
		/* TES reuses the tess_offchip SGPR slot for the streamout
		 * config instead of adding a new parameter. */
		if (ctx->type != PIPE_SHADER_TESS_EVAL)
			params[ctx->param_streamout_config = (*num_params)++] = i32;
		else
			ctx->param_streamout_config = ctx->param_tess_offchip;

		params[ctx->param_streamout_write_index = (*num_params)++] = i32;
	}
	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!so->stride[i])
			continue;

		params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
	}
}
5451
5452 static unsigned llvm_get_type_size(LLVMTypeRef type)
5453 {
5454 LLVMTypeKind kind = LLVMGetTypeKind(type);
5455
5456 switch (kind) {
5457 case LLVMIntegerTypeKind:
5458 return LLVMGetIntTypeWidth(type) / 8;
5459 case LLVMFloatTypeKind:
5460 return 4;
5461 case LLVMPointerTypeKind:
5462 return 8;
5463 case LLVMVectorTypeKind:
5464 return LLVMGetVectorSize(type) *
5465 llvm_get_type_size(LLVMGetElementType(type));
5466 default:
5467 assert(0);
5468 return 0;
5469 }
5470 }
5471
5472 static void declare_tess_lds(struct si_shader_context *ctx)
5473 {
5474 struct gallivm_state *gallivm = &ctx->gallivm;
5475 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
5476 struct lp_build_context *uint = &bld_base->uint_bld;
5477
5478 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
5479 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, uint->zero,
5480 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
5481 "tess_lds");
5482 }
5483
5484 static void create_function(struct si_shader_context *ctx)
5485 {
5486 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
5487 struct gallivm_state *gallivm = bld_base->base.gallivm;
5488 struct si_shader *shader = ctx->shader;
5489 LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
5490 LLVMTypeRef returns[16+32*4];
5491 unsigned i, last_sgpr, num_params, num_return_sgprs;
5492 unsigned num_returns = 0;
5493
5494 v3i32 = LLVMVectorType(ctx->i32, 3);
5495
5496 params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
5497 params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
5498 params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
5499 params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
5500 params[SI_PARAM_SHADER_BUFFERS] = const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);
5501
5502 switch (ctx->type) {
5503 case PIPE_SHADER_VERTEX:
5504 params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
5505 params[SI_PARAM_BASE_VERTEX] = ctx->i32;
5506 params[SI_PARAM_START_INSTANCE] = ctx->i32;
5507 params[SI_PARAM_DRAWID] = ctx->i32;
5508 num_params = SI_PARAM_DRAWID+1;
5509
5510 if (shader->key.vs.as_es) {
5511 params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
5512 } else if (shader->key.vs.as_ls) {
5513 params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
5514 num_params = SI_PARAM_LS_OUT_LAYOUT+1;
5515 } else {
5516 if (ctx->is_gs_copy_shader) {
5517 num_params = SI_PARAM_RW_BUFFERS+1;
5518 } else {
5519 params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
5520 num_params = SI_PARAM_VS_STATE_BITS+1;
5521 }
5522
5523 /* The locations of the other parameters are assigned dynamically. */
5524 declare_streamout_params(ctx, &shader->selector->so,
5525 params, ctx->i32, &num_params);
5526 }
5527
5528 last_sgpr = num_params-1;
5529
5530 /* VGPRs */
5531 params[ctx->param_vertex_id = num_params++] = ctx->i32;
5532 params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
5533 params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
5534 params[ctx->param_instance_id = num_params++] = ctx->i32;
5535
5536 if (!ctx->is_monolithic &&
5537 !ctx->is_gs_copy_shader) {
5538 /* Vertex load indices. */
5539 ctx->param_vertex_index0 = num_params;
5540
5541 for (i = 0; i < shader->selector->info.num_inputs; i++)
5542 params[num_params++] = ctx->i32;
5543
5544 /* PrimitiveID output. */
5545 if (!shader->key.vs.as_es && !shader->key.vs.as_ls)
5546 for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
5547 returns[num_returns++] = ctx->f32;
5548 }
5549 break;
5550
5551 case PIPE_SHADER_TESS_CTRL:
5552 params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
5553 params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
5554 params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
5555 params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
5556 params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
5557 params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
5558 last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
5559
5560 /* VGPRs */
5561 params[SI_PARAM_PATCH_ID] = ctx->i32;
5562 params[SI_PARAM_REL_IDS] = ctx->i32;
5563 num_params = SI_PARAM_REL_IDS+1;
5564
5565 if (!ctx->is_monolithic) {
5566 /* SI_PARAM_TCS_OC_LDS and PARAM_TESS_FACTOR_OFFSET are
5567 * placed after the user SGPRs.
5568 */
5569 for (i = 0; i < SI_TCS_NUM_USER_SGPR + 2; i++)
5570 returns[num_returns++] = ctx->i32; /* SGPRs */
5571
5572 for (i = 0; i < 3; i++)
5573 returns[num_returns++] = ctx->f32; /* VGPRs */
5574 }
5575 break;
5576
5577 case PIPE_SHADER_TESS_EVAL:
5578 params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
5579 num_params = SI_PARAM_TCS_OFFCHIP_LAYOUT+1;
5580
5581 if (shader->key.tes.as_es) {
5582 params[ctx->param_oc_lds = num_params++] = ctx->i32;
5583 params[ctx->param_tess_offchip = num_params++] = ctx->i32;
5584 params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
5585 } else {
5586 params[ctx->param_tess_offchip = num_params++] = ctx->i32;
5587 declare_streamout_params(ctx, &shader->selector->so,
5588 params, ctx->i32, &num_params);
5589 params[ctx->param_oc_lds = num_params++] = ctx->i32;
5590 }
5591 last_sgpr = num_params - 1;
5592
5593 /* VGPRs */
5594 params[ctx->param_tes_u = num_params++] = ctx->f32;
5595 params[ctx->param_tes_v = num_params++] = ctx->f32;
5596 params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
5597 params[ctx->param_tes_patch_id = num_params++] = ctx->i32;
5598
5599 /* PrimitiveID output. */
5600 if (!ctx->is_monolithic && !shader->key.tes.as_es)
5601 for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
5602 returns[num_returns++] = ctx->f32;
5603 break;
5604
5605 case PIPE_SHADER_GEOMETRY:
5606 params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
5607 params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
5608 last_sgpr = SI_PARAM_GS_WAVE_ID;
5609
5610 /* VGPRs */
5611 params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
5612 params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
5613 params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
5614 params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
5615 params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
5616 params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
5617 params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
5618 params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
5619 num_params = SI_PARAM_GS_INSTANCE_ID+1;
5620 break;
5621
5622 case PIPE_SHADER_FRAGMENT:
5623 params[SI_PARAM_ALPHA_REF] = ctx->f32;
5624 params[SI_PARAM_PRIM_MASK] = ctx->i32;
5625 last_sgpr = SI_PARAM_PRIM_MASK;
5626 params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
5627 params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
5628 params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
5629 params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
5630 params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
5631 params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
5632 params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
5633 params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
5634 params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
5635 params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
5636 params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
5637 params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
5638 params[SI_PARAM_FRONT_FACE] = ctx->i32;
5639 params[SI_PARAM_ANCILLARY] = ctx->i32;
5640 params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
5641 params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
5642 num_params = SI_PARAM_POS_FIXED_PT+1;
5643
5644 if (!ctx->is_monolithic) {
5645 /* Color inputs from the prolog. */
5646 if (shader->selector->info.colors_read) {
5647 unsigned num_color_elements =
5648 util_bitcount(shader->selector->info.colors_read);
5649
5650 assert(num_params + num_color_elements <= ARRAY_SIZE(params));
5651 for (i = 0; i < num_color_elements; i++)
5652 params[num_params++] = ctx->f32;
5653 }
5654
5655 /* Outputs for the epilog. */
5656 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
5657 num_returns =
5658 num_return_sgprs +
5659 util_bitcount(shader->selector->info.colors_written) * 4 +
5660 shader->selector->info.writes_z +
5661 shader->selector->info.writes_stencil +
5662 shader->selector->info.writes_samplemask +
5663 1 /* SampleMaskIn */;
5664
5665 num_returns = MAX2(num_returns,
5666 num_return_sgprs +
5667 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
5668
5669 for (i = 0; i < num_return_sgprs; i++)
5670 returns[i] = ctx->i32;
5671 for (; i < num_returns; i++)
5672 returns[i] = ctx->f32;
5673 }
5674 break;
5675
5676 case PIPE_SHADER_COMPUTE:
5677 params[SI_PARAM_GRID_SIZE] = v3i32;
5678 params[SI_PARAM_BLOCK_SIZE] = v3i32;
5679 params[SI_PARAM_BLOCK_ID] = v3i32;
5680 last_sgpr = SI_PARAM_BLOCK_ID;
5681
5682 params[SI_PARAM_THREAD_ID] = v3i32;
5683 num_params = SI_PARAM_THREAD_ID + 1;
5684 break;
5685 default:
5686 assert(0 && "unimplemented shader");
5687 return;
5688 }
5689
5690 assert(num_params <= ARRAY_SIZE(params));
5691
5692 si_create_function(ctx, returns, num_returns, params,
5693 num_params, last_sgpr);
5694
5695 /* Reserve register locations for VGPR inputs the PS prolog may need. */
5696 if (ctx->type == PIPE_SHADER_FRAGMENT &&
5697 !ctx->is_monolithic) {
5698 si_llvm_add_attribute(ctx->main_fn,
5699 "InitialPSInputAddr",
5700 S_0286D0_PERSP_SAMPLE_ENA(1) |
5701 S_0286D0_PERSP_CENTER_ENA(1) |
5702 S_0286D0_PERSP_CENTROID_ENA(1) |
5703 S_0286D0_LINEAR_SAMPLE_ENA(1) |
5704 S_0286D0_LINEAR_CENTER_ENA(1) |
5705 S_0286D0_LINEAR_CENTROID_ENA(1) |
5706 S_0286D0_FRONT_FACE_ENA(1) |
5707 S_0286D0_POS_FIXED_PT_ENA(1));
5708 } else if (ctx->type == PIPE_SHADER_COMPUTE) {
5709 const unsigned *properties = shader->selector->info.properties;
5710 unsigned max_work_group_size =
5711 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
5712 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
5713 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
5714
5715 if (!max_work_group_size) {
5716 /* This is a variable group size compute shader,
5717 * compile it for the maximum possible group size.
5718 */
5719 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
5720 }
5721
5722 si_llvm_add_attribute(ctx->main_fn,
5723 "amdgpu-max-work-group-size",
5724 max_work_group_size);
5725 }
5726
5727 shader->info.num_input_sgprs = 0;
5728 shader->info.num_input_vgprs = 0;
5729
5730 for (i = 0; i <= last_sgpr; ++i)
5731 shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;
5732
5733 /* Unused fragment shader inputs are eliminated by the compiler,
5734 * so we don't know yet how many there will be.
5735 */
5736 if (ctx->type != PIPE_SHADER_FRAGMENT)
5737 for (; i < num_params; ++i)
5738 shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;
5739
5740 if (!ctx->screen->has_ds_bpermute &&
5741 bld_base->info &&
5742 (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
5743 bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
5744 bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
5745 bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
5746 bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
5747 bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
5748 ctx->lds =
5749 LLVMAddGlobalInAddressSpace(gallivm->module,
5750 LLVMArrayType(ctx->i32, 64),
5751 "ddxy_lds",
5752 LOCAL_ADDR_SPACE);
5753
5754 if ((ctx->type == PIPE_SHADER_VERTEX && shader->key.vs.as_ls) ||
5755 ctx->type == PIPE_SHADER_TESS_CTRL ||
5756 ctx->type == PIPE_SHADER_TESS_EVAL)
5757 declare_tess_lds(ctx);
5758 }
5759
5760 /**
5761 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
5762 * for later use.
5763 */
5764 static void preload_ring_buffers(struct si_shader_context *ctx)
5765 {
5766 struct gallivm_state *gallivm =
5767 ctx->soa.bld_base.base.gallivm;
5768
5769 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
5770 SI_PARAM_RW_BUFFERS);
5771
5772 if ((ctx->type == PIPE_SHADER_VERTEX &&
5773 ctx->shader->key.vs.as_es) ||
5774 (ctx->type == PIPE_SHADER_TESS_EVAL &&
5775 ctx->shader->key.tes.as_es) ||
5776 ctx->type == PIPE_SHADER_GEOMETRY) {
5777 unsigned ring =
5778 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
5779 : SI_ES_RING_ESGS;
5780 LLVMValueRef offset = lp_build_const_int32(gallivm, ring);
5781
5782 ctx->esgs_ring =
5783 build_indexed_load_const(ctx, buf_ptr, offset);
5784 }
5785
5786 if (ctx->is_gs_copy_shader) {
5787 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_VS_RING_GSVS);
5788
5789 ctx->gsvs_ring[0] =
5790 build_indexed_load_const(ctx, buf_ptr, offset);
5791 }
5792 if (ctx->type == PIPE_SHADER_GEOMETRY) {
5793 int i;
5794 for (i = 0; i < 4; i++) {
5795 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_GS_RING_GSVS0 + i);
5796
5797 ctx->gsvs_ring[i] =
5798 build_indexed_load_const(ctx, buf_ptr, offset);
5799 }
5800 }
5801 }
5802
/* Emit code that kills the fragment when it lands on a zero bit of the
 * 32x32 polygon stipple pattern stored in a constant buffer.
 *
 * \param param_rw_buffers    descriptor array holding the stipple buffer
 * \param param_pos_fixed_pt  index of the fixed-point position input param
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 unsigned param_pos_fixed_pt)
{
	struct lp_build_tgsi_context *bld_base =
		&ctx->soa.bld_base;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = lp_build_const_int32(gallivm, SI_PS_CONST_POLY_STIPPLE);
	desc = build_indexed_load_const(ctx, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	/* offset = y * 4 bytes: one dword per pattern row */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = LLVMBuildBitCast(builder, row, ctx->i32, "");
	/* Extract bit x of the row; a 0 bit means "kill this fragment". */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");

	/* The intrinsic kills the thread if arg < 0. */
	bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
			      LLVMConstReal(ctx->f32, -1), "");
	lp_build_intrinsic(builder, "llvm.AMDGPU.kill", ctx->voidt, &bit, 1, 0);
}
5837
/* Parse the (register, value) pairs that the compiler emitted into the
 * binary's config section and fill in \p conf.
 *
 * \param symbol_offset  offset of the symbol whose config block to read
 *
 * NOTE(review): the G_00B028_* accessors (PS RSRC1) are reused for the VS,
 * GS and compute RSRC1 registers — this assumes the SGPRS/VGPRS/FLOAT_MODE
 * fields have the same layout in all of them; confirm against sid.h.
 */
void si_shader_binary_read_config(struct radeon_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		radeon_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct radeon_shader_reloc *reloc = &binary->relocs[i];

		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	/* Each config entry is 8 bytes: a 32-bit register offset followed by
	 * a 32-bit value, both little-endian. */
	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* Register fields hold (count/granularity - 1);
			 * SGPRs come in blocks of 8, VGPRs in blocks of 4. */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode =  G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			/* Ignore LLVM's scratch size unless a scratch reloc
			 * proved the buffer is actually used (see above). */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			/* Warn once per process about unknown registers. */
			static bool printed;

			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
		break;
		}
	}

	/* Older compilers may only emit INPUT_ENA; mirror it into ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5920
5921 void si_shader_apply_scratch_relocs(struct si_context *sctx,
5922 struct si_shader *shader,
5923 struct si_shader_config *config,
5924 uint64_t scratch_va)
5925 {
5926 unsigned i;
5927 uint32_t scratch_rsrc_dword0 = scratch_va;
5928 uint32_t scratch_rsrc_dword1 =
5929 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5930
5931 /* Enable scratch coalescing if LLVM sets ELEMENT_SIZE & INDEX_STRIDE
5932 * correctly.
5933 */
5934 if (HAVE_LLVM >= 0x0309)
5935 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5936 else
5937 scratch_rsrc_dword1 |=
5938 S_008F04_STRIDE(config->scratch_bytes_per_wave / 64);
5939
5940 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5941 const struct radeon_shader_reloc *reloc =
5942 &shader->binary.relocs[i];
5943 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5944 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5945 &scratch_rsrc_dword0, 4);
5946 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5947 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5948 &scratch_rsrc_dword1, 4);
5949 }
5950 }
5951 }
5952
5953 static unsigned si_get_shader_binary_size(struct si_shader *shader)
5954 {
5955 unsigned size = shader->binary.code_size;
5956
5957 if (shader->prolog)
5958 size += shader->prolog->binary.code_size;
5959 if (shader->epilog)
5960 size += shader->epilog->binary.code_size;
5961 return size;
5962 }
5963
5964 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5965 {
5966 const struct radeon_shader_binary *prolog =
5967 shader->prolog ? &shader->prolog->binary : NULL;
5968 const struct radeon_shader_binary *epilog =
5969 shader->epilog ? &shader->epilog->binary : NULL;
5970 const struct radeon_shader_binary *mainb = &shader->binary;
5971 unsigned bo_size = si_get_shader_binary_size(shader) +
5972 (!epilog ? mainb->rodata_size : 0);
5973 unsigned char *ptr;
5974
5975 assert(!prolog || !prolog->rodata_size);
5976 assert((!prolog && !epilog) || !mainb->rodata_size);
5977 assert(!epilog || !epilog->rodata_size);
5978
5979 r600_resource_reference(&shader->bo, NULL);
5980 shader->bo = (struct r600_resource*)
5981 pipe_buffer_create(&sscreen->b.b, 0,
5982 PIPE_USAGE_IMMUTABLE, bo_size);
5983 if (!shader->bo)
5984 return -ENOMEM;
5985
5986 /* Upload. */
5987 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
5988 PIPE_TRANSFER_READ_WRITE);
5989
5990 if (prolog) {
5991 util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
5992 ptr += prolog->code_size;
5993 }
5994
5995 util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
5996 ptr += mainb->code_size;
5997
5998 if (epilog)
5999 util_memcpy_cpu_to_le32(ptr, epilog->code, epilog->code_size);
6000 else if (mainb->rodata_size > 0)
6001 util_memcpy_cpu_to_le32(ptr, mainb->rodata, mainb->rodata_size);
6002
6003 sscreen->b.ws->buffer_unmap(shader->bo->buf);
6004 return 0;
6005 }
6006
/* Print a shader part's disassembly to \p file, and also forward it to the
 * state-tracker debug callback (one line at a time) if one is set.
 * Falls back to a raw hex dump when no disasm string is available. */
static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
				       struct pipe_debug_callback *debug,
				       const char *name, FILE *file)
{
	char *line, *p;
	unsigned i, count;

	if (binary->disasm_string) {
		fprintf(file, "Shader %s disassembly:\n", name);
		fprintf(file, "%s", binary->disasm_string);

		if (debug && debug->debug_message) {
			/* Very long debug messages are cut off, so send the
			 * disassembly one line at a time. This causes more
			 * overhead, but on the plus side it simplifies
			 * parsing of resulting logs.
			 */
			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly Begin");

			line = binary->disasm_string;
			while (*line) {
				/* Points at the next '\n' or the final NUL. */
				p = util_strchrnul(line, '\n');
				count = p - line;

				/* Skip empty lines. */
				if (count) {
					pipe_debug_message(debug, SHADER_INFO,
							   "%.*s", count, line);
				}

				if (!*p)
					break;
				line = p + 1;
			}

			pipe_debug_message(debug, SHADER_INFO,
					   "Shader Disassembly End");
		}
	} else {
		fprintf(file, "Shader %s binary:\n", name);
		/* Dump one little-endian dword per line, most significant
		 * byte first. */
		for (i = 0; i < binary->code_size; i += 4) {
			fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
				binary->code[i + 3], binary->code[i + 2],
				binary->code[i + 1], binary->code[i]);
		}
	}
}
6054
/* Print shader statistics (register usage, code size, LDS, scratch, and the
 * estimated max waves per SIMD) to \p file and to the debug callback. */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 struct si_shader_config *conf,
				 unsigned num_inputs,
				 unsigned code_size,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file)
{
	/* LDS allocation granularity: 512 bytes on CIK+, 256 before. */
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves = 10; /* hardware maximum per SIMD */

	/* Compute LDS usage for PS. */
	if (processor == PIPE_SHADER_FRAGMENT) {
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* SGPR budget per SIMD differs by generation. */
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* 256 VGPRs per SIMD lane budget. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (file != stderr ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	/* Always forward a machine-parsable summary to the debug callback. */
	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs);
}
6134
6135 static const char *si_get_shader_name(struct si_shader *shader,
6136 unsigned processor)
6137 {
6138 switch (processor) {
6139 case PIPE_SHADER_VERTEX:
6140 if (shader->key.vs.as_es)
6141 return "Vertex Shader as ES";
6142 else if (shader->key.vs.as_ls)
6143 return "Vertex Shader as LS";
6144 else
6145 return "Vertex Shader as VS";
6146 case PIPE_SHADER_TESS_CTRL:
6147 return "Tessellation Control Shader";
6148 case PIPE_SHADER_TESS_EVAL:
6149 if (shader->key.tes.as_es)
6150 return "Tessellation Evaluation Shader as ES";
6151 else
6152 return "Tessellation Evaluation Shader as VS";
6153 case PIPE_SHADER_GEOMETRY:
6154 if (shader->gs_copy_shader == NULL)
6155 return "GS Copy Shader as VS";
6156 else
6157 return "Geometry Shader";
6158 case PIPE_SHADER_FRAGMENT:
6159 return "Pixel Shader";
6160 case PIPE_SHADER_COMPUTE:
6161 return "Compute Shader";
6162 default:
6163 return "Unknown Shader";
6164 }
6165 }
6166
/* Dump everything known about a shader to \p file: the shader key, the saved
 * LLVM IR (if recorded), the disassembly of all parts, and the stats.
 * When \p file is stderr, output is gated on the driver's debug flags. */
void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
		    struct pipe_debug_callback *debug, unsigned processor,
		    FILE *file)
{
	if (file != stderr ||
	    r600_can_dump_shader(&sscreen->b, processor))
		si_dump_shader_key(processor, &shader->key, file);

	/* llvm_ir_string is only recorded when sscreen->record_llvm_ir is
	 * set (see si_compile_llvm). */
	if (file != stderr && shader->binary.llvm_ir_string) {
		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader, processor));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (file != stderr ||
	    (r600_can_dump_shader(&sscreen->b, processor) &&
	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));

		if (shader->prolog)
			si_shader_dump_disassembly(&shader->prolog->binary,
						   debug, "prolog", file);

		si_shader_dump_disassembly(&shader->binary, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(&shader->epilog->binary,
						   debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, &shader->config,
			     shader->selector ? shader->selector->info.num_inputs : 0,
			     si_get_shader_binary_size(shader), debug, processor,
			     file);
}
6203
6204 int si_compile_llvm(struct si_screen *sscreen,
6205 struct radeon_shader_binary *binary,
6206 struct si_shader_config *conf,
6207 LLVMTargetMachineRef tm,
6208 LLVMModuleRef mod,
6209 struct pipe_debug_callback *debug,
6210 unsigned processor,
6211 const char *name)
6212 {
6213 int r = 0;
6214 unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
6215
6216 if (r600_can_dump_shader(&sscreen->b, processor)) {
6217 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
6218
6219 if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
6220 fprintf(stderr, "%s LLVM IR:\n\n", name);
6221 LLVMDumpModule(mod);
6222 fprintf(stderr, "\n");
6223 }
6224 }
6225
6226 if (sscreen->record_llvm_ir) {
6227 char *ir = LLVMPrintModuleToString(mod);
6228 binary->llvm_ir_string = strdup(ir);
6229 LLVMDisposeMessage(ir);
6230 }
6231
6232 if (!si_replace_shader(count, binary)) {
6233 r = si_llvm_compile(mod, binary, tm, debug);
6234 if (r)
6235 return r;
6236 }
6237
6238 si_shader_binary_read_config(binary, conf, 0);
6239
6240 /* Enable 64-bit and 16-bit denormals, because there is no performance
6241 * cost.
6242 *
6243 * If denormals are enabled, all floating-point output modifiers are
6244 * ignored.
6245 *
6246 * Don't enable denormals for 32-bit floats, because:
6247 * - Floating-point output modifiers would be ignored by the hw.
6248 * - Some opcodes don't support denormals, such as v_mad_f32. We would
6249 * have to stop using those.
6250 * - SI & CI would be very slow.
6251 */
6252 conf->float_mode |= V_00B028_FP_64_DENORMS;
6253
6254 FREE(binary->config);
6255 FREE(binary->global_symbol_offsets);
6256 binary->config = NULL;
6257 binary->global_symbol_offsets = NULL;
6258
6259 /* Some shaders can't have rodata because their binaries can be
6260 * concatenated.
6261 */
6262 if (binary->rodata_size &&
6263 (processor == PIPE_SHADER_VERTEX ||
6264 processor == PIPE_SHADER_TESS_CTRL ||
6265 processor == PIPE_SHADER_TESS_EVAL ||
6266 processor == PIPE_SHADER_FRAGMENT)) {
6267 fprintf(stderr, "radeonsi: The shader can't have rodata.");
6268 return -EINVAL;
6269 }
6270
6271 return r;
6272 }
6273
6274 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
6275 {
6276 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
6277 LLVMBuildRetVoid(ctx->gallivm.builder);
6278 else
6279 LLVMBuildRet(ctx->gallivm.builder, ret);
6280 }
6281
/* Generate code for the hardware VS shader stage to go with a geometry shader */
/* The copy shader reads the GS outputs back from the GSVS ring buffer and
 * exports them as regular VS outputs. Returns 0 on success, negative on
 * compile/upload failure. NOTE(review): the MALLOC result is not checked
 * before use — confirm whether MALLOC aborts on OOM in this tree. */
static int si_generate_gs_copy_shader(struct si_screen *sscreen,
				      struct si_shader_context *ctx,
				      struct si_shader *gs,
				      struct pipe_debug_callback *debug)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader_output_values *outputs;
	struct tgsi_shader_info *gsinfo = &gs->selector->info;
	LLVMValueRef args[9];
	int i, r;

	outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));

	/* The copy shader is compiled as a hardware VS. */
	si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm);
	ctx->type = PIPE_SHADER_VERTEX;
	ctx->is_gs_copy_shader = true;

	create_meta_data(ctx);
	create_function(ctx);
	preload_ring_buffers(ctx);

	/* Fixed arguments for llvm.SI.buffer.load.dword: only args[2]
	 * (the byte offset) varies per output component below. */
	args[0] = ctx->gsvs_ring[0];
	args[1] = lp_build_mul_imm(uint,
				   LLVMGetParam(ctx->main_fn,
						ctx->param_vertex_id),
				   4);
	args[3] = uint->zero;
	args[4] = uint->one;  /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one;  /* GLC */
	args[7] = uint->one;  /* SLC */
	args[8] = uint->zero; /* TFE */

	/* Fetch vertex data from GSVS ring */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		unsigned chan;

		outputs[i].name = gsinfo->output_semantic_name[i];
		outputs[i].sid = gsinfo->output_semantic_index[i];

		for (chan = 0; chan < 4; chan++) {
			args[2] = lp_build_const_int32(gallivm,
						       (i * 4 + chan) *
						       gs->selector->gs_max_out_vertices * 16 * 4);

			outputs[i].values[chan] =
				LLVMBuildBitCast(gallivm->builder,
						 lp_build_intrinsic(gallivm->builder,
								    "llvm.SI.buffer.load.dword.i32.i32",
								    ctx->i32, args, 9,
								    LLVMReadOnlyAttribute),
						 ctx->f32, "");
		}
	}

	si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);

	LLVMBuildRetVoid(gallivm->builder);

	/* Dump LLVM IR before any optimization passes */
	if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
	    r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
		LLVMDumpModule(bld_base->base.gallivm->module);

	si_llvm_finalize_module(ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_GEOMETRY));

	r = si_compile_llvm(sscreen, &ctx->shader->binary,
			    &ctx->shader->config, ctx->tm,
			    bld_base->base.gallivm->module,
			    debug, PIPE_SHADER_GEOMETRY,
			    "GS Copy Shader");
	if (!r) {
		if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx->shader, debug,
			       PIPE_SHADER_GEOMETRY, stderr);
		r = si_shader_binary_upload(sscreen, ctx->shader);
	}

	si_llvm_dispose(ctx);

	FREE(outputs);
	return r;
}
6370
/* Print the fields of the shader key that are relevant for \p shader's
 * stage to \p f (used by si_shader_dump). */
static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
			       FILE *f)
{
	int i;

	fprintf(f, "SHADER KEY\n");

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		fprintf(f, "  instance_divisors = {");
		for (i = 0; i < ARRAY_SIZE(key->vs.prolog.instance_divisors); i++)
			fprintf(f, !i ? "%u" : ", %u",
				key->vs.prolog.instance_divisors[i]);
		fprintf(f, "}\n");
		fprintf(f, "  as_es = %u\n", key->vs.as_es);
		fprintf(f, "  as_ls = %u\n", key->vs.as_ls);
		fprintf(f, "  export_prim_id = %u\n", key->vs.epilog.export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		fprintf(f, "  prim_mode = %u\n", key->tcs.epilog.prim_mode);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->tes.as_es);
		fprintf(f, "  export_prim_id = %u\n", key->tes.epilog.export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
	case PIPE_SHADER_COMPUTE:
		/* These stages have no key fields worth printing. */
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  prolog.color_two_side = %u\n", key->ps.prolog.color_two_side);
		fprintf(f, "  prolog.flatshade_colors = %u\n", key->ps.prolog.flatshade_colors);
		fprintf(f, "  prolog.poly_stipple = %u\n", key->ps.prolog.poly_stipple);
		fprintf(f, "  prolog.force_persp_sample_interp = %u\n", key->ps.prolog.force_persp_sample_interp);
		fprintf(f, "  prolog.force_linear_sample_interp = %u\n", key->ps.prolog.force_linear_sample_interp);
		fprintf(f, "  prolog.force_persp_center_interp = %u\n", key->ps.prolog.force_persp_center_interp);
		fprintf(f, "  prolog.force_linear_center_interp = %u\n", key->ps.prolog.force_linear_center_interp);
		fprintf(f, "  prolog.bc_optimize_for_persp = %u\n", key->ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  prolog.bc_optimize_for_linear = %u\n", key->ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  epilog.spi_shader_col_format = 0x%x\n", key->ps.epilog.spi_shader_col_format);
		fprintf(f, "  epilog.color_is_int8 = 0x%X\n", key->ps.epilog.color_is_int8);
		fprintf(f, "  epilog.last_cbuf = %u\n", key->ps.epilog.last_cbuf);
		fprintf(f, "  epilog.alpha_func = %u\n", key->ps.epilog.alpha_func);
		fprintf(f, "  epilog.alpha_to_one = %u\n", key->ps.epilog.alpha_to_one);
		fprintf(f, "  epilog.poly_line_smoothing = %u\n", key->ps.epilog.poly_line_smoothing);
		fprintf(f, "  epilog.clamp_color = %u\n", key->ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}
}
6426
/* Initialize a si_shader_context: zero it, set up the gallivm/TGSI state,
 * cache the commonly used LLVM types, and register the per-opcode emit
 * callbacks (texturing, images, atomics, derivatives, GS vertex emission).
 * \p shader may be NULL (e.g. for standalone shader parts). */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	memset(ctx, 0, sizeof(*ctx));
	si_llvm_context_init(
		ctx, "amdgcn--",
		(shader && shader->selector) ? &shader->selector->info : NULL,
		(shader && shader->selector) ? shader->selector->tokens : NULL);
	si_shader_context_init_alu(&ctx->soa.bld_base);
	ctx->tm = tm;
	ctx->screen = sscreen;
	if (shader && shader->selector)
		ctx->type = shader->selector->info.processor;
	else
		ctx->type = -1; /* no shader bound yet */
	ctx->shader = shader;

	/* Cache frequently used LLVM type handles. */
	ctx->voidt = LLVMVoidTypeInContext(ctx->gallivm.context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->gallivm.context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->gallivm.context);
	ctx->i32 = LLVMInt32TypeInContext(ctx->gallivm.context);
	ctx->i64 = LLVMInt64TypeInContext(ctx->gallivm.context);
	ctx->i128 = LLVMIntTypeInContext(ctx->gallivm.context, 128);
	ctx->f32 = LLVMFloatTypeInContext(ctx->gallivm.context);
	ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	bld_base = &ctx->soa.bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes. */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	/* Texturing opcodes. */
	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	/* Image/buffer load-store opcodes. */
	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	/* Atomics share one fetch/emit pair; only the intrinsic name differs. */
	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	/* Derivatives. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
6525
/* Return true if the PARAM export has been eliminated.
 *
 * \param inst    a call to llvm.SI.export; operands 5..8 hold the X,Y,Z,W
 *                channel values (matches the args[5..8] layout used when
 *                the export is emitted, e.g. in si_compile_vs_epilog)
 * \param offset  PARAM export offset (target - V_008DFC_SQ_EXP_PARAM)
 */
static bool si_eliminate_const_output(struct si_shader_context *ctx,
				      LLVMValueRef inst, unsigned offset)
{
	struct si_shader *shader = ctx->shader;
	unsigned num_outputs = shader->selector->info.num_outputs;
	unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
	bool is_zero[4] = {}, is_one[4] = {};

	/* Classify each exported channel as 0, 1, or "other" (not removable). */
	for (i = 0; i < 4; i++) {
		LLVMBool loses_info;
		LLVMValueRef p = LLVMGetOperand(inst, 5 + i);

		/* It's a constant expression. Undef outputs are eliminated too. */
		if (LLVMIsUndef(p)) {
			/* Undef matches either 0 or 1, whichever helps. */
			is_zero[i] = true;
			is_one[i] = true;
		} else if (LLVMIsAConstantFP(p)) {
			double a = LLVMConstRealGetDouble(p, &loses_info);

			if (a == 0)
				is_zero[i] = true;
			else if (a == 1)
				is_one[i] = true;
			else
				return false; /* other constant */
		} else
			return false; /* not a constant: the export must stay */
	}

	/* Only certain combinations of 0 and 1 can be eliminated.
	 * The encoding implied by EXP_PARAM_DEFAULT_VAL_0000 + default_val is
	 * presumably 0=(0,0,0,0), 1=(0,0,0,1), 2=(1,1,1,0), 3=(1,1,1,1).
	 */
	if (is_zero[0] && is_zero[1] && is_zero[2])
		default_val = is_zero[3] ? 0 : 1;
	else if (is_one[0] && is_one[1] && is_one[2])
		default_val = is_zero[3] ? 2 : 3;
	else
		return false;

	/* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
	LLVMInstructionEraseFromParent(inst);

	/* Change OFFSET to DEFAULT_VAL so later consumers substitute the
	 * constant instead of reading a (now nonexistent) PARAM slot. */
	for (i = 0; i < num_outputs; i++) {
		if (shader->info.vs_output_param_offset[i] == offset) {
			shader->info.vs_output_param_offset[i] =
				EXP_PARAM_DEFAULT_VAL_0000 + default_val;
			break;
		}
	}
	return true;
}
6577
/* PARAM exports of the main function that were NOT eliminated as constants;
 * collected so they can be renumbered to close holes in the export space. */
struct si_vs_exports {
	unsigned num;                        /* number of collected exports */
	unsigned offset[SI_MAX_VS_OUTPUTS];  /* PARAM offset of each export */
	LLVMValueRef inst[SI_MAX_VS_OUTPUTS]; /* the llvm.SI.export call */
};
6583
/* Walk all instructions of the main function, eliminate PARAM exports whose
 * values are constants/undef (replaced by DEFAULT_VAL on the consumer side),
 * and renumber the surviving PARAM exports so the export space has no holes.
 *
 * Only runs when the shader is compiled as a hardware VS; ES/LS variants
 * don't use PARAM exports and are skipped.
 */
static void si_eliminate_const_vs_outputs(struct si_shader_context *ctx)
{
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBasicBlockRef bb;
	struct si_vs_exports exports;
	bool removed_any = false;

	exports.num = 0;

	/* Skip ES/LS variants — they don't emit PARAM exports. */
	if ((ctx->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es || shader->key.vs.as_ls)) ||
	    (ctx->type == PIPE_SHADER_TESS_EVAL && shader->key.tes.as_es))
		return;

	/* Process all LLVM instructions. */
	bb = LLVMGetFirstBasicBlock(ctx->main_fn);
	while (bb) {
		LLVMValueRef inst = LLVMGetFirstInstruction(bb);

		while (inst) {
			/* Fetch the successor first: "cur" may be erased below. */
			LLVMValueRef cur = inst;
			inst = LLVMGetNextInstruction(inst);

			if (LLVMGetInstructionOpcode(cur) != LLVMCall)
				continue;

			LLVMValueRef callee = lp_get_called_value(cur);

			if (!lp_is_function(callee))
				continue;

			const char *name = LLVMGetValueName(callee);
			unsigned num_args = LLVMCountParams(callee);

			/* Check if this is an export instruction. */
			if (num_args != 9 || strcmp(name, "llvm.SI.export"))
				continue;

			/* Operand 3 is the export target. */
			LLVMValueRef arg = LLVMGetOperand(cur, 3);
			unsigned target = LLVMConstIntGetZExtValue(arg);

			/* Only PARAM exports are of interest (not POS/MRT). */
			if (target < V_008DFC_SQ_EXP_PARAM)
				continue;

			target -= V_008DFC_SQ_EXP_PARAM;

			/* Eliminate constant value PARAM exports. */
			if (si_eliminate_const_output(ctx, cur, target)) {
				removed_any = true;
			} else {
				/* NOTE(review): assumes the number of PARAM
				 * exports never exceeds SI_MAX_VS_OUTPUTS —
				 * there is no bounds check here. */
				exports.offset[exports.num] = target;
				exports.inst[exports.num] = cur;
				exports.num++;
			}
		}
		bb = LLVMGetNextBasicBlock(bb);
	}

	/* Remove holes in export memory due to removed PARAM exports.
	 * This is done by renumbering all PARAM exports.
	 */
	if (removed_any) {
		ubyte current_offset[SI_MAX_VS_OUTPUTS];
		unsigned new_count = 0;
		unsigned out, i;

		/* Make a copy of the offsets. We need the old version while
		 * we are modifying some of them. */
		assert(sizeof(current_offset) ==
		       sizeof(shader->info.vs_output_param_offset));
		memcpy(current_offset, shader->info.vs_output_param_offset,
		       sizeof(current_offset));

		for (i = 0; i < exports.num; i++) {
			unsigned offset = exports.offset[i];

			for (out = 0; out < info->num_outputs; out++) {
				if (current_offset[out] != offset)
					continue;

				/* Rewrite the export target in the IR and
				 * record the new compacted offset. */
				LLVMSetOperand(exports.inst[i], 3,
					       LLVMConstInt(ctx->i32,
							    V_008DFC_SQ_EXP_PARAM + new_count, 0));
				shader->info.vs_output_param_offset[out] = new_count;
				new_count++;
				break;
			}
		}
		shader->info.nr_param_exports = new_count;
	}
}
6676
/* Set up stage-specific translation hooks and translate the TGSI tokens of
 * ctx->shader into LLVM IR (function body ending in a return of
 * ctx->return_value).
 *
 * \return true on success, false on unsupported shader type or when the
 *         TGSI->LLVM translation fails.
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;

	/* Select per-stage input fetch and epilogue callbacks. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.vs.as_ls)
			/* VS compiled as LS (feeding tessellation) */
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.vs.as_es)
			/* VS compiled as ES (feeding the GS) */
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.tes.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		if (ctx->is_monolithic)
			bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
		else
			/* Multi-part PS: return outputs to the epilog part. */
			bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_meta_data(ctx);
	create_function(ctx);
	preload_ring_buffers(ctx);

	/* Monolithic PS: emit the polygon-stipple prolog code inline. */
	if (ctx->is_monolithic && sel->type == PIPE_SHADER_FRAGMENT &&
	    shader->key.ps.prolog.poly_stipple) {
		LLVMValueRef list = LLVMGetParam(ctx->main_fn,
						 SI_PARAM_RW_BUFFERS);
		si_llvm_emit_polygon_stipple(ctx, list,
					     SI_PARAM_POS_FIXED_PT);
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* 4 emitted-vertex counters — presumably one per GS
		 * stream; confirm against the GS emit code. */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(bld_base->base.gallivm,
						ctx->i32, "");
		}
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6754
6755 int si_compile_tgsi_shader(struct si_screen *sscreen,
6756 LLVMTargetMachineRef tm,
6757 struct si_shader *shader,
6758 bool is_monolithic,
6759 struct pipe_debug_callback *debug)
6760 {
6761 struct si_shader_selector *sel = shader->selector;
6762 struct si_shader_context ctx;
6763 struct lp_build_tgsi_context *bld_base;
6764 LLVMModuleRef mod;
6765 int r = -1;
6766
6767 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
6768 * conversion fails. */
6769 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
6770 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
6771 tgsi_dump(sel->tokens, 0);
6772 si_dump_streamout(&sel->so);
6773 }
6774
6775 si_init_shader_ctx(&ctx, sscreen, shader, tm);
6776 ctx.is_monolithic = is_monolithic;
6777
6778 memset(shader->info.vs_output_param_offset, 0xff,
6779 sizeof(shader->info.vs_output_param_offset));
6780
6781 shader->info.uses_instanceid = sel->info.uses_instanceid;
6782
6783 bld_base = &ctx.soa.bld_base;
6784 ctx.load_system_value = declare_system_value;
6785
6786 if (!si_compile_tgsi_main(&ctx, shader))
6787 goto out;
6788
6789 mod = bld_base->base.gallivm->module;
6790
6791 /* Dump LLVM IR before any optimization passes */
6792 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6793 r600_can_dump_shader(&sscreen->b, ctx.type))
6794 LLVMDumpModule(mod);
6795
6796 si_llvm_finalize_module(&ctx,
6797 r600_extra_shader_checks(&sscreen->b, ctx.type));
6798
6799 /* Post-optimization transformations. */
6800 si_eliminate_const_vs_outputs(&ctx);
6801
6802 /* Compile to bytecode. */
6803 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
6804 mod, debug, ctx.type, "TGSI shader");
6805 if (r) {
6806 fprintf(stderr, "LLVM failed to compile shader\n");
6807 goto out;
6808 }
6809
6810 si_llvm_dispose(&ctx);
6811
6812 /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
6813 * LLVM 3.9svn has this bug.
6814 */
6815 if (sel->type == PIPE_SHADER_COMPUTE) {
6816 unsigned *props = sel->info.properties;
6817 unsigned wave_size = 64;
6818 unsigned max_vgprs = 256;
6819 unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
6820 unsigned max_sgprs_per_wave = 128;
6821 unsigned max_block_threads;
6822
6823 if (props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH])
6824 max_block_threads = props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
6825 props[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
6826 props[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
6827 else
6828 max_block_threads = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
6829
6830 unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
6831 unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);
6832
6833 max_vgprs = max_vgprs / min_waves_per_simd;
6834 max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);
6835
6836 if (shader->config.num_sgprs > max_sgprs ||
6837 shader->config.num_vgprs > max_vgprs) {
6838 fprintf(stderr, "LLVM failed to compile a shader correctly: "
6839 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
6840 shader->config.num_sgprs, shader->config.num_vgprs,
6841 max_sgprs, max_vgprs);
6842
6843 /* Just terminate the process, because dependent
6844 * shaders can hang due to bad input data, but use
6845 * the env var to allow shader-db to work.
6846 */
6847 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
6848 abort();
6849 }
6850 }
6851
6852 /* Add the scratch offset to input SGPRs. */
6853 if (shader->config.scratch_bytes_per_wave)
6854 shader->info.num_input_sgprs += 1; /* scratch byte offset */
6855
6856 /* Calculate the number of fragment input VGPRs. */
6857 if (ctx.type == PIPE_SHADER_FRAGMENT) {
6858 shader->info.num_input_vgprs = 0;
6859 shader->info.face_vgpr_index = -1;
6860
6861 if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6862 shader->info.num_input_vgprs += 2;
6863 if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
6864 shader->info.num_input_vgprs += 2;
6865 if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
6866 shader->info.num_input_vgprs += 2;
6867 if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
6868 shader->info.num_input_vgprs += 3;
6869 if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
6870 shader->info.num_input_vgprs += 2;
6871 if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
6872 shader->info.num_input_vgprs += 2;
6873 if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
6874 shader->info.num_input_vgprs += 2;
6875 if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
6876 shader->info.num_input_vgprs += 1;
6877 if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
6878 shader->info.num_input_vgprs += 1;
6879 if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
6880 shader->info.num_input_vgprs += 1;
6881 if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
6882 shader->info.num_input_vgprs += 1;
6883 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
6884 shader->info.num_input_vgprs += 1;
6885 if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
6886 shader->info.face_vgpr_index = shader->info.num_input_vgprs;
6887 shader->info.num_input_vgprs += 1;
6888 }
6889 if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
6890 shader->info.num_input_vgprs += 1;
6891 if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
6892 shader->info.num_input_vgprs += 1;
6893 if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
6894 shader->info.num_input_vgprs += 1;
6895 }
6896
6897 if (ctx.type == PIPE_SHADER_GEOMETRY) {
6898 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
6899 shader->gs_copy_shader->selector = shader->selector;
6900 ctx.shader = shader->gs_copy_shader;
6901 if ((r = si_generate_gs_copy_shader(sscreen, &ctx,
6902 shader, debug))) {
6903 free(shader->gs_copy_shader);
6904 shader->gs_copy_shader = NULL;
6905 goto out;
6906 }
6907 }
6908
6909 r = 0;
6910 out:
6911 return r;
6912 }
6913
6914 /**
6915 * Create, compile and return a shader part (prolog or epilog).
6916 *
6917 * \param sscreen screen
6918 * \param list list of shader parts of the same category
6919 * \param key shader part key
6920 * \param tm LLVM target machine
6921 * \param debug debug callback
6922 * \param compile the callback responsible for compilation
6923 * \return non-NULL on success
6924 */
6925 static struct si_shader_part *
6926 si_get_shader_part(struct si_screen *sscreen,
6927 struct si_shader_part **list,
6928 union si_shader_part_key *key,
6929 LLVMTargetMachineRef tm,
6930 struct pipe_debug_callback *debug,
6931 bool (*compile)(struct si_screen *,
6932 LLVMTargetMachineRef,
6933 struct pipe_debug_callback *,
6934 struct si_shader_part *))
6935 {
6936 struct si_shader_part *result;
6937
6938 pipe_mutex_lock(sscreen->shader_parts_mutex);
6939
6940 /* Find existing. */
6941 for (result = *list; result; result = result->next) {
6942 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6943 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6944 return result;
6945 }
6946 }
6947
6948 /* Compile a new one. */
6949 result = CALLOC_STRUCT(si_shader_part);
6950 result->key = *key;
6951 if (!compile(sscreen, tm, debug, result)) {
6952 FREE(result);
6953 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6954 return NULL;
6955 }
6956
6957 result->next = *list;
6958 *list = result;
6959 pipe_mutex_unlock(sscreen->shader_parts_mutex);
6960 return result;
6961 }
6962
6963 /**
6964 * Create a vertex shader prolog.
6965 *
6966 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6967 * All inputs are returned unmodified. The vertex load indices are
6968 * stored after them, which will used by the API VS for fetching inputs.
6969 *
6970 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6971 * input_v0,
6972 * input_v1,
6973 * input_v2,
6974 * input_v3,
6975 * (VertexID + BaseVertex),
6976 * (InstanceID + StartInstance),
6977 * (InstanceID / 2 + StartInstance)
6978 */
6979 static bool si_compile_vs_prolog(struct si_screen *sscreen,
6980 LLVMTargetMachineRef tm,
6981 struct pipe_debug_callback *debug,
6982 struct si_shader_part *out)
6983 {
6984 union si_shader_part_key *key = &out->key;
6985 struct si_shader shader = {};
6986 struct si_shader_context ctx;
6987 struct gallivm_state *gallivm = &ctx.gallivm;
6988 LLVMTypeRef *params, *returns;
6989 LLVMValueRef ret, func;
6990 int last_sgpr, num_params, num_returns, i;
6991 bool status = true;
6992
6993 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
6994 ctx.type = PIPE_SHADER_VERTEX;
6995 ctx.param_vertex_id = key->vs_prolog.num_input_sgprs;
6996 ctx.param_instance_id = key->vs_prolog.num_input_sgprs + 3;
6997
6998 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6999 params = alloca((key->vs_prolog.num_input_sgprs + 4) *
7000 sizeof(LLVMTypeRef));
7001 returns = alloca((key->vs_prolog.num_input_sgprs + 4 +
7002 key->vs_prolog.last_input + 1) *
7003 sizeof(LLVMTypeRef));
7004 num_params = 0;
7005 num_returns = 0;
7006
7007 /* Declare input and output SGPRs. */
7008 num_params = 0;
7009 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
7010 params[num_params++] = ctx.i32;
7011 returns[num_returns++] = ctx.i32;
7012 }
7013 last_sgpr = num_params - 1;
7014
7015 /* 4 preloaded VGPRs (outputs must be floats) */
7016 for (i = 0; i < 4; i++) {
7017 params[num_params++] = ctx.i32;
7018 returns[num_returns++] = ctx.f32;
7019 }
7020
7021 /* Vertex load indices. */
7022 for (i = 0; i <= key->vs_prolog.last_input; i++)
7023 returns[num_returns++] = ctx.f32;
7024
7025 /* Create the function. */
7026 si_create_function(&ctx, returns, num_returns, params,
7027 num_params, last_sgpr);
7028 func = ctx.main_fn;
7029
7030 /* Copy inputs to outputs. This should be no-op, as the registers match,
7031 * but it will prevent the compiler from overwriting them unintentionally.
7032 */
7033 ret = ctx.return_value;
7034 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
7035 LLVMValueRef p = LLVMGetParam(func, i);
7036 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
7037 }
7038 for (i = num_params - 4; i < num_params; i++) {
7039 LLVMValueRef p = LLVMGetParam(func, i);
7040 p = LLVMBuildBitCast(gallivm->builder, p, ctx.f32, "");
7041 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
7042 }
7043
7044 /* Compute vertex load indices from instance divisors. */
7045 for (i = 0; i <= key->vs_prolog.last_input; i++) {
7046 unsigned divisor = key->vs_prolog.states.instance_divisors[i];
7047 LLVMValueRef index;
7048
7049 if (divisor) {
7050 /* InstanceID / Divisor + StartInstance */
7051 index = get_instance_index_for_fetch(&ctx,
7052 SI_SGPR_START_INSTANCE,
7053 divisor);
7054 } else {
7055 /* VertexID + BaseVertex */
7056 index = LLVMBuildAdd(gallivm->builder,
7057 LLVMGetParam(func, ctx.param_vertex_id),
7058 LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
7059 }
7060
7061 index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
7062 ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
7063 num_params++, "");
7064 }
7065
7066 /* Compile. */
7067 si_llvm_build_ret(&ctx, ret);
7068 si_llvm_finalize_module(&ctx,
7069 r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));
7070
7071 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
7072 gallivm->module, debug, ctx.type,
7073 "Vertex Shader Prolog"))
7074 status = false;
7075
7076 si_llvm_dispose(&ctx);
7077 return status;
7078 }
7079
/**
 * Compile the vertex shader epilog. This is also used by the tessellation
 * evaluation shader compiled as VS.
 *
 * The input is PrimitiveID.
 *
 * If PrimitiveID is required by the pixel shader, export it.
 * Otherwise, do nothing (the epilog is an empty function).
 *
 * \return true if the compiled binary was stored in \p out
 */
static bool si_compile_vs_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
	LLVMTypeRef params[5];
	int num_params, i;
	bool status = true;

	/* No si_shader is needed; the context is only used to build IR. */
	si_init_shader_ctx(&ctx, sscreen, NULL, tm);
	ctx.type = PIPE_SHADER_VERTEX;

	/* Declare input VGPRs. */
	num_params = key->vs_epilog.states.export_prim_id ?
			   (VS_EPILOG_PRIMID_LOC + 1) : 0;
	assert(num_params <= ARRAY_SIZE(params));

	for (i = 0; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, -1);

	/* Emit exports. */
	if (key->vs_epilog.states.export_prim_id) {
		struct lp_build_context *base = &bld_base->base;
		struct lp_build_context *uint = &bld_base->uint_bld;
		LLVMValueRef args[9];

		/* Arguments of llvm.SI.export; only X carries PrimitiveID. */
		args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
		args[1] = uint->zero; /* whether the EXEC mask is valid */
		args[2] = uint->zero; /* DONE bit */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_PARAM +
					       key->vs_epilog.prim_id_param_offset);
		args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
		args[5] = LLVMGetParam(ctx.main_fn,
				       VS_EPILOG_PRIMID_LOC); /* X */
		args[6] = base->undef; /* Y */
		args[7] = base->undef; /* Z */
		args[8] = base->undef; /* W */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9, 0);
	}

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Epilog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7152
/**
 * Create & compile a vertex shader epilog. This is a helper used by VS and TES.
 *
 * If PrimitiveID export is requested, one extra PARAM export slot is
 * appended after the regular outputs and recorded in
 * vs_output_param_offset before the epilog part is fetched (or reused)
 * from the screen-wide cache.
 */
static bool si_get_vs_epilog(struct si_screen *sscreen,
			     LLVMTargetMachineRef tm,
			     struct si_shader *shader,
			     struct pipe_debug_callback *debug,
			     struct si_vs_epilog_bits *states)
{
	union si_shader_part_key epilog_key;

	/* Zero the whole key: si_get_shader_part compares keys with memcmp,
	 * so padding must not contain garbage. */
	memset(&epilog_key, 0, sizeof(epilog_key));
	epilog_key.vs_epilog.states = *states;

	/* Set up the PrimitiveID output.
	 * NOTE(review): this reads shader->key.vs.epilog even when called
	 * from the TES path with &key.tes.epilog — presumably the vs/tes
	 * epilog bits alias within the key union; confirm the layouts. */
	if (shader->key.vs.epilog.export_prim_id) {
		unsigned index = shader->selector->info.num_outputs;
		unsigned offset = shader->info.nr_param_exports++;

		epilog_key.vs_epilog.prim_id_param_offset = offset;
		assert(index < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[index] = offset;
	}

	shader->epilog = si_get_shader_part(sscreen, &sscreen->vs_epilogs,
					    &epilog_key, tm, debug,
					    si_compile_vs_epilog);
	return shader->epilog != NULL;
}
7182
7183 /**
7184 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7185 */
7186 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7187 LLVMTargetMachineRef tm,
7188 struct si_shader *shader,
7189 struct pipe_debug_callback *debug)
7190 {
7191 struct tgsi_shader_info *info = &shader->selector->info;
7192 union si_shader_part_key prolog_key;
7193 unsigned i;
7194
7195 /* Get the prolog. */
7196 memset(&prolog_key, 0, sizeof(prolog_key));
7197 prolog_key.vs_prolog.states = shader->key.vs.prolog;
7198 prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
7199 prolog_key.vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
7200
7201 /* The prolog is a no-op if there are no inputs. */
7202 if (info->num_inputs) {
7203 shader->prolog =
7204 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7205 &prolog_key, tm, debug,
7206 si_compile_vs_prolog);
7207 if (!shader->prolog)
7208 return false;
7209 }
7210
7211 /* Get the epilog. */
7212 if (!shader->key.vs.as_es && !shader->key.vs.as_ls &&
7213 !si_get_vs_epilog(sscreen, tm, shader, debug,
7214 &shader->key.vs.epilog))
7215 return false;
7216
7217 /* Set the instanceID flag. */
7218 for (i = 0; i < info->num_inputs; i++)
7219 if (prolog_key.vs_prolog.states.instance_divisors[i])
7220 shader->info.uses_instanceid = true;
7221
7222 return true;
7223 }
7224
7225 /**
7226 * Select and compile (or reuse) TES parts (epilog).
7227 */
7228 static bool si_shader_select_tes_parts(struct si_screen *sscreen,
7229 LLVMTargetMachineRef tm,
7230 struct si_shader *shader,
7231 struct pipe_debug_callback *debug)
7232 {
7233 if (shader->key.tes.as_es)
7234 return true;
7235
7236 /* TES compiled as VS. */
7237 return si_get_vs_epilog(sscreen, tm, shader, debug,
7238 &shader->key.tes.epilog);
7239 }
7240
/**
 * Compile the TCS epilog. This writes tesselation factors to memory based on
 * the output primitive type of the tesselator (determined by TES).
 *
 * \return true if the compiled binary was stored in \p out
 */
static bool si_compile_tcs_epilog(struct si_screen *sscreen,
				  LLVMTargetMachineRef tm,
				  struct pipe_debug_callback *debug,
				  struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
	LLVMTypeRef params[16];
	LLVMValueRef func;
	int last_sgpr, num_params;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_TESS_CTRL;
	shader.key.tcs.epilog = key->tcs_epilog.states;

	/* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used,
	 * but the signature must match the main TCS part's SGPR layout,
	 * so all slots up to last_sgpr are declared. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx.v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_OUT_OFFSETS] = ctx.i32;
	params[SI_PARAM_TCS_OUT_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_IN_LAYOUT] = ctx.i32;
	params[ctx.param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx.i32;
	params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx.i32;
	last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
	num_params = last_sgpr + 1;

	/* Three VGPR inputs follow the SGPRs. */
	params[num_params++] = ctx.i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx.i32; /* invocation ID within the patch */
	params[num_params++] = ctx.i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
	declare_tess_lds(&ctx);
	func = ctx.main_fn;

	/* Pass the three VGPRs (declared right after last_sgpr above). */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_TESS_CTRL));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Tessellation Control Shader Epilog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7306
7307 /**
7308 * Select and compile (or reuse) TCS parts (epilog).
7309 */
7310 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7311 LLVMTargetMachineRef tm,
7312 struct si_shader *shader,
7313 struct pipe_debug_callback *debug)
7314 {
7315 union si_shader_part_key epilog_key;
7316
7317 /* Get the epilog. */
7318 memset(&epilog_key, 0, sizeof(epilog_key));
7319 epilog_key.tcs_epilog.states = shader->key.tcs.epilog;
7320
7321 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7322 &epilog_key, tm, debug,
7323 si_compile_tcs_epilog);
7324 return shader->epilog != NULL;
7325 }
7326
7327 /**
7328 * Compile the pixel shader prolog. This handles:
7329 * - two-side color selection and interpolation
7330 * - overriding interpolation parameters for the API PS
7331 * - polygon stippling
7332 *
7333 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
7334 * overriden by other states. (e.g. per-sample interpolation)
7335 * Interpolated colors are stored after the preloaded VGPRs.
7336 */
7337 static bool si_compile_ps_prolog(struct si_screen *sscreen,
7338 LLVMTargetMachineRef tm,
7339 struct pipe_debug_callback *debug,
7340 struct si_shader_part *out)
7341 {
7342 union si_shader_part_key *key = &out->key;
7343 struct si_shader shader = {};
7344 struct si_shader_context ctx;
7345 struct gallivm_state *gallivm = &ctx.gallivm;
7346 LLVMTypeRef *params;
7347 LLVMValueRef ret, func;
7348 int last_sgpr, num_params, num_returns, i, num_color_channels;
7349 bool status = true;
7350
7351 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
7352 ctx.type = PIPE_SHADER_FRAGMENT;
7353 shader.key.ps.prolog = key->ps_prolog.states;
7354
7355 /* Number of inputs + 8 color elements. */
7356 params = alloca((key->ps_prolog.num_input_sgprs +
7357 key->ps_prolog.num_input_vgprs + 8) *
7358 sizeof(LLVMTypeRef));
7359
7360 /* Declare inputs. */
7361 num_params = 0;
7362 for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
7363 params[num_params++] = ctx.i32;
7364 last_sgpr = num_params - 1;
7365
7366 for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
7367 params[num_params++] = ctx.f32;
7368
7369 /* Declare outputs (same as inputs + add colors if needed) */
7370 num_returns = num_params;
7371 num_color_channels = util_bitcount(key->ps_prolog.colors_read);
7372 for (i = 0; i < num_color_channels; i++)
7373 params[num_returns++] = ctx.f32;
7374
7375 /* Create the function. */
7376 si_create_function(&ctx, params, num_returns, params,
7377 num_params, last_sgpr);
7378 func = ctx.main_fn;
7379
7380 /* Copy inputs to outputs. This should be no-op, as the registers match,
7381 * but it will prevent the compiler from overwriting them unintentionally.
7382 */
7383 ret = ctx.return_value;
7384 for (i = 0; i < num_params; i++) {
7385 LLVMValueRef p = LLVMGetParam(func, i);
7386 ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
7387 }
7388
7389 /* Polygon stippling. */
7390 if (key->ps_prolog.states.poly_stipple) {
7391 /* POS_FIXED_PT is always last. */
7392 unsigned pos = key->ps_prolog.num_input_sgprs +
7393 key->ps_prolog.num_input_vgprs - 1;
7394 LLVMValueRef ptr[2], list;
7395
7396 /* Get the pointer to rw buffers. */
7397 ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
7398 ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
7399 list = lp_build_gather_values(gallivm, ptr, 2);
7400 list = LLVMBuildBitCast(gallivm->builder, list, ctx.i64, "");
7401 list = LLVMBuildIntToPtr(gallivm->builder, list,
7402 const_array(ctx.v16i8, SI_NUM_RW_BUFFERS), "");
7403
7404 si_llvm_emit_polygon_stipple(&ctx, list, pos);
7405 }
7406
7407 if (key->ps_prolog.states.bc_optimize_for_persp ||
7408 key->ps_prolog.states.bc_optimize_for_linear) {
7409 unsigned i, base = key->ps_prolog.num_input_sgprs;
7410 LLVMValueRef center[2], centroid[2], tmp, bc_optimize;
7411
7412 /* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
7413 * The hw doesn't compute CENTROID if the whole wave only
7414 * contains fully-covered quads.
7415 *
7416 * PRIM_MASK is after user SGPRs.
7417 */
7418 bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
7419 bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
7420 LLVMConstInt(ctx.i32, 31, 0), "");
7421 bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
7422 ctx.i1, "");
7423
7424 if (key->ps_prolog.states.bc_optimize_for_persp) {
7425 /* Read PERSP_CENTER. */
7426 for (i = 0; i < 2; i++)
7427 center[i] = LLVMGetParam(func, base + 2 + i);
7428 /* Read PERSP_CENTROID. */
7429 for (i = 0; i < 2; i++)
7430 centroid[i] = LLVMGetParam(func, base + 4 + i);
7431 /* Select PERSP_CENTROID. */
7432 for (i = 0; i < 2; i++) {
7433 tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
7434 center[i], centroid[i], "");
7435 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7436 tmp, base + 4 + i, "");
7437 }
7438 }
7439 if (key->ps_prolog.states.bc_optimize_for_linear) {
7440 /* Read LINEAR_CENTER. */
7441 for (i = 0; i < 2; i++)
7442 center[i] = LLVMGetParam(func, base + 8 + i);
7443 /* Read LINEAR_CENTROID. */
7444 for (i = 0; i < 2; i++)
7445 centroid[i] = LLVMGetParam(func, base + 10 + i);
7446 /* Select LINEAR_CENTROID. */
7447 for (i = 0; i < 2; i++) {
7448 tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
7449 center[i], centroid[i], "");
7450 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7451 tmp, base + 10 + i, "");
7452 }
7453 }
7454 }
7455
7456 /* Force per-sample interpolation. */
7457 if (key->ps_prolog.states.force_persp_sample_interp) {
7458 unsigned i, base = key->ps_prolog.num_input_sgprs;
7459 LLVMValueRef persp_sample[2];
7460
7461 /* Read PERSP_SAMPLE. */
7462 for (i = 0; i < 2; i++)
7463 persp_sample[i] = LLVMGetParam(func, base + i);
7464 /* Overwrite PERSP_CENTER. */
7465 for (i = 0; i < 2; i++)
7466 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7467 persp_sample[i], base + 2 + i, "");
7468 /* Overwrite PERSP_CENTROID. */
7469 for (i = 0; i < 2; i++)
7470 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7471 persp_sample[i], base + 4 + i, "");
7472 }
7473 if (key->ps_prolog.states.force_linear_sample_interp) {
7474 unsigned i, base = key->ps_prolog.num_input_sgprs;
7475 LLVMValueRef linear_sample[2];
7476
7477 /* Read LINEAR_SAMPLE. */
7478 for (i = 0; i < 2; i++)
7479 linear_sample[i] = LLVMGetParam(func, base + 6 + i);
7480 /* Overwrite LINEAR_CENTER. */
7481 for (i = 0; i < 2; i++)
7482 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7483 linear_sample[i], base + 8 + i, "");
7484 /* Overwrite LINEAR_CENTROID. */
7485 for (i = 0; i < 2; i++)
7486 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7487 linear_sample[i], base + 10 + i, "");
7488 }
7489
7490 /* Force center interpolation. */
7491 if (key->ps_prolog.states.force_persp_center_interp) {
7492 unsigned i, base = key->ps_prolog.num_input_sgprs;
7493 LLVMValueRef persp_center[2];
7494
7495 /* Read PERSP_CENTER. */
7496 for (i = 0; i < 2; i++)
7497 persp_center[i] = LLVMGetParam(func, base + 2 + i);
7498 /* Overwrite PERSP_SAMPLE. */
7499 for (i = 0; i < 2; i++)
7500 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7501 persp_center[i], base + i, "");
7502 /* Overwrite PERSP_CENTROID. */
7503 for (i = 0; i < 2; i++)
7504 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7505 persp_center[i], base + 4 + i, "");
7506 }
7507 if (key->ps_prolog.states.force_linear_center_interp) {
7508 unsigned i, base = key->ps_prolog.num_input_sgprs;
7509 LLVMValueRef linear_center[2];
7510
7511 /* Read LINEAR_CENTER. */
7512 for (i = 0; i < 2; i++)
7513 linear_center[i] = LLVMGetParam(func, base + 8 + i);
7514 /* Overwrite LINEAR_SAMPLE. */
7515 for (i = 0; i < 2; i++)
7516 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7517 linear_center[i], base + 6 + i, "");
7518 /* Overwrite LINEAR_CENTROID. */
7519 for (i = 0; i < 2; i++)
7520 ret = LLVMBuildInsertValue(gallivm->builder, ret,
7521 linear_center[i], base + 10 + i, "");
7522 }
7523
7524 /* Interpolate colors. */
7525 for (i = 0; i < 2; i++) {
7526 unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
7527 unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
7528 key->ps_prolog.face_vgpr_index;
7529 LLVMValueRef interp[2], color[4];
7530 LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;
7531
7532 if (!writemask)
7533 continue;
7534
7535 /* If the interpolation qualifier is not CONSTANT (-1). */
7536 if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
7537 unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
7538 key->ps_prolog.color_interp_vgpr_index[i];
7539
7540 /* Get the (i,j) updated by bc_optimize handling. */
7541 interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
7542 interp_vgpr, "");
7543 interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
7544 interp_vgpr + 1, "");
7545 interp_ij = lp_build_gather_values(gallivm, interp, 2);
7546 interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
7547 ctx.v2i32, "");
7548 }
7549
7550 /* Use the absolute location of the input. */
7551 prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
7552
7553 if (key->ps_prolog.states.color_two_side) {
7554 face = LLVMGetParam(func, face_vgpr);
7555 face = LLVMBuildBitCast(gallivm->builder, face, ctx.i32, "");
7556 }
7557
7558 interp_fs_input(&ctx,
7559 key->ps_prolog.color_attr_index[i],
7560 TGSI_SEMANTIC_COLOR, i,
7561 key->ps_prolog.num_interp_inputs,
7562 key->ps_prolog.colors_read, interp_ij,
7563 prim_mask, face, color);
7564
7565 while (writemask) {
7566 unsigned chan = u_bit_scan(&writemask);
7567 ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
7568 num_params++, "");
7569 }
7570 }
7571
7572 /* Tell LLVM to insert WQM instruction sequence when needed. */
7573 if (key->ps_prolog.wqm) {
7574 LLVMAddTargetDependentFunctionAttr(func,
7575 "amdgpu-ps-wqm-outputs", "");
7576 }
7577
7578 /* Compile. */
7579 si_llvm_build_ret(&ctx, ret);
7580 si_llvm_finalize_module(&ctx,
7581 r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
7582
7583 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
7584 gallivm->module, debug, ctx.type,
7585 "Fragment Shader Prolog"))
7586 status = false;
7587
7588 si_llvm_dispose(&ctx);
7589 return status;
7590 }
7591
/**
 * Compile the pixel shader epilog. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 *
 * The epilog is a separate shader part keyed by \p out->key.ps_epilog; it
 * receives the API shader's outputs in VGPRs and turns them into the
 * actual hardware exports.
 *
 * \return true on successful compilation, false if si_compile_llvm failed.
 */
static bool si_compile_ps_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
	/* Worst case: SGPR params + 8 MRTs * 4 channels + Z, stencil and
	 * sample mask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params, i;
	bool status = true;
	struct si_ps_exports exp = {};

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_FRAGMENT;
	shader.key.ps.epilog = key->ps_epilog.states;

	/* Declare input SGPRs. */
	params[SI_PARAM_RW_BUFFERS] = ctx.i64;
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_ALPHA_REF] = ctx.f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 channels per written color buffer, plus one
	 * VGPR each for depth, stencil and sample mask if written. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Reserve at least up to the fixed sample-mask VGPR location. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx.main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, so that export can carry the "done"
	 * bit. Only applicable when no Z/stencil/samplemask export follows. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Read each written color (4 consecutive input VGPRs) and emit its
	 * MRT export. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx.main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx.main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx.main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx.main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		/* The hw requires at least one export per PS; emit a null
		 * export if nothing else was exported. */
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(&ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Fragment Shader Epilog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7712
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Builds the prolog and epilog keys from the shader key and TGSI info,
 * fetches (or compiles) the matching parts from the screen-wide caches,
 * and then fixes up SPI_PS_INPUT_ENA so the enabled interpolation weights
 * match what the selected parts actually consume.
 *
 * \return true on success, false if compiling a required part failed.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;
	unsigned i;

	/* Get the prolog. */
	memset(&prolog_key, 0, sizeof(prolog_key));
	prolog_key.ps_prolog.states = shader->key.ps.prolog;
	prolog_key.ps_prolog.colors_read = info->colors_read;
	prolog_key.ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	prolog_key.ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* WQM is only needed when the prolog itself interpolates (colors or
	 * forced/optimized interpolation) and the main part uses derivatives. */
	prolog_key.ps_prolog.wqm = info->uses_derivatives &&
		(prolog_key.ps_prolog.colors_read ||
		 prolog_key.ps_prolog.states.force_persp_sample_interp ||
		 prolog_key.ps_prolog.states.force_linear_sample_interp ||
		 prolog_key.ps_prolog.states.force_persp_center_interp ||
		 prolog_key.ps_prolog.states.force_linear_center_interp ||
		 prolog_key.ps_prolog.states.bc_optimize_for_persp ||
		 prolog_key.ps_prolog.states.bc_optimize_for_linear);

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			prolog_key.ps_prolog.num_interp_inputs = info->num_inputs;
			prolog_key.ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* For each of the two color inputs, record which interpolation
		 * (i,j) VGPR pair the prolog should use, and enable that pair
		 * in SPI_PS_INPUT_ENA. */
		for (i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			prolog_key.ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 means "no interpolation VGPRs needed". */
				prolog_key.ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The index picks the (i,j) pair within the PS
				 * input VGPR layout: 0 = PERSP_SAMPLE,
				 * 2 = PERSP_CENTER, 4 = PERSP_CENTROID. */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* 6 = LINEAR_SAMPLE, 8 = LINEAR_CENTER,
				 * 10 = LINEAR_CENTROID in the VGPR layout. */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 6;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 8;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					prolog_key.ps_prolog.color_interp_vgpr_index[i] = 10;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}

	/* The prolog is a no-op if these aren't set. */
	if (prolog_key.ps_prolog.colors_read ||
	    prolog_key.ps_prolog.states.force_persp_sample_interp ||
	    prolog_key.ps_prolog.states.force_linear_sample_interp ||
	    prolog_key.ps_prolog.states.force_persp_center_interp ||
	    prolog_key.ps_prolog.states.force_linear_center_interp ||
	    prolog_key.ps_prolog.states.bc_optimize_for_persp ||
	    prolog_key.ps_prolog.states.bc_optimize_for_linear ||
	    prolog_key.ps_prolog.states.poly_stipple) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   &prolog_key, tm, debug,
					   si_compile_ps_prolog);
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always needed (it does the exports). */
	memset(&epilog_key, 0, sizeof(epilog_key));
	epilog_key.ps_epilog.colors_written = info->colors_written;
	epilog_key.ps_epilog.writes_z = info->writes_z;
	epilog_key.ps_epilog.writes_stencil = info->writes_stencil;
	epilog_key.ps_epilog.writes_samplemask = info->writes_samplemask;
	epilog_key.ps_epilog.states = shader->key.ps.epilog;

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   &epilog_key, tm, debug,
				   si_compile_ps_epilog);
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each "force" state replaces the center/centroid enables with the
	 * single enable the prolog actually reads from. */
	if (shader->key.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7919
7920 static void si_fix_num_sgprs(struct si_shader *shader)
7921 {
7922 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7923
7924 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7925 }
7926
/**
 * Create a shader variant: either compile the shader monolithically, or
 * reuse the selector's precompiled main part and attach prolog/epilog
 * parts, then upload the final binary.
 *
 * \return 0 on success, a non-zero error code on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = sel->main_shader_part;
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 */
	if (!mainp ||
	    (sel->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es != mainp->key.vs.as_es ||
	      shader->key.vs.as_ls != mainp->key.vs.as_ls)) ||
	    (sel->type == PIPE_SHADER_TESS_EVAL &&
	     shader->key.tes.as_es != mainp->key.tes.as_es) ||
	    (sel->type == PIPE_SHADER_TESS_CTRL &&
	     shader->key.tcs.epilog.inputs_to_copy) ||
	    sel->type == PIPE_SHADER_COMPUTE) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over. */
		shader->is_binary_shared = true; /* binary is owned by mainp */
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (!si_shader_select_tes_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts so the register allocation
		 * covers the prolog and epilog parts as well. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_num_sgprs(shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8033
/**
 * Free all resources owned by a shader variant.
 *
 * Does not free \p shader itself; the caller owns that allocation.
 */
void si_shader_destroy(struct si_shader *shader)
{
	/* The GS copy shader is owned by this shader: destroy it
	 * recursively, then free its allocation. */
	if (shader->gs_copy_shader) {
		si_shader_destroy(shader->gs_copy_shader);
		FREE(shader->gs_copy_shader);
	}

	if (shader->scratch_bo)
		r600_resource_reference(&shader->scratch_bo, NULL);

	r600_resource_reference(&shader->bo, NULL);

	/* A shared binary belongs to the selector's main part (see
	 * si_shader_create); only clean it when this variant owns it. */
	if (!shader->is_binary_shared)
		radeon_shader_binary_clean(&shader->binary);

	free(shader->shader_log);
}