radeonsi: set num_input_vgprs for fragment shaders in create_function
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Tom Stellard <thomas.stellard@amd.com>
25 * Michel Dänzer <michel.daenzer@amd.com>
26 * Christian König <christian.koenig@amd.com>
27 */
28
29 #include "gallivm/lp_bld_const.h"
30 #include "gallivm/lp_bld_gather.h"
31 #include "gallivm/lp_bld_intr.h"
32 #include "gallivm/lp_bld_logic.h"
33 #include "gallivm/lp_bld_arit.h"
34 #include "gallivm/lp_bld_flow.h"
35 #include "gallivm/lp_bld_misc.h"
36 #include "radeon/radeon_elf_util.h"
37 #include "util/u_memory.h"
38 #include "util/u_string.h"
39 #include "tgsi/tgsi_build.h"
40 #include "tgsi/tgsi_util.h"
41 #include "tgsi/tgsi_dump.h"
42
43 #include "ac_llvm_util.h"
44 #include "si_shader_internal.h"
45 #include "si_pipe.h"
46 #include "sid.h"
47
48
/* Relocation symbol names for the scratch buffer resource descriptor.
 * NOTE(review): presumably these are patched with real descriptor dwords
 * when the shader binary is uploaded — confirm against the ELF reloc code. */
static const char *scratch_rsrc_dword0_symbol =
	"SCRATCH_RSRC_DWORD0";

static const char *scratch_rsrc_dword1_symbol =
	"SCRATCH_RSRC_DWORD1";
54
/* One scalarized shader output: up to four channel values plus the TGSI
 * semantic (name + index) identifying which output they belong to. */
struct si_shader_output_values
{
	LLVMValueRef values[4];	/* one value per channel (x, y, z, w) */
	unsigned name;		/* TGSI_SEMANTIC_* */
	unsigned sid;		/* semantic index */
};
61
/* Forward declarations for helpers defined later in this file. */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm);

static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
				 struct lp_build_tgsi_context *bld_base,
				 struct lp_build_emit_data *emit_data);

static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
			       FILE *f);

static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
76
/* Ideally pass the sample mask input to the PS epilog as v13, which
 * is its usual location, so that the shader doesn't have to add v_mov.
 */
#define PS_EPILOG_SAMPLEMASK_MIN_LOC 13

/* The VS location of the PrimitiveID input is the same in the epilog,
 * so that the main shader part doesn't have to move it.
 */
#define VS_EPILOG_PRIMID_LOC 2

/* LLVM address-space numbers used when building pointers.
 * NOTE(review): these match the AMDGPU backend's constant/LDS address
 * spaces — verify against the LLVM target if the backend changes. */
enum {
	CONST_ADDR_SPACE = 2,
	LOCAL_ADDR_SPACE = 3,
};

/* s_sendmsg message types used by the GS pipeline. */
#define SENDMSG_GS 2
#define SENDMSG_GS_DONE 3

/* GS operation, encoded in bits [5:4] of the sendmsg immediate. */
#define SENDMSG_GS_OP_NOP (0 << 4)
#define SENDMSG_GS_OP_CUT (1 << 4)
#define SENDMSG_GS_OP_EMIT (2 << 4)
#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
99
100 /**
101 * Returns a unique index for a semantic name and index. The index must be
102 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
103 * calculated.
104 */
105 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index)
106 {
107 switch (semantic_name) {
108 case TGSI_SEMANTIC_POSITION:
109 return 0;
110 case TGSI_SEMANTIC_PSIZE:
111 return 1;
112 case TGSI_SEMANTIC_CLIPDIST:
113 assert(index <= 1);
114 return 2 + index;
115 case TGSI_SEMANTIC_GENERIC:
116 if (index <= 63-4)
117 return 4 + index;
118 else
119 /* same explanation as in the default statement,
120 * the only user hitting this is st/nine.
121 */
122 return 0;
123
124 /* patch indices are completely separate and thus start from 0 */
125 case TGSI_SEMANTIC_TESSOUTER:
126 return 0;
127 case TGSI_SEMANTIC_TESSINNER:
128 return 1;
129 case TGSI_SEMANTIC_PATCH:
130 return 2 + index;
131
132 default:
133 /* Don't fail here. The result of this function is only used
134 * for LS, TCS, TES, and GS, where legacy GL semantics can't
135 * occur, but this function is called for all vertex shaders
136 * before it's known whether LS will be compiled or not.
137 */
138 return 0;
139 }
140 }
141
142 /**
143 * Get the value of a shader input parameter and extract a bitfield.
144 */
145 static LLVMValueRef unpack_param(struct si_shader_context *ctx,
146 unsigned param, unsigned rshift,
147 unsigned bitwidth)
148 {
149 struct gallivm_state *gallivm = &ctx->gallivm;
150 LLVMValueRef value = LLVMGetParam(ctx->main_fn,
151 param);
152
153 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
154 value = bitcast(&ctx->soa.bld_base,
155 TGSI_TYPE_UNSIGNED, value);
156
157 if (rshift)
158 value = LLVMBuildLShr(gallivm->builder, value,
159 lp_build_const_int32(gallivm, rshift), "");
160
161 if (rshift + bitwidth < 32) {
162 unsigned mask = (1 << bitwidth) - 1;
163 value = LLVMBuildAnd(gallivm->builder, value,
164 lp_build_const_int32(gallivm, mask), "");
165 }
166
167 return value;
168 }
169
170 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
171 {
172 switch (ctx->type) {
173 case PIPE_SHADER_TESS_CTRL:
174 return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
175
176 case PIPE_SHADER_TESS_EVAL:
177 return LLVMGetParam(ctx->main_fn,
178 ctx->param_tes_rel_patch_id);
179
180 default:
181 assert(0);
182 return NULL;
183 }
184 }
185
186 /* Tessellation shaders pass outputs to the next shader using LDS.
187 *
188 * LS outputs = TCS inputs
189 * TCS outputs = TES inputs
190 *
191 * The LDS layout is:
192 * - TCS inputs for patch 0
193 * - TCS inputs for patch 1
194 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
195 * - ...
196 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
197 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
198 * - TCS outputs for patch 1
199 * - Per-patch TCS outputs for patch 1
200 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
201 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
202 * - ...
203 *
204 * All three shaders VS(LS), TCS, TES share the same LDS space.
205 */
206
207 static LLVMValueRef
208 get_tcs_in_patch_stride(struct si_shader_context *ctx)
209 {
210 if (ctx->type == PIPE_SHADER_VERTEX)
211 return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
212 else if (ctx->type == PIPE_SHADER_TESS_CTRL)
213 return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
214 else {
215 assert(0);
216 return NULL;
217 }
218 }
219
/* Stride of one TCS output patch in dwords (bits [12:0] of the
 * TCS_OUT_LAYOUT SGPR). */
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
}
225
226 static LLVMValueRef
227 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
228 {
229 return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
230 unpack_param(ctx,
231 SI_PARAM_TCS_OUT_OFFSETS,
232 0, 16),
233 4);
234 }
235
236 static LLVMValueRef
237 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
238 {
239 return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
240 unpack_param(ctx,
241 SI_PARAM_TCS_OUT_OFFSETS,
242 16, 16),
243 4);
244 }
245
246 static LLVMValueRef
247 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
248 {
249 struct gallivm_state *gallivm = &ctx->gallivm;
250 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
251 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
252
253 return LLVMBuildMul(gallivm->builder, patch_stride, rel_patch_id, "");
254 }
255
256 static LLVMValueRef
257 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
258 {
259 struct gallivm_state *gallivm = &ctx->gallivm;
260 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
261 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
262 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
263
264 return LLVMBuildAdd(gallivm->builder, patch0_offset,
265 LLVMBuildMul(gallivm->builder, patch_stride,
266 rel_patch_id, ""),
267 "");
268 }
269
270 static LLVMValueRef
271 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
272 {
273 struct gallivm_state *gallivm = &ctx->gallivm;
274 LLVMValueRef patch0_patch_data_offset =
275 get_tcs_out_patch0_patch_data_offset(ctx);
276 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
277 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
278
279 return LLVMBuildAdd(gallivm->builder, patch0_patch_data_offset,
280 LLVMBuildMul(gallivm->builder, patch_stride,
281 rel_patch_id, ""),
282 "");
283 }
284
285 static LLVMValueRef build_gep0(struct si_shader_context *ctx,
286 LLVMValueRef base_ptr, LLVMValueRef index)
287 {
288 LLVMValueRef indices[2] = {
289 LLVMConstInt(ctx->i32, 0, 0),
290 index,
291 };
292 return LLVMBuildGEP(ctx->gallivm.builder, base_ptr,
293 indices, 2, "");
294 }
295
296 static void build_indexed_store(struct si_shader_context *ctx,
297 LLVMValueRef base_ptr, LLVMValueRef index,
298 LLVMValueRef value)
299 {
300 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
301 struct gallivm_state *gallivm = bld_base->base.gallivm;
302
303 LLVMBuildStore(gallivm->builder, value,
304 build_gep0(ctx, base_ptr, index));
305 }
306
307 /**
308 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
309 * It's equivalent to doing a load from &base_ptr[index].
310 *
311 * \param base_ptr Where the array starts.
312 * \param index The element index into the array.
313 * \param uniform Whether the base_ptr and index can be assumed to be
314 * dynamically uniform
315 */
316 static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
317 LLVMValueRef base_ptr, LLVMValueRef index,
318 bool uniform)
319 {
320 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
321 struct gallivm_state *gallivm = bld_base->base.gallivm;
322 LLVMValueRef pointer;
323
324 pointer = build_gep0(ctx, base_ptr, index);
325 if (uniform)
326 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
327 return LLVMBuildLoad(gallivm->builder, pointer, "");
328 }
329
330 /**
331 * Do a load from &base_ptr[index], but also add a flag that it's loading
332 * a constant from a dynamically uniform index.
333 */
334 static LLVMValueRef build_indexed_load_const(
335 struct si_shader_context *ctx,
336 LLVMValueRef base_ptr, LLVMValueRef index)
337 {
338 LLVMValueRef result = build_indexed_load(ctx, base_ptr, index, true);
339 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
340 return result;
341 }
342
343 static LLVMValueRef get_instance_index_for_fetch(
344 struct si_shader_context *radeon_bld,
345 unsigned param_start_instance, unsigned divisor)
346 {
347 struct si_shader_context *ctx =
348 si_shader_context(&radeon_bld->soa.bld_base);
349 struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
350
351 LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
352 ctx->param_instance_id);
353
354 /* The division must be done before START_INSTANCE is added. */
355 if (divisor > 1)
356 result = LLVMBuildUDiv(gallivm->builder, result,
357 lp_build_const_int32(gallivm, divisor), "");
358
359 return LLVMBuildAdd(gallivm->builder, result,
360 LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
361 }
362
/**
 * Declare (fetch) one vertex shader input.
 *
 * Loads the vertex-buffer descriptor for \p input_index, computes the
 * buffer index (from the prolog VGPR, instancing, or VertexID+BaseVertex),
 * fetches the attribute via llvm.SI.vs.load.input and scalarizes the
 * resulting vec4 into \p out[0..3].
 */
static void declare_input_vs(
	struct si_shader_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = base->gallivm;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	/* Per-input instance divisor from the shader key (0 = not instanced). */
	unsigned divisor =
		ctx->shader->key.vs.prolog.instance_divisors[input_index];

	unsigned chan;

	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index;
	LLVMValueRef args[3];
	LLVMValueRef input;

	/* Load the T list (vertex buffer descriptor array). */
	t_list_ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_VERTEX_BUFFERS);

	t_offset = lp_build_const_int32(gallivm, input_index);

	t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);

	/* Build the attribute offset */
	attribute_offset = lp_build_const_int32(gallivm, 0);

	if (!ctx->no_prolog) {
		/* A prolog already computed the per-input index and passed
		 * it in as a VGPR parameter. */
		buffer_index = LLVMGetParam(radeon_bld->main_fn,
					    ctx->param_vertex_index0 +
					    input_index);
	} else if (divisor) {
		/* Build index from instance ID, start instance and divisor */
		ctx->shader->info.uses_instanceid = true;
		buffer_index = get_instance_index_for_fetch(ctx,
							    SI_PARAM_START_INSTANCE,
							    divisor);
	} else {
		/* Load the buffer index for vertices. */
		LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
						      ctx->param_vertex_id);
		LLVMValueRef base_vertex = LLVMGetParam(radeon_bld->main_fn,
							SI_PARAM_BASE_VERTEX);
		buffer_index = LLVMBuildAdd(gallivm->builder, base_vertex, vertex_id, "");
	}

	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index;
	input = lp_build_intrinsic(gallivm->builder,
		"llvm.SI.vs.load.input", ctx->v4f32, args, 3,
		LLVMReadNoneAttribute);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		out[chan] = LLVMBuildExtractElement(gallivm->builder,
						    input, llvm_chan, "");
	}
}
429
430 static LLVMValueRef get_primitive_id(struct lp_build_tgsi_context *bld_base,
431 unsigned swizzle)
432 {
433 struct si_shader_context *ctx = si_shader_context(bld_base);
434
435 if (swizzle > 0)
436 return bld_base->uint_bld.zero;
437
438 switch (ctx->type) {
439 case PIPE_SHADER_VERTEX:
440 return LLVMGetParam(ctx->main_fn,
441 ctx->param_vs_prim_id);
442 case PIPE_SHADER_TESS_CTRL:
443 return LLVMGetParam(ctx->main_fn,
444 SI_PARAM_PATCH_ID);
445 case PIPE_SHADER_TESS_EVAL:
446 return LLVMGetParam(ctx->main_fn,
447 ctx->param_tes_patch_id);
448 case PIPE_SHADER_GEOMETRY:
449 return LLVMGetParam(ctx->main_fn,
450 SI_PARAM_PRIMITIVE_ID);
451 default:
452 assert(0);
453 return bld_base->uint_bld.zero;
454 }
455 }
456
457 /**
458 * Return the value of tgsi_ind_register for indexing.
459 * This is the indirect index with the constant offset added to it.
460 */
461 static LLVMValueRef get_indirect_index(struct si_shader_context *ctx,
462 const struct tgsi_ind_register *ind,
463 int rel_index)
464 {
465 struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
466 LLVMValueRef result;
467
468 result = ctx->soa.addr[ind->Index][ind->Swizzle];
469 result = LLVMBuildLoad(gallivm->builder, result, "");
470 result = LLVMBuildAdd(gallivm->builder, result,
471 lp_build_const_int32(gallivm, rel_index), "");
472 return result;
473 }
474
475 /**
476 * Like get_indirect_index, but restricts the return value to a (possibly
477 * undefined) value inside [0..num).
478 */
479 static LLVMValueRef get_bounded_indirect_index(struct si_shader_context *ctx,
480 const struct tgsi_ind_register *ind,
481 int rel_index, unsigned num)
482 {
483 LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
484
485 /* LLVM 3.8: If indirect resource indexing is used:
486 * - SI & CIK hang
487 * - VI crashes
488 */
489 if (HAVE_LLVM <= 0x0308)
490 return LLVMGetUndef(ctx->i32);
491
492 return si_llvm_bound_index(ctx, result, num);
493 }
494
495
/**
 * Calculate a dword address given an input or output register and a stride.
 *
 * \param dst  destination register (used when \p src is NULL)
 * \param src  source register, or NULL to address \p dst
 * \param vertex_dw_stride  dword stride between vertices of a 2D register
 * \param base_addr  starting dword address to offset from
 * \return base_addr advanced to the addressed element, in dwords
 */
static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
				   const struct tgsi_full_dst_register *dst,
				   const struct tgsi_full_src_register *src,
				   LLVMValueRef vertex_dw_stride,
				   LLVMValueRef base_addr)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	int first, param;
	struct tgsi_full_dst_register reg;

	/* Set the register description. The address computation is the same
	 * for sources and destinations. */
	if (src) {
		reg.Register.File = src->Register.File;
		reg.Register.Index = src->Register.Index;
		reg.Register.Indirect = src->Register.Indirect;
		reg.Register.Dimension = src->Register.Dimension;
		reg.Indirect = src->Indirect;
		reg.Dimension = src->Dimension;
		reg.DimIndirect = src->DimIndirect;
	} else
		reg = *dst;

	/* If the register is 2-dimensional (e.g. an array of vertices
	 * in a primitive), calculate the base address of the vertex. */
	if (reg.Register.Dimension) {
		LLVMValueRef index;	/* note: shadows the outer 'index' */

		if (reg.Dimension.Indirect)
			index = get_indirect_index(ctx, &reg.DimIndirect,
						   reg.Dimension.Index);
		else
			index = lp_build_const_int32(gallivm, reg.Dimension.Index);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, index,
						      vertex_dw_stride, ""), "");
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Add the relative address of the element. */
		LLVMValueRef ind_index;

		/* Indirect accesses through a declared array are relative
		 * to that array's first register. */
		if (reg.Indirect.ArrayID)
			first = array_first[reg.Indirect.ArrayID];
		else
			first = reg.Register.Index;

		ind_index = get_indirect_index(ctx, &reg.Indirect,
					       reg.Register.Index - first);

		/* 4 dwords per register (one per channel). */
		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 LLVMBuildMul(gallivm->builder, ind_index,
						      lp_build_const_int32(gallivm, 4), ""), "");

		param = si_shader_io_get_unique_index(name[first], index[first]);
	} else {
		param = si_shader_io_get_unique_index(name[reg.Register.Index],
						      index[reg.Register.Index]);
	}

	/* Add the base address of the element. */
	return LLVMBuildAdd(gallivm->builder, base_addr,
			    lp_build_const_int32(gallivm, param * 4), "");
}
580
581 /* The offchip buffer layout for TCS->TES is
582 *
583 * - attribute 0 of patch 0 vertex 0
584 * - attribute 0 of patch 0 vertex 1
585 * - attribute 0 of patch 0 vertex 2
586 * ...
587 * - attribute 0 of patch 1 vertex 0
588 * - attribute 0 of patch 1 vertex 1
589 * ...
590 * - attribute 1 of patch 0 vertex 0
591 * - attribute 1 of patch 0 vertex 1
592 * ...
593 * - per patch attribute 0 of patch 0
594 * - per patch attribute 0 of patch 1
595 * ...
596 *
597 * Note that every attribute has 4 components.
598 */
/**
 * Compute a byte address in the TCS->TES off-chip buffer (layout above).
 *
 * \param vertex_index  vertex within the patch, or NULL for per-patch data
 * \param param_index   attribute index
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	/* TCS_OFFCHIP_LAYOUT: bits [14:9] = vertices per patch,
	 * bits [8:0] = number of patches. */
	vertices_per_patch = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 6);
	num_patches = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 0, 9);
	total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
				      num_patches, "");

	constant16 = lp_build_const_int32(gallivm, 16);
	if (vertex_index) {
		/* Per-vertex attribute: linear vertex number =
		 * rel_patch_id * vertices_per_patch + vertex_index;
		 * attributes are strided by the total vertex count. */
		base_addr = LLVMBuildMul(gallivm->builder, get_rel_patch_id(ctx),
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 vertex_index, "");

		param_stride = total_vertices;
	} else {
		/* Per-patch attribute: indexed by patch, strided by the
		 * number of patches. */
		base_addr = get_rel_patch_id(ctx);
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
				 LLVMBuildMul(gallivm->builder, param_index,
					      param_stride, ""), "");

	/* Every attribute has 4 components of 4 bytes = 16 bytes. */
	base_addr = LLVMBuildMul(gallivm->builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes;
		 * bits [31:16] of TCS_OFFCHIP_LAYOUT hold that offset. */
		LLVMValueRef patch_data_offset =
			unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 16, 16);

		base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
641
/**
 * Compute the off-chip buffer address for a TGSI register.
 *
 * Extracts the vertex index (for 2D registers) and the attribute index
 * (handling indirect addressing) from \p dst or \p src, then delegates
 * to get_tcs_tes_buffer_address.  Exactly one of \p dst / \p src is used.
 */
static LLVMValueRef get_tcs_tes_buffer_address_from_reg(
				struct si_shader_context *ctx,
				const struct tgsi_full_dst_register *dst,
				const struct tgsi_full_src_register *src)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	ubyte *name, *index, *array_first;
	struct tgsi_full_src_register reg;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef param_index = NULL;
	unsigned param_index_base, param_base;

	reg = src ? *src : tgsi_full_src_register_from_dst(dst);

	/* A 2D register addresses a specific vertex of the patch. */
	if (reg.Register.Dimension) {

		if (reg.Dimension.Indirect)
			vertex_index = get_indirect_index(ctx, &reg.DimIndirect,
							  reg.Dimension.Index);
		else
			vertex_index = lp_build_const_int32(gallivm,
							    reg.Dimension.Index);
	}

	/* Get information about the register. */
	if (reg.Register.File == TGSI_FILE_INPUT) {
		name = info->input_semantic_name;
		index = info->input_semantic_index;
		array_first = info->input_array_first;
	} else if (reg.Register.File == TGSI_FILE_OUTPUT) {
		name = info->output_semantic_name;
		index = info->output_semantic_index;
		array_first = info->output_array_first;
	} else {
		assert(0);
		return NULL;
	}

	if (reg.Register.Indirect) {
		/* Indirect accesses through a declared array are relative
		 * to that array's first register. */
		if (reg.Indirect.ArrayID)
			param_base = array_first[reg.Indirect.ArrayID];
		else
			param_base = reg.Register.Index;

		param_index = get_indirect_index(ctx, &reg.Indirect,
						 reg.Register.Index - param_base);

	} else {
		param_base = reg.Register.Index;
		param_index = lp_build_const_int32(gallivm, 0);
	}

	/* Translate the semantic of the base register into the unique
	 * attribute slot, then add the (possibly indirect) offset. */
	param_index_base = si_shader_io_get_unique_index(name[param_base],
							 index[param_base]);

	param_index = LLVMBuildAdd(gallivm->builder, param_index,
				   lp_build_const_int32(gallivm, param_index_base),
				   "");

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
704
705 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
706 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
707 * or v4i32 (num_channels=3,4). */
708 static void build_tbuffer_store(struct si_shader_context *ctx,
709 LLVMValueRef rsrc,
710 LLVMValueRef vdata,
711 unsigned num_channels,
712 LLVMValueRef vaddr,
713 LLVMValueRef soffset,
714 unsigned inst_offset,
715 unsigned dfmt,
716 unsigned nfmt,
717 unsigned offen,
718 unsigned idxen,
719 unsigned glc,
720 unsigned slc,
721 unsigned tfe)
722 {
723 struct gallivm_state *gallivm = &ctx->gallivm;
724 LLVMValueRef args[] = {
725 rsrc,
726 vdata,
727 LLVMConstInt(ctx->i32, num_channels, 0),
728 vaddr,
729 soffset,
730 LLVMConstInt(ctx->i32, inst_offset, 0),
731 LLVMConstInt(ctx->i32, dfmt, 0),
732 LLVMConstInt(ctx->i32, nfmt, 0),
733 LLVMConstInt(ctx->i32, offen, 0),
734 LLVMConstInt(ctx->i32, idxen, 0),
735 LLVMConstInt(ctx->i32, glc, 0),
736 LLVMConstInt(ctx->i32, slc, 0),
737 LLVMConstInt(ctx->i32, tfe, 0)
738 };
739
740 /* The instruction offset field has 12 bits */
741 assert(offen || inst_offset < (1 << 12));
742
743 /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
744 unsigned func = CLAMP(num_channels, 1, 3) - 1;
745 const char *types[] = {"i32", "v2i32", "v4i32"};
746 char name[256];
747 snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
748
749 lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
750 args, ARRAY_SIZE(args), 0);
751 }
752
753 static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
754 LLVMValueRef rsrc,
755 LLVMValueRef vdata,
756 unsigned num_channels,
757 LLVMValueRef vaddr,
758 LLVMValueRef soffset,
759 unsigned inst_offset)
760 {
761 static unsigned dfmt[] = {
762 V_008F0C_BUF_DATA_FORMAT_32,
763 V_008F0C_BUF_DATA_FORMAT_32_32,
764 V_008F0C_BUF_DATA_FORMAT_32_32_32,
765 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
766 };
767 assert(num_channels >= 1 && num_channels <= 4);
768
769 build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
770 inst_offset, dfmt[num_channels-1],
771 V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
772 }
773
/**
 * Emit a typed buffer load of 1, 2, or 4 dwords (v4 also covers 3).
 *
 * With LLVM >= 3.9 this uses llvm.amdgcn.buffer.load.* (float results,
 * voffset/soffset folded into the immediate offset argument); older LLVM
 * falls back to llvm.SI.buffer.load.dword.* (integer results, explicit
 * offen/idxen flags).
 */
static LLVMValueRef build_buffer_load(struct si_shader_context *ctx,
				      LLVMValueRef rsrc,
				      int num_channels,
				      LLVMValueRef vindex,
				      LLVMValueRef voffset,
				      LLVMValueRef soffset,
				      unsigned inst_offset,
				      unsigned glc,
				      unsigned slc)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	if (HAVE_LLVM >= 0x309) {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, ""),
			vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0)
		};

		LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
				       ctx->v4f32};
		const char *type_names[] = {"f32", "v2f32", "v4f32"};
		char name[256];

		/* The amdgcn intrinsic takes a single combined offset:
		 * add the variable offsets onto the immediate one. */
		if (voffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], voffset,
					       "");
		}

		if (soffset) {
			args[2] = LLVMBuildAdd(gallivm->builder, args[2], soffset,
					       "");
		}

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
			 type_names[func]);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	} else {
		LLVMValueRef args[] = {
			LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v16i8, ""),
			voffset ? voffset : vindex,
			soffset,
			LLVMConstInt(ctx->i32, inst_offset, 0),
			LLVMConstInt(ctx->i32, voffset ? 1 : 0, 0), // offen
			LLVMConstInt(ctx->i32, vindex ? 1 : 0, 0), //idxen
			LLVMConstInt(ctx->i32, glc, 0),
			LLVMConstInt(ctx->i32, slc, 0),
			LLVMConstInt(ctx->i32, 0, 0), // TFE
		};

		LLVMTypeRef types[] = {ctx->i32, LLVMVectorType(ctx->i32, 2),
				       ctx->v4i32};
		const char *type_names[] = {"i32", "v2i32", "v4i32"};
		const char *arg_type = "i32";
		char name[256];

		/* When both an index and an offset are present, the address
		 * argument becomes a <2 x i32> of {vindex, voffset}. */
		if (voffset && vindex) {
			LLVMValueRef vaddr[] = {vindex, voffset};

			arg_type = "v2i32";
			args[1] = lp_build_gather_values(gallivm, vaddr, 2);
		}

		snprintf(name, sizeof(name), "llvm.SI.buffer.load.dword.%s.%s",
			 type_names[func], arg_type);

		return lp_build_intrinsic(gallivm->builder, name, types[func], args,
					  ARRAY_SIZE(args), LLVMReadOnlyAttribute);
	}
}
849
850 static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
851 enum tgsi_opcode_type type, unsigned swizzle,
852 LLVMValueRef buffer, LLVMValueRef offset,
853 LLVMValueRef base)
854 {
855 struct si_shader_context *ctx = si_shader_context(bld_base);
856 struct gallivm_state *gallivm = bld_base->base.gallivm;
857 LLVMValueRef value, value2;
858 LLVMTypeRef llvm_type = tgsi2llvmtype(bld_base, type);
859 LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);
860
861 if (swizzle == ~0) {
862 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
863 0, 1, 0);
864
865 return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
866 }
867
868 if (!tgsi_type_is_64bit(type)) {
869 value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
870 0, 1, 0);
871
872 value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
873 return LLVMBuildExtractElement(gallivm->builder, value,
874 lp_build_const_int32(gallivm, swizzle), "");
875 }
876
877 value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
878 swizzle * 4, 1, 0);
879
880 value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
881 swizzle * 4 + 4, 1, 0);
882
883 return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
884 }
885
886 /**
887 * Load from LDS.
888 *
889 * \param type output value type
890 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
891 * \param dw_addr address in dwords
892 */
893 static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
894 enum tgsi_opcode_type type, unsigned swizzle,
895 LLVMValueRef dw_addr)
896 {
897 struct si_shader_context *ctx = si_shader_context(bld_base);
898 struct gallivm_state *gallivm = bld_base->base.gallivm;
899 LLVMValueRef value;
900
901 if (swizzle == ~0) {
902 LLVMValueRef values[TGSI_NUM_CHANNELS];
903
904 for (unsigned chan = 0; chan < TGSI_NUM_CHANNELS; chan++)
905 values[chan] = lds_load(bld_base, type, chan, dw_addr);
906
907 return lp_build_gather_values(bld_base->base.gallivm, values,
908 TGSI_NUM_CHANNELS);
909 }
910
911 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
912 lp_build_const_int32(gallivm, swizzle));
913
914 value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
915 if (tgsi_type_is_64bit(type)) {
916 LLVMValueRef value2;
917 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
918 lp_build_const_int32(gallivm, 1));
919 value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
920 return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
921 }
922
923 return LLVMBuildBitCast(gallivm->builder, value,
924 tgsi2llvmtype(bld_base, type), "");
925 }
926
927 /**
928 * Store to LDS.
929 *
930 * \param swizzle offset (typically 0..3)
931 * \param dw_addr address in dwords
932 * \param value value to store
933 */
934 static void lds_store(struct lp_build_tgsi_context *bld_base,
935 unsigned swizzle, LLVMValueRef dw_addr,
936 LLVMValueRef value)
937 {
938 struct si_shader_context *ctx = si_shader_context(bld_base);
939 struct gallivm_state *gallivm = bld_base->base.gallivm;
940
941 dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
942 lp_build_const_int32(gallivm, swizzle));
943
944 value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
945 build_indexed_store(ctx, ctx->lds,
946 dw_addr, value);
947 }
948
949 static LLVMValueRef fetch_input_tcs(
950 struct lp_build_tgsi_context *bld_base,
951 const struct tgsi_full_src_register *reg,
952 enum tgsi_opcode_type type, unsigned swizzle)
953 {
954 struct si_shader_context *ctx = si_shader_context(bld_base);
955 LLVMValueRef dw_addr, stride;
956
957 stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
958 dw_addr = get_tcs_in_current_patch_offset(ctx);
959 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
960
961 return lds_load(bld_base, type, swizzle, dw_addr);
962 }
963
964 static LLVMValueRef fetch_output_tcs(
965 struct lp_build_tgsi_context *bld_base,
966 const struct tgsi_full_src_register *reg,
967 enum tgsi_opcode_type type, unsigned swizzle)
968 {
969 struct si_shader_context *ctx = si_shader_context(bld_base);
970 LLVMValueRef dw_addr, stride;
971
972 if (reg->Register.Dimension) {
973 stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
974 dw_addr = get_tcs_out_current_patch_offset(ctx);
975 dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
976 } else {
977 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
978 dw_addr = get_dw_address(ctx, NULL, reg, NULL, dw_addr);
979 }
980
981 return lds_load(bld_base, type, swizzle, dw_addr);
982 }
983
984 static LLVMValueRef fetch_input_tes(
985 struct lp_build_tgsi_context *bld_base,
986 const struct tgsi_full_src_register *reg,
987 enum tgsi_opcode_type type, unsigned swizzle)
988 {
989 struct si_shader_context *ctx = si_shader_context(bld_base);
990 struct gallivm_state *gallivm = bld_base->base.gallivm;
991 LLVMValueRef rw_buffers, buffer, base, addr;
992
993 rw_buffers = LLVMGetParam(ctx->main_fn,
994 SI_PARAM_RW_BUFFERS);
995 buffer = build_indexed_load_const(ctx, rw_buffers,
996 lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
997
998 base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
999 addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
1000
1001 return buffer_load(bld_base, type, swizzle, buffer, base, addr);
1002 }
1003
/* Store a TCS output value.
 *
 * Each scalar output channel is written to two places:
 *  - LDS, where later TCS invocations / fetch_output_tcs can read it back;
 *  - the off-chip tessellation ring buffer, where the TES reads it.
 *
 * Full vec4 writes (writemask == 0xF) are gathered and emitted as a single
 * 4-dword buffer store; partial writes go out one dword per channel.
 */
static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
			     const struct tgsi_full_instruction *inst,
			     const struct tgsi_opcode_info *info,
			     LLVMValueRef dst[4])
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	unsigned chan_index;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef rw_buffers, buffer, base, buf_addr;
	LLVMValueRef values[4];

	/* Only handle per-patch and per-vertex outputs here.
	 * Vectors will be lowered to scalars and this function will be called again.
	 */
	if (reg->Register.File != TGSI_FILE_OUTPUT ||
	    (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
		si_llvm_emit_store(bld_base, inst, info, dst);
		return;
	}

	/* Compute the LDS dword address: per-vertex outputs use the output
	 * vertex stride (TCS_OUT_LAYOUT[20:13]); per-patch outputs use the
	 * patch data area without a stride. */
	if (reg->Register.Dimension) {
		stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
	}

	/* Descriptor and addresses for the off-chip tess ring store. */
	rw_buffers = LLVMGetParam(ctx->main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
	buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);


	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (inst->Instruction.Saturate)
			value = si_llvm_saturate(bld_base, value);

		/* Mirror the (possibly saturated) value into LDS. */
		lds_store(bld_base, chan_index, dw_addr, value);

		value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
		values[chan_index] = value;

		/* Partial writemask: store each channel individually. */
		if (inst->Dst[0].Register.WriteMask != 0xF) {
			build_tbuffer_store_dwords(ctx, buffer, value, 1,
						   buf_addr, base,
						   4 * chan_index);
		}
	}

	/* Full writemask: emit one combined 4-dword store instead. */
	if (inst->Dst[0].Register.WriteMask == 0xF) {
		LLVMValueRef value = lp_build_gather_values(bld_base->base.gallivm,
							    values, 4);
		build_tbuffer_store_dwords(ctx, buffer, value, 4, buf_addr,
					   base, 0);
	}
}
1069
/* Fetch one GS input component from the ES->GS ring buffer.
 *
 * A swizzle of ~0 means "load the whole vector": the function recurses
 * once per channel and gathers the results. PRIMID is special-cased
 * because it is not stored in the ring. 64-bit types are assembled from
 * two consecutive 32-bit ring loads.
 */
static LLVMValueRef fetch_input_gs(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct lp_build_context *base = &bld_base->base;
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	LLVMValueRef vtx_offset;
	LLVMValueRef args[9];
	unsigned vtx_offset_param;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[reg->Register.Index];
	unsigned semantic_index = info->input_semantic_index[reg->Register.Index];
	unsigned param;
	LLVMValueRef value;

	if (swizzle != ~0 && semantic_name == TGSI_SEMANTIC_PRIMID)
		return get_primitive_id(bld_base, swizzle);

	/* GS inputs must be per-vertex (two-dimensional). */
	if (!reg->Register.Dimension)
		return NULL;

	if (swizzle == ~0) {
		/* Whole-vector fetch: recurse per channel and gather. */
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = fetch_input_gs(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Get the vertex offset parameter */
	/* Vertices 0-1 and 2-5 live in two separate SGPR parameter ranges. */
	vtx_offset_param = reg->Dimension.Index;
	if (vtx_offset_param < 2) {
		vtx_offset_param += SI_PARAM_VTX0_OFFSET;
	} else {
		assert(vtx_offset_param < 6);
		vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
	}
	/* The hardware offset is in dwords; convert to bytes. */
	vtx_offset = lp_build_mul_imm(uint,
				      LLVMGetParam(ctx->main_fn,
						   vtx_offset_param),
				      4);

	param = si_shader_io_get_unique_index(semantic_name, semantic_index);
	args[0] = ctx->esgs_ring;
	args[1] = vtx_offset;
	/* Constant offset into the ring for this param/channel. */
	args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle) * 256);
	args[3] = uint->zero;
	args[4] = uint->one; /* OFFEN */
	args[5] = uint->zero; /* IDXEN */
	args[6] = uint->one; /* GLC */
	args[7] = uint->zero; /* SLC */
	args[8] = uint->zero; /* TFE */

	value = lp_build_intrinsic(gallivm->builder,
				   "llvm.SI.buffer.load.dword.i32.i32",
				   ctx->i32, args, 9,
				   LLVMReadOnlyAttribute);
	if (tgsi_type_is_64bit(type)) {
		/* Load the second dword and merge it with the first. */
		LLVMValueRef value2;
		args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
		value2 = lp_build_intrinsic(gallivm->builder,
					    "llvm.SI.buffer.load.dword.i32.i32",
					    ctx->i32, args, 9,
					    LLVMReadOnlyAttribute);
		return si_llvm_emit_fetch_64bit(bld_base, type,
						value, value2);
	}
	return LLVMBuildBitCast(gallivm->builder,
				value,
				tgsi2llvmtype(bld_base, type), "");
}
1148
1149 static int lookup_interp_param_index(unsigned interpolate, unsigned location)
1150 {
1151 switch (interpolate) {
1152 case TGSI_INTERPOLATE_CONSTANT:
1153 return 0;
1154
1155 case TGSI_INTERPOLATE_LINEAR:
1156 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1157 return SI_PARAM_LINEAR_SAMPLE;
1158 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1159 return SI_PARAM_LINEAR_CENTROID;
1160 else
1161 return SI_PARAM_LINEAR_CENTER;
1162 break;
1163 case TGSI_INTERPOLATE_COLOR:
1164 case TGSI_INTERPOLATE_PERSPECTIVE:
1165 if (location == TGSI_INTERPOLATE_LOC_SAMPLE)
1166 return SI_PARAM_PERSP_SAMPLE;
1167 else if (location == TGSI_INTERPOLATE_LOC_CENTROID)
1168 return SI_PARAM_PERSP_CENTROID;
1169 else
1170 return SI_PARAM_PERSP_CENTER;
1171 break;
1172 default:
1173 fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
1174 return -1;
1175 }
1176 }
1177
1178 /* This shouldn't be used by explicit INTERP opcodes. */
1179 static unsigned select_interp_param(struct si_shader_context *ctx,
1180 unsigned param)
1181 {
1182 if (!ctx->no_prolog)
1183 return param;
1184
1185 if (ctx->shader->key.ps.prolog.force_persp_sample_interp) {
1186 switch (param) {
1187 case SI_PARAM_PERSP_CENTROID:
1188 case SI_PARAM_PERSP_CENTER:
1189 return SI_PARAM_PERSP_SAMPLE;
1190 }
1191 }
1192 if (ctx->shader->key.ps.prolog.force_linear_sample_interp) {
1193 switch (param) {
1194 case SI_PARAM_LINEAR_CENTROID:
1195 case SI_PARAM_LINEAR_CENTER:
1196 return SI_PARAM_LINEAR_SAMPLE;
1197 }
1198 }
1199 if (ctx->shader->key.ps.prolog.force_persp_center_interp) {
1200 switch (param) {
1201 case SI_PARAM_PERSP_CENTROID:
1202 case SI_PARAM_PERSP_SAMPLE:
1203 return SI_PARAM_PERSP_CENTER;
1204 }
1205 }
1206 if (ctx->shader->key.ps.prolog.force_linear_center_interp) {
1207 switch (param) {
1208 case SI_PARAM_LINEAR_CENTROID:
1209 case SI_PARAM_LINEAR_SAMPLE:
1210 return SI_PARAM_LINEAR_CENTER;
1211 }
1212 }
1213
1214 return param;
1215 }
1216
/**
 * Interpolate a fragment shader input.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_name		TGSI_SEMANTIC_*
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_input(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_name,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	struct lp_build_context *base = &ctx->soa.bld_base.base;
	struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
	struct gallivm_state *gallivm = base->gallivm;
	const char *intr_name;
	LLVMValueRef attr_number;

	unsigned chan;

	attr_number = lp_build_const_int32(gallivm, input_index);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";

	if (semantic_name == TGSI_SEMANTIC_COLOR &&
	    ctx->shader->key.ps.prolog.color_two_side) {
		/* Two-sided lighting: interpolate both the front and the back
		 * color attribute and select per channel based on FRONT_FACE.
		 */
		LLVMValueRef args[4];
		LLVMValueRef is_face_positive;
		LLVMValueRef back_attr_number;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		back_attr_number = lp_build_const_int32(gallivm, back_attr_offset);

		is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
						 face, uint->zero, "");

		args[2] = prim_mask;
		args[3] = interp_param;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
			LLVMValueRef front, back;

			/* fs.constant takes 3 args (no weights); fs.interp
			 * takes 4 — hence the "args[3] ? 4 : 3" arg counts. */
			args[0] = llvm_chan;
			args[1] = attr_number;
			front = lp_build_intrinsic(gallivm->builder, intr_name,
						   ctx->f32, args, args[3] ? 4 : 3,
						   LLVMReadNoneAttribute);

			args[1] = back_attr_number;
			back = lp_build_intrinsic(gallivm->builder, intr_name,
						  ctx->f32, args, args[3] ? 4 : 3,
						  LLVMReadNoneAttribute);

			result[chan] = LLVMBuildSelect(gallivm->builder,
						       is_face_positive,
						       front,
						       back,
						       "");
		}
	} else if (semantic_name == TGSI_SEMANTIC_FOG) {
		/* Fog: only the X channel is interpolated; YZ = 0, W = 1. */
		LLVMValueRef args[4];

		args[0] = uint->zero;
		args[1] = attr_number;
		args[2] = prim_mask;
		args[3] = interp_param;
		result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
					       ctx->f32, args, args[3] ? 4 : 3,
					       LLVMReadNoneAttribute);
		result[1] =
		result[2] = lp_build_const_float(gallivm, 0.0f);
		result[3] = lp_build_const_float(gallivm, 1.0f);
	} else {
		/* Common case: interpolate all four channels. */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef args[4];
			LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);

			args[0] = llvm_chan;
			args[1] = attr_number;
			args[2] = prim_mask;
			args[3] = interp_param;
			result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
							  ctx->f32, args, args[3] ? 4 : 3,
							  LLVMReadNoneAttribute);
		}
	}
}
1333
/* LLVMGetParam with bc_optimize resolved.
 *
 * When the shader has no prolog and the key enables bc_optimize for the
 * requested CENTROID parameter, emit a runtime select between CENTER and
 * CENTROID weights based on PRIM_MASK bit 31.
 */
static LLVMValueRef get_interp_param(struct si_shader_context *ctx,
				     int interp_param_idx)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef main_fn = ctx->main_fn;
	LLVMValueRef param = NULL;

	/* Handle PRIM_MASK[31] (bc_optimize). */
	if (ctx->no_prolog &&
	    ((ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
	      interp_param_idx == SI_PARAM_PERSP_CENTROID) ||
	     (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
	      interp_param_idx == SI_PARAM_LINEAR_CENTROID))) {
		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 */
		/* Extract bit 31 of PRIM_MASK as an i1 condition. */
		LLVMValueRef bc_optimize =
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK);
		bc_optimize = LLVMBuildLShr(builder,
					    bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(builder, bc_optimize, ctx->i1, "");

		if (ctx->shader->key.ps.prolog.bc_optimize_for_persp &&
		    interp_param_idx == SI_PARAM_PERSP_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_PERSP_CENTROID),
						"");
		}
		if (ctx->shader->key.ps.prolog.bc_optimize_for_linear &&
		    interp_param_idx == SI_PARAM_LINEAR_CENTROID) {
			param = LLVMBuildSelect(builder, bc_optimize,
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTER),
						LLVMGetParam(main_fn,
							     SI_PARAM_LINEAR_CENTROID),
						"");
		}
	}

	/* Default: return the raw function parameter. */
	if (!param)
		param = LLVMGetParam(main_fn, interp_param_idx);
	return param;
}
1383
/* Declare (and compute) a fragment shader input.
 *
 * Colors may arrive pre-interpolated in VGPRs set up by the PS prolog;
 * everything else is interpolated here via interp_fs_input().
 */
static void declare_input_fs(
	struct si_shader_context *radeon_bld,
	unsigned input_index,
	const struct tgsi_full_declaration *decl,
	LLVMValueRef out[4])
{
	struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef main_fn = radeon_bld->main_fn;
	LLVMValueRef interp_param = NULL;
	int interp_param_idx;

	/* Get colors from input VGPRs (set by the prolog). */
	if (!ctx->no_prolog &&
	    decl->Semantic.Name == TGSI_SEMANTIC_COLOR) {
		unsigned i = decl->Semantic.Index;
		unsigned colors_read = shader->selector->info.colors_read;
		unsigned mask = colors_read >> (i * 4);
		/* Color VGPRs follow POS_FIXED_PT; COLOR1's components come
		 * after however many COLOR0 components are actually read. */
		unsigned offset = SI_PARAM_POS_FIXED_PT + 1 +
				  (i ? util_bitcount(colors_read & 0xf) : 0);

		/* Unread components get undef instead of a VGPR. */
		out[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : base->undef;
		out[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : base->undef;
		return;
	}

	interp_param_idx = lookup_interp_param_index(decl->Interp.Interpolate,
						     decl->Interp.Location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx) {
		/* Apply key overrides, then resolve bc_optimize. */
		interp_param_idx = select_interp_param(ctx,
						       interp_param_idx);
		interp_param = get_interp_param(ctx, interp_param_idx);
	}

	if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
	    decl->Interp.Interpolate == TGSI_INTERPOLATE_COLOR &&
	    ctx->shader->key.ps.prolog.flatshade_colors)
		interp_param = NULL; /* load the constant color */

	interp_fs_input(ctx, input_index, decl->Semantic.Name,
			decl->Semantic.Index, shader->selector->info.num_inputs,
			shader->selector->info.colors_read, interp_param,
			LLVMGetParam(main_fn, SI_PARAM_PRIM_MASK),
			LLVMGetParam(main_fn, SI_PARAM_FRONT_FACE),
			&out[0]);
}
1436
1437 static LLVMValueRef get_sample_id(struct si_shader_context *radeon_bld)
1438 {
1439 return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
1440 SI_PARAM_ANCILLARY, 8, 4);
1441 }
1442
1443 /**
1444 * Set range metadata on an instruction. This can only be used on load and
1445 * call instructions. If you know an instruction can only produce the values
1446 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1447 * \p lo is the minimum value inclusive.
1448 * \p hi is the maximum value exclusive.
1449 */
1450 static void set_range_metadata(struct si_shader_context *ctx,
1451 LLVMValueRef value, unsigned lo, unsigned hi)
1452 {
1453 LLVMValueRef range_md, md_args[2];
1454 LLVMTypeRef type = LLVMTypeOf(value);
1455 LLVMContextRef context = LLVMGetTypeContext(type);
1456
1457 md_args[0] = LLVMConstInt(type, lo, false);
1458 md_args[1] = LLVMConstInt(type, hi, false);
1459 range_md = LLVMMDNodeInContext(context, md_args, 2);
1460 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1461 }
1462
/* Return the thread's index within its wave (0..63).
 *
 * On LLVM >= 3.8 this is computed with the mbcnt.lo/mbcnt.hi pair over an
 * all-ones mask; older LLVM exposes it as the llvm.SI.tid intrinsic.
 * Range metadata [0, 64) is attached so LLVM can optimize on it.
 */
static LLVMValueRef get_thread_id(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMValueRef tid;

	if (HAVE_LLVM < 0x0308) {
		tid = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid",
				ctx->i32,   NULL, 0, LLVMReadNoneAttribute);
	} else {
		LLVMValueRef tid_args[2];
		tid_args[0] = lp_build_const_int32(gallivm, 0xffffffff);
		tid_args[1] = lp_build_const_int32(gallivm, 0);
		/* mbcnt.lo's result is fed into mbcnt.hi via tid_args[1]. */
		tid_args[1] = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.lo", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);

		tid = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.mbcnt.hi", ctx->i32,
					tid_args, 2, LLVMReadNoneAttribute);
	}
	set_range_metadata(ctx, tid, 0, 64);
	return tid;
}
1486
1487 /**
1488 * Load a dword from a constant buffer.
1489 */
1490 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1491 LLVMValueRef resource,
1492 LLVMValueRef offset)
1493 {
1494 LLVMBuilderRef builder = ctx->gallivm.builder;
1495 LLVMValueRef args[2] = {resource, offset};
1496
1497 return lp_build_intrinsic(builder, "llvm.SI.load.const", ctx->f32, args, 2,
1498 LLVMReadNoneAttribute);
1499 }
1500
1501 static LLVMValueRef load_sample_position(struct si_shader_context *radeon_bld, LLVMValueRef sample_id)
1502 {
1503 struct si_shader_context *ctx =
1504 si_shader_context(&radeon_bld->soa.bld_base);
1505 struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
1506 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1507 LLVMBuilderRef builder = gallivm->builder;
1508 LLVMValueRef desc = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
1509 LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_PS_CONST_SAMPLE_POSITIONS);
1510 LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
1511
1512 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1513 LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
1514 LLVMValueRef offset1 = LLVMBuildAdd(builder, offset0, lp_build_const_int32(gallivm, 4), "");
1515
1516 LLVMValueRef pos[4] = {
1517 buffer_load_const(ctx, resource, offset0),
1518 buffer_load_const(ctx, resource, offset1),
1519 lp_build_const_float(gallivm, 0),
1520 lp_build_const_float(gallivm, 0)
1521 };
1522
1523 return lp_build_gather_values(gallivm, pos, 4);
1524 }
1525
/* Declare a TGSI system value: compute its LLVM value from the shader's
 * input SGPRs/VGPRs (or intrinsics) and cache it in system_values[index].
 *
 * Unknown semantics assert and leave the slot untouched.
 */
static void declare_system_value(
	struct si_shader_context *radeon_bld,
	unsigned index,
	const struct tgsi_full_declaration *decl)
{
	struct si_shader_context *ctx =
		si_shader_context(&radeon_bld->soa.bld_base);
	struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
	struct gallivm_state *gallivm = &radeon_bld->gallivm;
	LLVMValueRef value = 0;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_instance_id);
		break;

	case TGSI_SEMANTIC_VERTEXID:
		/* VERTEXID = hardware vertex id + BASE_VERTEX. */
		value = LLVMBuildAdd(gallivm->builder,
				     LLVMGetParam(radeon_bld->main_fn,
						  ctx->param_vertex_id),
				     LLVMGetParam(radeon_bld->main_fn,
						  SI_PARAM_BASE_VERTEX), "");
		break;

	case TGSI_SEMANTIC_VERTEXID_NOBASE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     ctx->param_vertex_id);
		break;

	case TGSI_SEMANTIC_BASEVERTEX:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_BASE_VERTEX);
		break;

	case TGSI_SEMANTIC_BASEINSTANCE:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_START_INSTANCE);
		break;

	case TGSI_SEMANTIC_DRAWID:
		value = LLVMGetParam(radeon_bld->main_fn,
				     SI_PARAM_DRAWID);
		break;

	case TGSI_SEMANTIC_INVOCATIONID:
		/* TCS: packed in REL_IDS[12:8]; GS: a dedicated SGPR. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
		else if (ctx->type == PIPE_SHADER_GEOMETRY)
			value = LLVMGetParam(radeon_bld->main_fn,
					     SI_PARAM_GS_INSTANCE_ID);
		else
			assert(!"INVOCATIONID not implemented");
		break;

	case TGSI_SEMANTIC_POSITION:
	{
		/* Fragment position; W is reciprocated as the TGSI
		 * convention expects 1/w in the fourth component. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
			lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
						 LLVMGetParam(radeon_bld->main_fn,
							      SI_PARAM_POS_W_FLOAT)),
		};
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_FACE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_FRONT_FACE);
		break;

	case TGSI_SEMANTIC_SAMPLEID:
		value = get_sample_id(radeon_bld);
		break;

	case TGSI_SEMANTIC_SAMPLEPOS: {
		/* Sample position = fractional part of the fragment
		 * position's xy; zw are zero. */
		LLVMValueRef pos[4] = {
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
			LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
			lp_build_const_float(gallivm, 0),
			lp_build_const_float(gallivm, 0)
		};
		pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[0]);
		pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
						  TGSI_OPCODE_FRC, pos[1]);
		value = lp_build_gather_values(gallivm, pos, 4);
		break;
	}

	case TGSI_SEMANTIC_SAMPLEMASK:
		/* This can only occur with the OpenGL Core profile, which
		 * doesn't support smoothing.
		 */
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_SAMPLE_COVERAGE);
		break;

	case TGSI_SEMANTIC_TESSCOORD:
	{
		LLVMValueRef coord[4] = {
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_u),
			LLVMGetParam(radeon_bld->main_fn, ctx->param_tes_v),
			bld->zero,
			bld->zero
		};

		/* For triangles, the vector should be (u, v, 1-u-v). */
		if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
		    PIPE_PRIM_TRIANGLES)
			coord[2] = lp_build_sub(bld, bld->one,
						lp_build_add(bld, coord[0], coord[1]));

		value = lp_build_gather_values(gallivm, coord, 4);
		break;
	}

	case TGSI_SEMANTIC_VERTICESIN:
		/* Patch vertex count, packed in different layout SGPRs
		 * depending on the stage. */
		if (ctx->type == PIPE_SHADER_TESS_CTRL)
			value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
		else if (ctx->type == PIPE_SHADER_TESS_EVAL)
			value = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 7);
		else
			assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
		break;

	case TGSI_SEMANTIC_TESSINNER:
	case TGSI_SEMANTIC_TESSOUTER:
	{
		/* Tess factors are read back from the off-chip tess ring. */
		LLVMValueRef rw_buffers, buffer, base, addr;
		int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);

		rw_buffers = LLVMGetParam(ctx->main_fn,
					SI_PARAM_RW_BUFFERS);
		buffer = build_indexed_load_const(ctx, rw_buffers,
		        lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

		base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
		addr = get_tcs_tes_buffer_address(ctx, NULL,
		                          lp_build_const_int32(gallivm, param));

		value = buffer_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
		                    ~0, buffer, base, addr);

		break;
	}

	case TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI:
	case TGSI_SEMANTIC_DEFAULT_TESSINNER_SI:
	{
		/* Default tess levels come from an internal constant buffer:
		 * outer at dwords 0-3, inner at dwords 4-7. */
		LLVMValueRef buf, slot, val[4];
		int i, offset;

		slot = lp_build_const_int32(gallivm, SI_HS_CONST_DEFAULT_TESS_LEVELS);
		buf = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
		buf = build_indexed_load_const(ctx, buf, slot);
		offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;

		for (i = 0; i < 4; i++)
			val[i] = buffer_load_const(ctx, buf,
						   lp_build_const_int32(gallivm, (offset + i) * 4));
		value = lp_build_gather_values(gallivm, val, 4);
		break;
	}

	case TGSI_SEMANTIC_PRIMID:
		value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
		break;

	case TGSI_SEMANTIC_GRID_SIZE:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_GRID_SIZE);
		break;

	case TGSI_SEMANTIC_BLOCK_SIZE:
	{
		LLVMValueRef values[3];
		unsigned i;
		unsigned *properties = ctx->shader->selector->info.properties;

		/* A fixed block size becomes compile-time constants;
		 * otherwise it is passed in as an SGPR vector. */
		if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
			unsigned sizes[3] = {
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
				properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
			};

			for (i = 0; i < 3; ++i)
				values[i] = lp_build_const_int32(gallivm, sizes[i]);

			value = lp_build_gather_values(gallivm, values, 3);
		} else {
			value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_SIZE);
		}
		break;
	}

	case TGSI_SEMANTIC_BLOCK_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_BLOCK_ID);
		break;

	case TGSI_SEMANTIC_THREAD_ID:
		value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_THREAD_ID);
		break;

#if HAVE_LLVM >= 0x0309
	case TGSI_SEMANTIC_HELPER_INVOCATION:
		/* A helper invocation is one that is NOT live for
		 * pixel-shader purposes: invert ps.live and sign-extend
		 * to the TGSI ~0/0 boolean convention. */
		value = lp_build_intrinsic(gallivm->builder,
					   "llvm.amdgcn.ps.live",
					   ctx->i1, NULL, 0,
					   LLVMReadNoneAttribute);
		value = LLVMBuildNot(gallivm->builder, value, "");
		value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
		break;
#endif

	default:
		assert(!"unknown system value");
		return;
	}

	radeon_bld->system_values[index] = value;
}
1749
1750 static void declare_compute_memory(struct si_shader_context *radeon_bld,
1751 const struct tgsi_full_declaration *decl)
1752 {
1753 struct si_shader_context *ctx =
1754 si_shader_context(&radeon_bld->soa.bld_base);
1755 struct si_shader_selector *sel = ctx->shader->selector;
1756 struct gallivm_state *gallivm = &radeon_bld->gallivm;
1757
1758 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, LOCAL_ADDR_SPACE);
1759 LLVMValueRef var;
1760
1761 assert(decl->Declaration.MemType == TGSI_MEMORY_TYPE_SHARED);
1762 assert(decl->Range.First == decl->Range.Last);
1763 assert(!ctx->shared_memory);
1764
1765 var = LLVMAddGlobalInAddressSpace(gallivm->module,
1766 LLVMArrayType(ctx->i8, sel->local_size),
1767 "compute_lds",
1768 LOCAL_ADDR_SPACE);
1769 LLVMSetAlignment(var, 4);
1770
1771 ctx->shared_memory = LLVMBuildBitCast(gallivm->builder, var, i8p, "");
1772 }
1773
1774 static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
1775 {
1776 LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
1777 SI_PARAM_CONST_BUFFERS);
1778
1779 return build_indexed_load_const(ctx, list_ptr,
1780 LLVMConstInt(ctx->i32, i, 0));
1781 }
1782
/* Fetch a constant-buffer value for one TGSI source channel.
 *
 * Handles: whole-vector fetches (LP_CHAN_ALL) by recursing per channel,
 * indirect buffer selection, indirect register addressing via the ADDR
 * register, and 64-bit types via a second dword load.
 */
static LLVMValueRef fetch_constant(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	const struct tgsi_ind_register *ireg = &reg->Indirect;
	unsigned buf, idx;

	LLVMValueRef addr, bufp;
	LLVMValueRef result;

	if (swizzle == LP_CHAN_ALL) {
		/* Load all four channels and gather into a vector. */
		unsigned chan;
		LLVMValueRef values[4];
		for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan)
			values[chan] = fetch_constant(bld_base, reg, type, chan);

		return lp_build_gather_values(bld_base->base.gallivm, values, 4);
	}

	buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
	idx = reg->Register.Index * 4 + swizzle;

	if (reg->Register.Dimension && reg->Dimension.Indirect) {
		/* Indirect constant-buffer index: clamp and load the
		 * descriptor dynamically. */
		LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_CONST_BUFFERS);
		LLVMValueRef index;
		index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
						   reg->Dimension.Index,
						   SI_NUM_CONST_BUFFERS);
		bufp = build_indexed_load_const(ctx, ptr, index);
	} else
		bufp = load_const_buffer_desc(ctx, buf);

	if (reg->Register.Indirect) {
		/* Indirect register index: byte offset =
		 * ADDR * 16 + idx * 4. */
		addr = ctx->soa.addr[ireg->Index][ireg->Swizzle];
		addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
		addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
		addr = lp_build_add(&bld_base->uint_bld, addr,
				    lp_build_const_int32(base->gallivm, idx * 4));
	} else {
		addr = LLVMConstInt(ctx->i32, idx * 4, 0);
	}

	result = buffer_load_const(ctx, bufp, addr);

	if (!tgsi_type_is_64bit(type))
		result = bitcast(bld_base, type, result);
	else {
		/* 64-bit: load the adjacent dword and combine. */
		LLVMValueRef addr2, result2;

		addr2 = lp_build_add(&bld_base->uint_bld, addr,
				     LLVMConstInt(ctx->i32, 4, 0));
		result2 = buffer_load_const(ctx, bufp, addr2);

		result = si_llvm_emit_fetch_64bit(bld_base, type,
						  result, result2);
	}
	return result;
}
1845
1846 /* Upper 16 bits must be zero. */
1847 static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
1848 LLVMValueRef val[2])
1849 {
1850 return LLVMBuildOr(gallivm->builder, val[0],
1851 LLVMBuildShl(gallivm->builder, val[1],
1852 lp_build_const_int32(gallivm, 16),
1853 ""), "");
1854 }
1855
1856 /* Upper 16 bits are ignored and will be dropped. */
1857 static LLVMValueRef si_llvm_pack_two_int32_as_int16(struct gallivm_state *gallivm,
1858 LLVMValueRef val[2])
1859 {
1860 LLVMValueRef v[2] = {
1861 LLVMBuildAnd(gallivm->builder, val[0],
1862 lp_build_const_int32(gallivm, 0xffff), ""),
1863 val[1],
1864 };
1865 return si_llvm_pack_two_int16(gallivm, v);
1866 }
1867
1868 /* Initialize arguments for the shader export intrinsic */
1869 static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
1870 LLVMValueRef *values,
1871 unsigned target,
1872 LLVMValueRef *args)
1873 {
1874 struct si_shader_context *ctx = si_shader_context(bld_base);
1875 struct lp_build_context *uint =
1876 &ctx->soa.bld_base.uint_bld;
1877 struct lp_build_context *base = &bld_base->base;
1878 struct gallivm_state *gallivm = base->gallivm;
1879 LLVMBuilderRef builder = base->gallivm->builder;
1880 LLVMValueRef val[4];
1881 unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
1882 unsigned chan;
1883 bool is_int8;
1884
1885 /* Default is 0xf. Adjusted below depending on the format. */
1886 args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
1887
1888 /* Specify whether the EXEC mask represents the valid mask */
1889 args[1] = uint->zero;
1890
1891 /* Specify whether this is the last export */
1892 args[2] = uint->zero;
1893
1894 /* Specify the target we are exporting */
1895 args[3] = lp_build_const_int32(base->gallivm, target);
1896
1897 if (ctx->type == PIPE_SHADER_FRAGMENT) {
1898 const union si_shader_key *key = &ctx->shader->key;
1899 unsigned col_formats = key->ps.epilog.spi_shader_col_format;
1900 int cbuf = target - V_008DFC_SQ_EXP_MRT;
1901
1902 assert(cbuf >= 0 && cbuf < 8);
1903 spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
1904 is_int8 = (key->ps.epilog.color_is_int8 >> cbuf) & 0x1;
1905 }
1906
1907 args[4] = uint->zero; /* COMPR flag */
1908 args[5] = base->undef;
1909 args[6] = base->undef;
1910 args[7] = base->undef;
1911 args[8] = base->undef;
1912
1913 switch (spi_shader_col_format) {
1914 case V_028714_SPI_SHADER_ZERO:
1915 args[0] = uint->zero; /* writemask */
1916 args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
1917 break;
1918
1919 case V_028714_SPI_SHADER_32_R:
1920 args[0] = uint->one; /* writemask */
1921 args[5] = values[0];
1922 break;
1923
1924 case V_028714_SPI_SHADER_32_GR:
1925 args[0] = lp_build_const_int32(base->gallivm, 0x3); /* writemask */
1926 args[5] = values[0];
1927 args[6] = values[1];
1928 break;
1929
1930 case V_028714_SPI_SHADER_32_AR:
1931 args[0] = lp_build_const_int32(base->gallivm, 0x9); /* writemask */
1932 args[5] = values[0];
1933 args[8] = values[3];
1934 break;
1935
1936 case V_028714_SPI_SHADER_FP16_ABGR:
1937 args[4] = uint->one; /* COMPR flag */
1938
1939 for (chan = 0; chan < 2; chan++) {
1940 LLVMValueRef pack_args[2] = {
1941 values[2 * chan],
1942 values[2 * chan + 1]
1943 };
1944 LLVMValueRef packed;
1945
1946 packed = lp_build_intrinsic(base->gallivm->builder,
1947 "llvm.SI.packf16",
1948 ctx->i32, pack_args, 2,
1949 LLVMReadNoneAttribute);
1950 args[chan + 5] =
1951 LLVMBuildBitCast(base->gallivm->builder,
1952 packed, ctx->f32, "");
1953 }
1954 break;
1955
1956 case V_028714_SPI_SHADER_UNORM16_ABGR:
1957 for (chan = 0; chan < 4; chan++) {
1958 val[chan] = si_llvm_saturate(bld_base, values[chan]);
1959 val[chan] = LLVMBuildFMul(builder, val[chan],
1960 lp_build_const_float(gallivm, 65535), "");
1961 val[chan] = LLVMBuildFAdd(builder, val[chan],
1962 lp_build_const_float(gallivm, 0.5), "");
1963 val[chan] = LLVMBuildFPToUI(builder, val[chan],
1964 ctx->i32, "");
1965 }
1966
1967 args[4] = uint->one; /* COMPR flag */
1968 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1969 si_llvm_pack_two_int16(gallivm, val));
1970 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1971 si_llvm_pack_two_int16(gallivm, val+2));
1972 break;
1973
1974 case V_028714_SPI_SHADER_SNORM16_ABGR:
1975 for (chan = 0; chan < 4; chan++) {
1976 /* Clamp between [-1, 1]. */
1977 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MIN,
1978 values[chan],
1979 lp_build_const_float(gallivm, 1));
1980 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_MAX,
1981 val[chan],
1982 lp_build_const_float(gallivm, -1));
1983 /* Convert to a signed integer in [-32767, 32767]. */
1984 val[chan] = LLVMBuildFMul(builder, val[chan],
1985 lp_build_const_float(gallivm, 32767), "");
1986 /* If positive, add 0.5, else add -0.5. */
1987 val[chan] = LLVMBuildFAdd(builder, val[chan],
1988 LLVMBuildSelect(builder,
1989 LLVMBuildFCmp(builder, LLVMRealOGE,
1990 val[chan], base->zero, ""),
1991 lp_build_const_float(gallivm, 0.5),
1992 lp_build_const_float(gallivm, -0.5), ""), "");
1993 val[chan] = LLVMBuildFPToSI(builder, val[chan], ctx->i32, "");
1994 }
1995
1996 args[4] = uint->one; /* COMPR flag */
1997 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
1998 si_llvm_pack_two_int32_as_int16(gallivm, val));
1999 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2000 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2001 break;
2002
2003 case V_028714_SPI_SHADER_UINT16_ABGR: {
2004 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2005 255 : 65535);
2006 /* Clamp. */
2007 for (chan = 0; chan < 4; chan++) {
2008 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2009 val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
2010 val[chan], max);
2011 }
2012
2013 args[4] = uint->one; /* COMPR flag */
2014 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2015 si_llvm_pack_two_int16(gallivm, val));
2016 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2017 si_llvm_pack_two_int16(gallivm, val+2));
2018 break;
2019 }
2020
2021 case V_028714_SPI_SHADER_SINT16_ABGR: {
2022 LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
2023 127 : 32767);
2024 LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
2025 -128 : -32768);
2026 /* Clamp. */
2027 for (chan = 0; chan < 4; chan++) {
2028 val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
2029 val[chan] = lp_build_emit_llvm_binary(bld_base,
2030 TGSI_OPCODE_IMIN,
2031 val[chan], max);
2032 val[chan] = lp_build_emit_llvm_binary(bld_base,
2033 TGSI_OPCODE_IMAX,
2034 val[chan], min);
2035 }
2036
2037 args[4] = uint->one; /* COMPR flag */
2038 args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2039 si_llvm_pack_two_int32_as_int16(gallivm, val));
2040 args[6] = bitcast(bld_base, TGSI_TYPE_FLOAT,
2041 si_llvm_pack_two_int32_as_int16(gallivm, val+2));
2042 break;
2043 }
2044
2045 case V_028714_SPI_SHADER_32_ABGR:
2046 memcpy(&args[5], values, sizeof(values[0]) * 4);
2047 break;
2048 }
2049 }
2050
2051 static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
2052 LLVMValueRef alpha)
2053 {
2054 struct si_shader_context *ctx = si_shader_context(bld_base);
2055 struct gallivm_state *gallivm = bld_base->base.gallivm;
2056
2057 if (ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
2058 LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
2059 SI_PARAM_ALPHA_REF);
2060
2061 LLVMValueRef alpha_pass =
2062 lp_build_cmp(&bld_base->base,
2063 ctx->shader->key.ps.epilog.alpha_func,
2064 alpha, alpha_ref);
2065 LLVMValueRef arg =
2066 lp_build_select(&bld_base->base,
2067 alpha_pass,
2068 lp_build_const_float(gallivm, 1.0f),
2069 lp_build_const_float(gallivm, -1.0f));
2070
2071 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
2072 ctx->voidt, &arg, 1, 0);
2073 } else {
2074 lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kilp",
2075 ctx->voidt, NULL, 0, 0);
2076 }
2077 }
2078
2079 static LLVMValueRef si_scale_alpha_by_sample_mask(struct lp_build_tgsi_context *bld_base,
2080 LLVMValueRef alpha,
2081 unsigned samplemask_param)
2082 {
2083 struct si_shader_context *ctx = si_shader_context(bld_base);
2084 struct gallivm_state *gallivm = bld_base->base.gallivm;
2085 LLVMValueRef coverage;
2086
2087 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
2088 coverage = LLVMGetParam(ctx->main_fn,
2089 samplemask_param);
2090 coverage = bitcast(bld_base, TGSI_TYPE_SIGNED, coverage);
2091
2092 coverage = lp_build_intrinsic(gallivm->builder, "llvm.ctpop.i32",
2093 ctx->i32,
2094 &coverage, 1, LLVMReadNoneAttribute);
2095
2096 coverage = LLVMBuildUIToFP(gallivm->builder, coverage,
2097 ctx->f32, "");
2098
2099 coverage = LLVMBuildFMul(gallivm->builder, coverage,
2100 lp_build_const_float(gallivm,
2101 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
2102
2103 return LLVMBuildFMul(gallivm->builder, alpha, coverage, "");
2104 }
2105
/* Emit user clip plane distances for TGSI_SEMANTIC_CLIPVERTEX.
 *
 * Loads the 8 user clip planes from the SI_VS_CONST_CLIP_PLANES constant
 * buffer and accumulates the dot product of the clip vertex (out_elts)
 * with each plane, filling in two position exports (POS+2 and POS+3,
 * i.e. pos[2] and pos[3]) with 4 clip distances each.
 *
 * \param pos       the 4 position export argument arrays (9 export args each)
 * \param out_elts  the 4 components of the clip vertex
 */
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context *bld_base,
				    LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
	LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm,
							   SI_VS_CONST_CLIP_PLANES);
	LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		LLVMValueRef *args = pos[2 + reg_index];

		/* Start the 4 distances at 0; dot products accumulate below. */
		args[5] =
		args[6] =
		args[7] =
		args[8] = lp_build_const_float(base->gallivm, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			for (const_chan = 0; const_chan < TGSI_NUM_CHANNELS; const_chan++) {
				/* Byte offset of component const_chan of plane
				 * (reg_index * 4 + chan); planes are vec4s of
				 * 4-byte floats.  args[1] is reused here as a
				 * scratch slot and overwritten below. */
				args[1] = lp_build_const_int32(base->gallivm,
							       ((reg_index * 4 + chan) * 4 +
								const_chan) * 4);
				base_elt = buffer_load_const(ctx, const_resource,
							     args[1]);
				args[5 + chan] =
					lp_build_add(base, args[5 + chan],
						     lp_build_mul(base, base_elt,
								  out_elts[const_chan]));
			}
		}

		/* Export header: writemask, EXEC-valid flag, done flag,
		 * export target, COMPR flag. */
		args[0] = lp_build_const_int32(base->gallivm, 0xf);
		args[1] = uint->zero;
		args[2] = uint->zero;
		args[3] = lp_build_const_int32(base->gallivm,
					       V_008DFC_SQ_EXP_POS + 2 + reg_index);
		args[4] = uint->zero;
	}
}
2152
2153 static void si_dump_streamout(struct pipe_stream_output_info *so)
2154 {
2155 unsigned i;
2156
2157 if (so->num_outputs)
2158 fprintf(stderr, "STREAMOUT\n");
2159
2160 for (i = 0; i < so->num_outputs; i++) {
2161 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
2162 so->output[i].start_component;
2163 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
2164 i, so->output[i].output_buffer,
2165 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
2166 so->output[i].register_index,
2167 mask & 1 ? "x" : "",
2168 mask & 2 ? "y" : "",
2169 mask & 4 ? "z" : "",
2170 mask & 8 ? "w" : "");
2171 }
2172 }
2173
/* On SI, the vertex shader is responsible for writing streamout data
 * to buffers.
 *
 * Loads the enabled streamout buffer descriptors, computes the per-thread
 * write offsets, and stores each selected output to its buffer.  All
 * stores are predicated on the thread being allowed to emit (so_vtx_count)
 * and on the output's stream matching the currently-emitted stream.
 *
 * \param outputs  shader output values already loaded as LLVM values
 * \param noutput  number of entries in \p outputs
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	int i, j;
	struct lp_build_if_state if_ctx;
	LLVMValueRef so_buffers[4];
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    SI_PARAM_RW_BUFFERS);

	/* Load the descriptors for the buffers that are actually used
	 * (stride != 0). */
	for (i = 0; i < 4; ++i) {
		if (ctx->shader->selector->so.stride[i]) {
			LLVMValueRef offset = lp_build_const_int32(gallivm,
								   SI_VS_STREAMOUT_BUF0 + i);

			so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
		}
	}

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		unpack_param(ctx, ctx->param_streamout_config, 16, 7);

	LLVMValueRef tid = get_thread_id(ctx);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Bits [25:24]: index of the stream currently being emitted. */
	LLVMValueRef stream_id =
		unpack_param(ctx, ctx->param_streamout_config, 24, 2);

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	lp_build_if(&if_ctx, gallivm, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			LLVMGetParam(ctx->main_fn,
				     ctx->param_streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Compute the write offset for each enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
							      ctx->param_streamout_offset[i]);
			/* streamout_offset is in dwords; convert to bytes. */
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
							  LLVMConstInt(ctx->i32, so->stride[i]*4, 0), "");
			so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned buf_idx = so->output[i].output_buffer;
			unsigned reg = so->output[i].register_index;
			unsigned start = so->output[i].start_component;
			unsigned num_comps = so->output[i].num_components;
			unsigned stream = so->output[i].stream;
			LLVMValueRef out[4];
			struct lp_build_if_state if_ctx_stream;

			/* Skip malformed entries defensively. */
			assert(num_comps && num_comps <= 4);
			if (!num_comps || num_comps > 4)
				continue;

			/* Skip outputs the shader didn't actually write. */
			if (reg >= noutput)
				continue;

			/* Load the output as int. */
			for (j = 0; j < num_comps; j++) {
				out[j] = LLVMBuildBitCast(builder,
							  outputs[reg].values[start+j],
							  ctx->i32, "");
			}

			/* Pack the output. */
			LLVMValueRef vdata = NULL;

			switch (num_comps) {
			case 1: /* as i32 */
				vdata = out[0];
				break;
			case 2: /* as v2i32 */
			case 3: /* as v4i32 (aligned to 4) */
			case 4: /* as v4i32 */
				vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
				for (j = 0; j < num_comps; j++) {
					vdata = LLVMBuildInsertElement(builder, vdata, out[j],
								       LLVMConstInt(ctx->i32, j, 0), "");
				}
				break;
			}

			/* Only store when this output's stream is the one
			 * currently being emitted. */
			LLVMValueRef can_emit_stream =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      stream_id,
					      lp_build_const_int32(gallivm, stream), "");

			lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
			build_tbuffer_store_dwords(ctx, so_buffers[buf_idx],
						   vdata, num_comps,
						   so_write_offset[buf_idx],
						   LLVMConstInt(ctx->i32, 0, 0),
						   so->output[i].dst_offset*4);
			lp_build_endif(&if_ctx_stream);
		}
	}
	lp_build_endif(&if_ctx);
}
2303
2304
/* Generate export instructions for hardware VS shader stage.
 *
 * Emits streamout stores (if enabled), then one export per output:
 * position-like semantics go to POS exports, everything else to PARAM
 * exports.  Position exports are collected first in pos_args so that
 * the "done" bit can be set on the last one, and a default position
 * and the misc vector (psize/edgeflag/layer/viewport) are added as
 * needed.  Also fills in shader->info.nr_param_exports,
 * nr_pos_exports and vs_output_param_offset.
 */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
			      struct si_shader_output_values *outputs,
			      unsigned noutput)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint =
		&ctx->soa.bld_base.uint_bld;
	LLVMValueRef args[9];
	LLVMValueRef pos_args[4][9] = { { 0 } };
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned semantic_name, semantic_index;
	unsigned target;
	unsigned param_count = 0;
	unsigned pos_idx;
	int i;

	if (outputs && ctx->shader->selector->so.num_outputs) {
		si_llvm_emit_streamout(ctx, outputs, noutput);
	}

	for (i = 0; i < noutput; i++) {
		semantic_name = outputs[i].name;
		semantic_index = outputs[i].sid;

handle_semantic:
		/* Select the correct target */
		switch(semantic_name) {
		case TGSI_SEMANTIC_PSIZE:
			/* Saved for the misc vector (POS+1) below. */
			psize_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_EDGEFLAG:
			/* Saved for the misc vector (POS+1) below. */
			edgeflag_value = outputs[i].values[0];
			continue;
		case TGSI_SEMANTIC_LAYER:
			/* Goes into the misc vector AND a param export. */
			layer_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			/* Goes into the misc vector AND a param export. */
			viewport_index_value = outputs[i].values[0];
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		case TGSI_SEMANTIC_POSITION:
			target = V_008DFC_SQ_EXP_POS;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
			continue;
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			target = V_008DFC_SQ_EXP_PARAM + param_count;
			assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
			shader->info.vs_output_param_offset[i] = param_count;
			param_count++;
			break;
		default:
			target = 0;
			fprintf(stderr,
				"Warning: SI unhandled vs output type:%d\n",
				semantic_name);
		}

		si_llvm_init_export_args(bld_base, outputs[i].values, target, args);

		if (target >= V_008DFC_SQ_EXP_POS &&
		    target <= (V_008DFC_SQ_EXP_POS + 3)) {
			/* Defer position exports so the last one can get the
			 * "done" bit; see the loop at the end. */
			memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
			       args, sizeof(args));
		} else {
			lp_build_intrinsic(base->gallivm->builder,
					   "llvm.SI.export", ctx->voidt,
					   args, 9, 0);
		}

		if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
			/* Clip distances are also exported as a parameter. */
			semantic_name = TGSI_SEMANTIC_GENERIC;
			goto handle_semantic;
		}
	}

	shader->info.nr_param_exports = param_count;

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0][0]) {
		pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
		pos_args[0][1] = uint->zero; /* EXEC mask */
		pos_args[0][2] = uint->zero; /* last export? */
		pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
		pos_args[0][4] = uint->zero; /* COMPR flag */
		pos_args[0][5] = base->zero; /* X */
		pos_args[0][6] = base->zero; /* Y */
		pos_args[0][7] = base->zero; /* Z */
		pos_args[0][8] = base->one;  /* W */
	}

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    shader->selector->info.writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
						      shader->selector->info.writes_psize |
						      (shader->selector->info.writes_edgeflag << 1) |
						      (shader->selector->info.writes_layer << 2) |
						      (shader->selector->info.writes_viewport_index << 3));
		pos_args[1][1] = uint->zero; /* EXEC mask */
		pos_args[1][2] = uint->zero; /* last export? */
		pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
		pos_args[1][4] = uint->zero; /* COMPR flag */
		pos_args[1][5] = base->zero; /* X */
		pos_args[1][6] = base->zero; /* Y */
		pos_args[1][7] = base->zero; /* Z */
		pos_args[1][8] = base->zero; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1][5] = psize_value;

		if (shader->selector->info.writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = lp_build_min(&bld_base->int_bld,
						      edgeflag_value,
						      bld_base->int_bld.one);

			/* The LLVM intrinsic expects a float. */
			pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
							  edgeflag_value,
							  ctx->f32, "");
		}

		if (shader->selector->info.writes_layer)
			pos_args[1][7] = layer_value;

		if (shader->selector->info.writes_viewport_index)
			pos_args[1][8] = viewport_index_value;
	}

	for (i = 0; i < 4; i++)
		if (pos_args[i][0])
			shader->info.nr_pos_exports++;

	/* Emit the collected position exports with sequential POS targets;
	 * the last one is flagged as "done". */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i][0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + pos_idx++);

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i][2] = uint->one;

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   ctx->voidt, pos_args[i], 9, 0);
	}
}
2479
/* Copy the TCS inputs selected by the shader key from LDS to the
 * offchip tessellation ring buffer, so the TES can read them.
 *
 * Each bit in key.tcs.epilog.inputs_to_copy selects one vec4 input of
 * the current invocation's vertex; it is loaded from LDS and stored
 * with a 4-dword tbuffer write.
 */
static void si_copy_tcs_inputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef invocation_id, rw_buffers, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
	uint64_t inputs;

	/* Invocation ID: bits [12:8] of SI_PARAM_REL_IDS. */
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);

	/* Descriptor of the offchip tessellation ring. */
	rw_buffers = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));

	buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);

	/* LDS address of this invocation's vertex inputs:
	 * current patch base + invocation_id * vertex stride. */
	lds_vertex_stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
	lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
	                                 lds_vertex_stride, "");
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");

	inputs = ctx->shader->key.tcs.epilog.inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		LLVMValueRef lds_ptr = LLVMBuildAdd(gallivm->builder, lds_base,
		                        lp_build_const_int32(gallivm, 4 * i),
		                        "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
					              invocation_id,
					              lp_build_const_int32(gallivm, i));

		/* Load the vec4 from LDS and copy it to the ring buffer. */
		LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
		                              lds_ptr);

		build_tbuffer_store_dwords(ctx, buffer, value, 4, buffer_addr,
		                           buffer_offset, 0);
	}
}
2521
/* Write the tessellation factors (outer/inner tess levels) for the
 * current patch to the tess factor ring buffer.
 *
 * Only invocation 0 of each patch performs the stores; the values are
 * read back from LDS because any invocation may have written them.
 * Also writes the dynamic HS control word (once, from rel_patch_id 0).
 */
static void si_write_tess_factors(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, rw_buffers, tf_base;
	unsigned stride, outer_comps, inner_comps, i;
	struct lp_build_if_state if_ctx, inner_if_ctx;

	/* Make sure all invocations' LDS writes are visible first. */
	si_llvm_emit_barrier(NULL, bld_base, NULL);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	lp_build_if(&if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  invocation_id, bld_base->uint_bld.zero, ""));

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Load tess_inner and tess_outer from LDS.
	 * Any invocation can write them, so we can't get them from a temporary.
	 */
	tess_inner_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0);
	tess_outer_index = si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0);

	lds_base = tcs_out_current_patch_data_offset;
	lds_inner = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_inner_index * 4), "");
	lds_outer = LLVMBuildAdd(gallivm->builder, lds_base,
				 lp_build_const_int32(gallivm,
						      tess_outer_index * 4), "");

	/* Outer factors first, then inner factors, packed contiguously. */
	for (i = 0; i < outer_comps; i++)
		out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
	for (i = 0; i < inner_comps; i++)
		out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);

	/* Convert the outputs to vectors for stores. */
	vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = lp_build_gather_values(gallivm, out+4, stride - 4);

	/* Get the buffer. */
	rw_buffers = LLVMGetParam(ctx->main_fn,
				  SI_PARAM_RW_BUFFERS);
	buffer = build_indexed_load_const(ctx, rw_buffers,
			lp_build_const_int32(gallivm, SI_HS_RING_TESS_FACTOR));

	/* Get the offset. */
	tf_base = LLVMGetParam(ctx->main_fn,
			       SI_PARAM_TESS_FACTOR_OFFSET);
	byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
				  lp_build_const_int32(gallivm, 4 * stride), "");

	/* Only the first patch writes the control word. */
	lp_build_if(&inner_if_ctx, gallivm,
		    LLVMBuildICmp(gallivm->builder, LLVMIntEQ,
				  rel_patch_id, bld_base->uint_bld.zero, ""));

	/* Store the dynamic HS control word. */
	build_tbuffer_store_dwords(ctx, buffer,
				   lp_build_const_int32(gallivm, 0x80000000),
				   1, lp_build_const_int32(gallivm, 0), tf_base, 0);

	lp_build_endif(&inner_if_ctx);

	/* Store the tessellation factors.  The +4 byte offset skips the
	 * control word; a second store handles the tail beyond 4 dwords. */
	build_tbuffer_store_dwords(ctx, buffer, vec0,
				   MIN2(stride, 4), byteoffset, tf_base, 4);
	if (vec1)
		build_tbuffer_store_dwords(ctx, buffer, vec1,
					   stride - 4, byteoffset, tf_base, 20);
	lp_build_endif(&if_ctx);
}
2627
/* This only writes the tessellation factor levels.
 *
 * With a separate epilog (!ctx->no_epilog), the main function instead
 * returns the values the epilog needs (RW_BUFFERS, TF buffer soffset,
 * and the three VGPRs) via ctx->return_value; otherwise the copy of
 * TCS inputs and the tess factor stores are emitted inline.
 */
static void si_llvm_emit_tcs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (!ctx->no_epilog) {
		/* Return epilog parameters from this function. */
		LLVMBuilderRef builder = bld_base->base.gallivm->builder;
		LLVMValueRef ret = ctx->return_value;
		LLVMValueRef rw_buffers, rw0, rw1, tf_soffset;
		unsigned vgpr;

		/* RW_BUFFERS pointer: split the 64-bit pointer into two
		 * i32 SGPR return slots. */
		rw_buffers = LLVMGetParam(ctx->main_fn,
					  SI_PARAM_RW_BUFFERS);
		rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
		rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
		rw0 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.zero, "");
		rw1 = LLVMBuildExtractElement(builder, rw_buffers,
					      bld_base->uint_bld.one, "");
		ret = LLVMBuildInsertValue(builder, ret, rw0, 0, "");
		ret = LLVMBuildInsertValue(builder, ret, rw1, 1, "");

		/* Tess factor buffer soffset is after user SGPRs. */
		tf_soffset = LLVMGetParam(ctx->main_fn,
					  SI_PARAM_TESS_FACTOR_OFFSET);
		ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
					   SI_TCS_NUM_USER_SGPR + 1, "");

		/* VGPRs: returned as floats, bitcast back in the epilog. */
		rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
		invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
		tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);

		vgpr = SI_TCS_NUM_USER_SGPR + 2;
		ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
		ctx->return_value = ret;
		return;
	}

	/* No separate epilog: do the work inline. */
	si_copy_tcs_inputs(bld_base);
	si_write_tess_factors(bld_base, rel_patch_id, invocation_id, tf_lds_offset);
}
2679
/* Epilogue for a vertex shader running as LS (before tessellation):
 * store all shader outputs to LDS at this vertex's slot, laid out by
 * si_shader_io_get_unique_index, so the TCS can read them as inputs.
 */
static void si_llvm_emit_ls_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	unsigned i, chan;
	LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
					      ctx->param_rel_auto_id);
	/* Per-vertex LDS stride in dwords: bits [20:13] of LS_OUT_LAYOUT. */
	LLVMValueRef vertex_dw_stride =
		unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
	LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr = ctx->soa.outputs[i];
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];
		/* Each output occupies one vec4 (4 dwords) at a slot keyed
		 * by its semantic. */
		int param = si_shader_io_get_unique_index(name, index);
		LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
					lp_build_const_int32(gallivm, param * 4), "");

		for (chan = 0; chan < 4; chan++) {
			lds_store(bld_base, chan, dw_addr,
				  LLVMBuildLoad(gallivm->builder, out_ptr[chan], ""));
		}
	}
}
2710
/* Epilogue for a shader running as ES (before a geometry shader):
 * store all outputs to the ESGS ring buffer, one dword per component,
 * at slots keyed by si_shader_io_get_unique_index.  VIEWPORT_INDEX and
 * LAYER are skipped (not passed through the ring here).
 */
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    ctx->param_es2gs_offset);
	unsigned chan;
	int i;

	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->soa.outputs[i];
		int param_index;

		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param_index = si_shader_io_get_unique_index(info->output_semantic_name[i],
							    info->output_semantic_index[i]);

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			/* Stored as raw 32-bit data, one component per dword. */
			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			build_tbuffer_store(ctx,
					    ctx->esgs_ring,
					    out_val, 1,
					    LLVMGetUndef(ctx->i32), soffset,
					    (4 * param_index + chan) * 4,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    0, 0, 1, 1, 0);
		}
	}
}
2749
2750 static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
2751 {
2752 struct si_shader_context *ctx = si_shader_context(bld_base);
2753 struct gallivm_state *gallivm = bld_base->base.gallivm;
2754 LLVMValueRef args[2];
2755
2756 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
2757 args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
2758 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
2759 ctx->voidt, args, 2, 0);
2760 }
2761
/* Epilogue for a vertex shader running as hardware VS: gather all
 * outputs into si_shader_output_values, optionally clamp vertex colors
 * (controlled by a state bit in a user SGPR), handle the PrimitiveID
 * export (inline or via the epilog return value), and emit the exports
 * through si_llvm_export_vs.
 */
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	int i,j;

	assert(!ctx->is_gs_copy_shader);

	/* +1 leaves room for an extra PrimitiveID output (added below). */
	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	/* Vertex color clamping.
	 *
	 * This uses a state constant loaded in a user data SGPR and
	 * an IF statement is added that clamps all colors if the constant
	 * is true.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		struct lp_build_if_state if_ctx;
		LLVMValueRef cond = NULL;
		LLVMValueRef addr, val;

		for (i = 0; i < info->num_outputs; i++) {
			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR &&
			    info->output_semantic_name[i] != TGSI_SEMANTIC_BCOLOR)
				continue;

			/* We've found a color. */
			if (!cond) {
				/* The state is in the first bit of the user SGPR. */
				cond = LLVMGetParam(ctx->main_fn,
						    SI_PARAM_VS_STATE_BITS);
				cond = LLVMBuildTrunc(gallivm->builder, cond,
						      ctx->i1, "");
				lp_build_if(&if_ctx, gallivm, cond);
			}

			/* Clamp each color component to [0, 1] in place. */
			for (j = 0; j < 4; j++) {
				addr = ctx->soa.outputs[i][j];
				val = LLVMBuildLoad(gallivm->builder, addr, "");
				val = si_llvm_saturate(bld_base, val);
				LLVMBuildStore(gallivm->builder, val, addr);
			}
		}

		if (cond)
			lp_build_endif(&if_ctx);
	}

	/* Load all outputs from their output allocas. */
	for (i = 0; i < info->num_outputs; i++) {
		outputs[i].name = info->output_semantic_name[i];
		outputs[i].sid = info->output_semantic_index[i];

		for (j = 0; j < 4; j++)
			outputs[i].values[j] =
				LLVMBuildLoad(gallivm->builder,
					      ctx->soa.outputs[i][j],
					      "");
	}

	/* After the loop above, i == info->num_outputs and doubles as the
	 * output count passed to si_llvm_export_vs below. */
	if (ctx->no_epilog) {
		/* Export PrimitiveID when PS needs it. */
		if (si_vs_exports_prim_id(ctx->shader)) {
			outputs[i].name = TGSI_SEMANTIC_PRIMID;
			outputs[i].sid = 0;
			outputs[i].values[0] = bitcast(bld_base, TGSI_TYPE_FLOAT,
						       get_primitive_id(bld_base, 0));
			outputs[i].values[1] = bld_base->base.undef;
			outputs[i].values[2] = bld_base->base.undef;
			outputs[i].values[3] = bld_base->base.undef;
			i++;
		}
	} else {
		/* Return the primitive ID from the LLVM function. */
		ctx->return_value =
			LLVMBuildInsertValue(gallivm->builder,
					     ctx->return_value,
					     bitcast(bld_base, TGSI_TYPE_FLOAT,
						     get_primitive_id(bld_base, 0)),
					     VS_EPILOG_PRIMID_LOC, "");
	}

	si_llvm_export_vs(bld_base, outputs, i);
	FREE(outputs);
}
2848
/* Accumulates the argument lists of queued "llvm.SI.export" calls so that
 * all PS exports can be emitted together at the end of the shader
 * (see si_emit_ps_exports). */
struct si_ps_exports {
	unsigned num;              /* number of queued exports */
	LLVMValueRef args[10][9];  /* 9 intrinsic arguments per export */
};
2853
2854 unsigned si_get_spi_shader_z_format(bool writes_z, bool writes_stencil,
2855 bool writes_samplemask)
2856 {
2857 if (writes_z) {
2858 /* Z needs 32 bits. */
2859 if (writes_samplemask)
2860 return V_028710_SPI_SHADER_32_ABGR;
2861 else if (writes_stencil)
2862 return V_028710_SPI_SHADER_32_GR;
2863 else
2864 return V_028710_SPI_SHADER_32_R;
2865 } else if (writes_stencil || writes_samplemask) {
2866 /* Both stencil and sample mask need only 16 bits. */
2867 return V_028710_SPI_SHADER_UINT16_ABGR;
2868 } else {
2869 return V_028710_SPI_SHADER_ZERO;
2870 }
2871 }
2872
/* Queue the MRTZ export, which carries the depth, stencil and/or sample
 * mask outputs of a pixel shader.
 *
 * Any of \p depth, \p stencil, \p samplemask may be NULL, but at least one
 * must be set. The channel layout depends on the format returned by
 * si_get_spi_shader_z_format() for the same combination of outputs.
 *
 * \param exp  export list the intrinsic arguments are appended to
 */
static void si_export_mrt_z(struct lp_build_tgsi_context *bld_base,
			    LLVMValueRef depth, LLVMValueRef stencil,
			    LLVMValueRef samplemask, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	LLVMValueRef args[9];
	unsigned mask = 0;
	unsigned format = si_get_spi_shader_z_format(depth != NULL,
						     stencil != NULL,
						     samplemask != NULL);

	assert(depth || stencil || samplemask);

	args[1] = uint->one; /* whether the EXEC mask is valid */
	args[2] = uint->one; /* DONE bit */

	/* Specify the target we are exporting */
	args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_MRTZ);

	args[4] = uint->zero; /* COMP flag */
	args[5] = base->undef; /* R, depth */
	args[6] = base->undef; /* G, stencil test value[0:7], stencil op value[8:15] */
	args[7] = base->undef; /* B, sample mask */
	args[8] = base->undef; /* A, alpha to mask */

	if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
		/* Compressed 16-bit export: stencil and sample mask only. */
		assert(!depth);
		args[4] = uint->one; /* COMPR flag */

		if (stencil) {
			/* Stencil should be in X[23:16]. */
			stencil = bitcast(bld_base, TGSI_TYPE_UNSIGNED, stencil);
			stencil = LLVMBuildShl(base->gallivm->builder, stencil,
					       LLVMConstInt(ctx->i32, 16, 0), "");
			args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT, stencil);
			mask |= 0x3;
		}
		if (samplemask) {
			/* SampleMask should be in Y[15:0]. */
			args[6] = samplemask;
			mask |= 0xc;
		}
	} else {
		/* 32-bit export: one value per channel. */
		if (depth) {
			args[5] = depth;
			mask |= 0x1;
		}
		if (stencil) {
			args[6] = stencil;
			mask |= 0x2;
		}
		if (samplemask) {
			args[7] = samplemask;
			mask |= 0x4;
		}
	}

	/* SI (except OLAND) has a bug that it only looks
	 * at the X writemask component. */
	if (ctx->screen->b.chip_class == SI &&
	    ctx->screen->b.family != CHIP_OLAND)
		mask |= 0x1;

	/* Specify which components to enable */
	args[0] = lp_build_const_int32(base->gallivm, mask);

	memcpy(exp->args[exp->num++], args, sizeof(args));
}
2943
/* Queue the color export(s) for one TGSI color output.
 *
 * Applies the epilog key states first (color clamping, alpha-to-one,
 * alpha test, line/polygon smoothing). If key.ps.epilog.last_cbuf > 0
 * (FS_COLOR0_WRITES_ALL_CBUFS), the single color is exported to all
 * enabled colorbuffers.
 *
 * \param color             the 4 color components (may be modified in place)
 * \param index             semantic index of the color output
 * \param samplemask_param  function parameter index of SampleMaskIn,
 *                          used for smoothing
 * \param is_last           whether this is the last export of the shader,
 *                          i.e. gets the DONE bit
 * \param exp               export list the arguments are appended to
 */
static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	int i;

	/* Clamp color */
	if (ctx->shader->key.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = si_llvm_saturate(bld_base, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.ps.epilog.alpha_to_one)
		color[3] = base->one;

	/* Alpha test */
	if (index == 0 &&
	    ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(bld_base, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(bld_base, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.ps.epilog.last_cbuf > 0) {
		LLVMValueRef args[8][9];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(bld_base, color,
						 V_008DFC_SQ_EXP_MRT + c, args[c]);
			if (args[c][0] != bld_base->uint_bld.zero)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c][1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
				args[c][2] = bld_base->uint_bld.one; /* DONE bit */
			} else if (args[c][0] == bld_base->uint_bld.zero)
				continue; /* unnecessary NULL export */

			memcpy(exp->args[exp->num++], args[c], sizeof(args[c]));
		}
	} else {
		LLVMValueRef args[9];

		/* Export */
		si_llvm_init_export_args(bld_base, color, V_008DFC_SQ_EXP_MRT + index,
					 args);
		if (is_last) {
			args[1] = bld_base->uint_bld.one; /* whether the EXEC mask is valid */
			args[2] = bld_base->uint_bld.one; /* DONE bit */
		} else if (args[0] == bld_base->uint_bld.zero)
			return; /* unnecessary NULL export */

		memcpy(exp->args[exp->num++], args, sizeof(args));
	}
}
3010
3011 static void si_emit_ps_exports(struct si_shader_context *ctx,
3012 struct si_ps_exports *exp)
3013 {
3014 for (unsigned i = 0; i < exp->num; i++)
3015 lp_build_intrinsic(ctx->gallivm.builder,
3016 "llvm.SI.export", ctx->voidt,
3017 exp->args[i], 9, 0);
3018 }
3019
/* Emit a NULL export with no channels enabled but the DONE bit set.
 * Used when the pixel shader has no enabled color or MRTZ outputs
 * (see si_llvm_emit_fs_epilogue). */
static void si_export_null(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *base = &bld_base->base;
	struct lp_build_context *uint = &bld_base->uint_bld;
	LLVMValueRef args[9];

	args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
	args[1] = uint->one; /* whether the EXEC mask is valid */
	args[2] = uint->one; /* DONE bit */
	args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_NULL);
	args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
	args[5] = base->undef; /* R */
	args[6] = base->undef; /* G */
	args[7] = base->undef; /* B */
	args[8] = base->undef; /* A */

	lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
			   ctx->voidt, args, 9, 0);
}
3040
/* Fragment shader epilogue for monolithic shaders (no separate epilog part):
 * read all TGSI outputs and emit the color and MRTZ exports directly. */
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_color_export = -1;
	int i;
	struct si_ps_exports exp = {};

	/* Determine the last export. If MRTZ is present, it's always last.
	 * Otherwise, find the last color export.
	 */
	if (!info->writes_z && !info->writes_stencil && !info->writes_samplemask) {
		unsigned spi_format = shader->key.ps.epilog.spi_shader_col_format;

		/* Don't export NULL and return if alpha-test is enabled. */
		if (shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS &&
		    shader->key.ps.epilog.alpha_func != PIPE_FUNC_NEVER &&
		    (spi_format & 0xf) == 0)
			spi_format |= V_028714_SPI_SHADER_32_AR;

		for (i = 0; i < info->num_outputs; i++) {
			unsigned index = info->output_semantic_index[i];

			if (info->output_semantic_name[i] != TGSI_SEMANTIC_COLOR)
				continue;

			/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
			if (shader->key.ps.epilog.last_cbuf > 0) {
				/* Just set this if any of the colorbuffers are enabled. */
				if (spi_format &
				    ((1llu << (4 * (shader->key.ps.epilog.last_cbuf + 1))) - 1))
					last_color_export = i;
				continue;
			}

			/* 4 format bits per colorbuffer; nonzero = enabled. */
			if ((spi_format >> (index * 4)) & 0xf)
				last_color_export = i;
		}

		/* If there are no outputs, export NULL. */
		if (last_color_export == -1) {
			si_export_null(bld_base);
			return;
		}
	}

	/* Read each output and queue the corresponding export. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];
		unsigned j;
		LLVMValueRef color[4] = {};

		/* Select the correct target */
		switch (semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			depth = LLVMBuildLoad(builder,
					      ctx->soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->soa.outputs[i][0], "");
			break;
		case TGSI_SEMANTIC_COLOR:
			for (j = 0; j < 4; j++)
				color[j] = LLVMBuildLoad(builder,
							 ctx->soa.outputs[i][j], "");

			si_export_mrt_color(bld_base, color, semantic_index,
					    SI_PARAM_SAMPLE_COVERAGE,
					    last_color_export == i, &exp);
			break;
		default:
			fprintf(stderr,
				"Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* MRTZ is emitted last (after all color exports). */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);

	si_emit_ps_exports(ctx, &exp);
}
3132
3133 /**
3134 * Return PS outputs in this order:
3135 *
3136 * v[0:3] = color0.xyzw
3137 * v[4:7] = color1.xyzw
3138 * ...
3139 * vN+0 = Depth
3140 * vN+1 = Stencil
3141 * vN+2 = SampleMask
3142 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
3143 *
3144 * The alpha-ref SGPR is returned via its original location.
3145 */
static void si_llvm_return_fs_outputs(struct lp_build_tgsi_context *bld_base)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct lp_build_context *base = &bld_base->base;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = base->gallivm->builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};  /* indexed by semantic index, not output index */
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = ctx->soa.outputs[i][j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the Z component is relevant for depth. */
			depth = LLVMBuildLoad(builder,
					      ctx->soa.outputs[i][2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			stencil = LLVMBuildLoad(builder,
						ctx->soa.outputs[i][1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   ctx->soa.outputs[i][0], "");
			break;
		default:
			fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   bitcast(bld_base, TGSI_TYPE_SIGNED,
					   LLVMGetParam(ctx->main_fn,
							SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		/* Written colors have all 4 components set; skip unwritten ones. */
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	/* Depth/stencil/samplemask follow the colors, in that order. */
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3226
3227 /**
3228 * Given a v8i32 resource descriptor for a buffer, extract the size of the
3229 * buffer in number of elements and return it as an i32.
3230 */
static LLVMValueRef get_buffer_size(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef descriptor)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	/* Dword 6 of the descriptor holds NUM_RECORDS. */
	LLVMValueRef size =
		LLVMBuildExtractElement(builder, descriptor,
					lp_build_const_int32(gallivm, 6), "");

	if (ctx->screen->b.chip_class >= VI) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		/* Stride is the 14-bit field in bits [29:16] of dword 5. */
		LLVMValueRef stride =
			LLVMBuildExtractElement(builder, descriptor,
						lp_build_const_int32(gallivm, 5), "");
		stride = LLVMBuildLShr(builder, stride,
				       lp_build_const_int32(gallivm, 16), "");
		stride = LLVMBuildAnd(builder, stride,
				      lp_build_const_int32(gallivm, 0x3FFF), "");

		/* bytes / stride = number of elements */
		size = LLVMBuildUDiv(builder, size, stride, "");
	}

	return size;
}
3260
3261 /**
3262 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
3263 * intrinsic names).
3264 */
3265 static void build_type_name_for_intr(
3266 LLVMTypeRef type,
3267 char *buf, unsigned bufsize)
3268 {
3269 LLVMTypeRef elem_type = type;
3270
3271 assert(bufsize >= 8);
3272
3273 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
3274 int ret = snprintf(buf, bufsize, "v%u",
3275 LLVMGetVectorSize(type));
3276 if (ret < 0) {
3277 char *type_name = LLVMPrintTypeToString(type);
3278 fprintf(stderr, "Error building type name for: %s\n",
3279 type_name);
3280 return;
3281 }
3282 elem_type = LLVMGetElementType(type);
3283 buf += ret;
3284 bufsize -= ret;
3285 }
3286 switch (LLVMGetTypeKind(elem_type)) {
3287 default: break;
3288 case LLVMIntegerTypeKind:
3289 snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
3290 break;
3291 case LLVMFloatTypeKind:
3292 snprintf(buf, bufsize, "f32");
3293 break;
3294 case LLVMDoubleTypeKind:
3295 snprintf(buf, bufsize, "f64");
3296 break;
3297 }
3298 }
3299
3300 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
3301 struct lp_build_tgsi_context *bld_base,
3302 struct lp_build_emit_data *emit_data);
3303
3304 /* Prevent optimizations (at least of memory accesses) across the current
3305 * point in the program by emitting empty inline assembly that is marked as
3306 * having side effects.
3307 */
static void emit_optimization_barrier(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	/* void ""() with side effects: LLVM cannot move memory accesses
	 * across an opaque asm call it must assume touches memory. */
	LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
	LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
	LLVMBuildCall(builder, inlineasm, NULL, 0, "");
}
3315
/* Emit an s_waitcnt instruction to order memory operations (used for
 * TGSI MEMBAR and volatile loads/stores). */
static void emit_waitcnt(struct si_shader_context *ctx)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef args[1] = {
		/* NOTE(review): 0xf70 is the raw s_waitcnt immediate; it
		 * appears to request vmcnt(0) while leaving the exp/lgkm
		 * fields at "no wait" — confirm against the ISA encoding. */
		lp_build_const_int32(gallivm, 0xf70)
	};
	lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
			   ctx->voidt, args, 1, 0);
}
3326
/* TGSI MEMBAR: order surrounding memory operations with a waitcnt. */
static void membar_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	emit_waitcnt(si_shader_context(bld_base));
}
3336
/* Load the v4i32 buffer resource descriptor for a TGSI shader-buffer
 * source register, with indirect indices clamped to the valid range. */
static LLVMValueRef
shader_buffer_fetch_rsrc(struct si_shader_context *ctx,
			 const struct tgsi_full_src_register *reg)
{
	LLVMValueRef index;
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     SI_PARAM_SHADER_BUFFERS);

	if (!reg->Register.Indirect)
		index = LLVMConstInt(ctx->i32, reg->Register.Index, 0);
	else
		index = get_bounded_indirect_index(ctx, &reg->Indirect,
						   reg->Register.Index,
						   SI_NUM_SHADER_BUFFERS);

	return build_indexed_load_const(ctx, rsrc_ptr, index);
}
3354
3355 static bool tgsi_is_array_sampler(unsigned target)
3356 {
3357 return target == TGSI_TEXTURE_1D_ARRAY ||
3358 target == TGSI_TEXTURE_SHADOW1D_ARRAY ||
3359 target == TGSI_TEXTURE_2D_ARRAY ||
3360 target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
3361 target == TGSI_TEXTURE_CUBE_ARRAY ||
3362 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY ||
3363 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3364 }
3365
3366 static bool tgsi_is_array_image(unsigned target)
3367 {
3368 return target == TGSI_TEXTURE_3D ||
3369 target == TGSI_TEXTURE_CUBE ||
3370 target == TGSI_TEXTURE_1D_ARRAY ||
3371 target == TGSI_TEXTURE_2D_ARRAY ||
3372 target == TGSI_TEXTURE_CUBE_ARRAY ||
3373 target == TGSI_TEXTURE_2D_ARRAY_MSAA;
3374 }
3375
3376 /**
3377 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
3378 *
3379 * At least on Tonga, executing image stores on images with DCC enabled and
3380 * non-trivial can eventually lead to lockups. This can occur when an
3381 * application binds an image as read-only but then uses a shader that writes
3382 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
3383 * program termination) in this case, but it doesn't cost much to be a bit
3384 * nicer: disabling DCC in the shader still leads to undefined results but
3385 * avoids the lockup.
3386 */
3387 static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
3388 LLVMValueRef rsrc)
3389 {
3390 if (ctx->screen->b.chip_class <= CIK) {
3391 return rsrc;
3392 } else {
3393 LLVMBuilderRef builder = ctx->gallivm.builder;
3394 LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
3395 LLVMValueRef i32_C = LLVMConstInt(ctx->i32, C_008F28_COMPRESSION_EN, 0);
3396 LLVMValueRef tmp;
3397
3398 tmp = LLVMBuildExtractElement(builder, rsrc, i32_6, "");
3399 tmp = LLVMBuildAnd(builder, tmp, i32_C, "");
3400 return LLVMBuildInsertElement(builder, rsrc, tmp, i32_6, "");
3401 }
3402 }
3403
3404 /**
3405 * Load the resource descriptor for \p image.
3406 */
static void
image_fetch_rsrc(
	struct lp_build_tgsi_context *bld_base,
	const struct tgsi_full_src_register *image,
	bool dcc_off,
	LLVMValueRef *rsrc)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
					     SI_PARAM_IMAGES);
	LLVMValueRef index, tmp;

	assert(image->Register.File == TGSI_FILE_IMAGE);

	if (!image->Register.Indirect) {
		const struct tgsi_shader_info *info = bld_base->info;

		index = LLVMConstInt(ctx->i32, image->Register.Index, 0);

		/* For directly-indexed images we know statically whether the
		 * image is written; non-buffer writable images must have DCC
		 * disabled (see force_dcc_off). */
		if (info->images_writemask & (1 << image->Register.Index) &&
		    !(info->images_buffers & (1 << image->Register.Index)))
			dcc_off = true;
	} else {
		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = get_bounded_indirect_index(ctx, &image->Indirect,
						   image->Register.Index,
						   SI_NUM_IMAGES);
	}

	tmp = build_indexed_load_const(ctx, rsrc_ptr, index);
	if (dcc_off)
		tmp = force_dcc_off(ctx, tmp);
	*rsrc = tmp;
}
3449
/* Fetch the coordinate operand of an image instruction and return it as
 * a single i32 or a vector of i32 (bitcast from the fetched values). */
static LLVMValueRef image_fetch_coords(
		struct lp_build_tgsi_context *bld_base,
		const struct tgsi_full_instruction *inst,
		unsigned src)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned target = inst->Memory.Texture;
	unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
	LLVMValueRef coords[4];
	LLVMValueRef tmp;
	int chan;

	for (chan = 0; chan < num_coords; ++chan) {
		tmp = lp_build_emit_fetch(bld_base, inst, src, chan);
		tmp = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
		coords[chan] = tmp;
	}

	if (num_coords == 1)
		return coords[0];

	if (num_coords == 3) {
		/* LLVM has difficulties lowering 3-element vectors. */
		coords[3] = bld_base->uint_bld.undef;
		num_coords = 4;
	}

	return lp_build_gather_values(gallivm, coords, num_coords);
}
3480
3481 /**
3482 * Append the extra mode bits that are used by image load and store.
3483 */
static void image_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data * emit_data,
		unsigned target,
		bool atomic)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
	LLVMValueRef r128 = i1false;
	LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
	/* GLC forces a coherent access for coherent/volatile qualifiers. */
	LLVMValueRef glc =
		inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
		i1true : i1false;
	LLVMValueRef slc = i1false;
	LLVMValueRef lwe = i1false;

	/* Old intrinsic argument order (atomics, and LLVM <= 3.9):
	 * r128, da, [glc,] slc */
	if (atomic || (HAVE_LLVM <= 0x0309)) {
		emit_data->args[emit_data->arg_count++] = r128;
		emit_data->args[emit_data->arg_count++] = da;
		if (!atomic) {
			emit_data->args[emit_data->arg_count++] = glc;
		}
		emit_data->args[emit_data->arg_count++] = slc;
		return;
	}

	/* HAVE_LLVM >= 0x0400 */
	emit_data->args[emit_data->arg_count++] = glc;
	emit_data->args[emit_data->arg_count++] = slc;
	emit_data->args[emit_data->arg_count++] = lwe;
	emit_data->args[emit_data->arg_count++] = da;
}
3517
3518 /**
3519 * Given a 256 bit resource, extract the top half (which stores the buffer
3520 * resource in the case of textures and images).
3521 */
static LLVMValueRef extract_rsrc_top_half(
		struct si_shader_context *ctx,
		LLVMValueRef rsrc)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
	LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);

	/* View the 256-bit descriptor as two i128 halves and take the
	 * upper one, then reinterpret it as a v4i32 buffer descriptor. */
	rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, v2i128, "");
	rsrc = LLVMBuildExtractElement(gallivm->builder, rsrc, bld_base->uint_bld.one, "");
	rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, "");

	return rsrc;
}
3536
3537 /**
3538 * Append the resource and indexing arguments for buffer intrinsics.
3539 *
3540 * \param rsrc the v4i32 buffer resource
3541 * \param index index into the buffer (stride-based)
3542 * \param offset byte offset into the buffer
3543 */
static void buffer_append_args(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data,
		LLVMValueRef rsrc,
		LLVMValueRef index,
		LLVMValueRef offset,
		bool atomic)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
	LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);

	emit_data->args[emit_data->arg_count++] = rsrc;
	emit_data->args[emit_data->arg_count++] = index; /* vindex */
	emit_data->args[emit_data->arg_count++] = offset; /* voffset */
	/* Atomics take no GLC argument; for loads/stores GLC is set when
	 * the TGSI memory access is coherent or volatile. */
	if (!atomic) {
		emit_data->args[emit_data->arg_count++] =
			inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
			i1true : i1false; /* glc */
	}
	emit_data->args[emit_data->arg_count++] = i1false; /* slc */
}
3566
/* Build the intrinsic argument list for TGSI LOAD from a shader buffer
 * or an image (the memory case is handled entirely in load_emit). */
static void load_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	unsigned target = inst->Memory.Texture;
	LLVMValueRef rsrc;

	emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMBuilderRef builder = gallivm->builder;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* Src[1].x holds the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0], false, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use a buffer intrinsic with the
			 * coordinate as the element index. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[0] = coords;
			emit_data->args[1] = rsrc;
			emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 3;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3611
/* Emit a TGSI LOAD from a shader buffer, choosing the narrowest
 * buffer.load intrinsic that covers the destination writemask. */
static void load_emit_buffer(struct si_shader_context *ctx,
			     struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	uint writemask = inst->Dst[0].Register.WriteMask;
	uint count = util_last_bit(writemask);
	const char *intrinsic_name;
	LLVMTypeRef dst_type;

	switch (count) {
	case 1:
		intrinsic_name = "llvm.amdgcn.buffer.load.f32";
		dst_type = ctx->f32;
		break;
	case 2:
		intrinsic_name = "llvm.amdgcn.buffer.load.v2f32";
		dst_type = LLVMVectorType(ctx->f32, 2);
		break;
	default: // 3 & 4
		/* No 3-element variant; round up to 4. */
		intrinsic_name = "llvm.amdgcn.buffer.load.v4f32";
		dst_type = ctx->v4f32;
		count = 4;
	}

	emit_data->output[emit_data->chan] = lp_build_intrinsic(
			builder, intrinsic_name, dst_type,
			emit_data->args, emit_data->arg_count,
			LLVMReadOnlyAttribute);
}
3643
/* Return a pointer of the given \p type into LDS (shared memory), at the
 * offset fetched from source operand \p arg of \p inst. */
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
				   const struct tgsi_full_instruction *inst,
				   LLVMTypeRef type, int arg)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef offset, ptr;
	int addr_space;

	offset = lp_build_emit_fetch(&ctx->soa.bld_base, inst, arg, 0);
	offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");

	ptr = ctx->shared_memory;
	ptr = LLVMBuildGEP(builder, ptr, &offset, 1, "");
	/* Keep the original (LDS) address space across the type change. */
	addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, addr_space), "");

	return ptr;
}
3663
/* Emit a TGSI LOAD from shared (LDS) memory: load each enabled channel
 * individually and gather them into a 4-element result. */
static void load_emit_memory(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct lp_build_context *base = &ctx->soa.bld_base.base;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned writemask = inst->Dst[0].Register.WriteMask;
	LLVMValueRef channels[4], ptr, derived_ptr, index;
	int chan;

	ptr = get_memory_ptr(ctx, inst, base->elem_type, 1);

	for (chan = 0; chan < 4; ++chan) {
		/* Channels outside the writemask stay undefined. */
		if (!(writemask & (1 << chan))) {
			channels[chan] = LLVMGetUndef(base->elem_type);
			continue;
		}

		index = lp_build_const_int32(gallivm, chan);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		channels[chan] = LLVMBuildLoad(builder, derived_ptr, "");
	}
	emit_data->output[emit_data->chan] = lp_build_gather_values(gallivm, channels, 4);
}
3690
/* Build the overloaded name of an image intrinsic, e.g.
 * "llvm.amdgcn.image.load.v4f32.v4i32.v8i32". LLVM <= 3.9 intrinsics are
 * only overloaded on the coordinate type. */
static void get_image_intr_name(const char *base_name,
				LLVMTypeRef data_type,
				LLVMTypeRef coords_type,
				LLVMTypeRef rsrc_type,
				char *out_name, unsigned out_len)
{
	char coords_type_name[8];

	build_type_name_for_intr(coords_type, coords_type_name,
				 sizeof(coords_type_name));

	if (HAVE_LLVM <= 0x0309) {
		snprintf(out_name, out_len, "%s.%s", base_name, coords_type_name);
	} else {
		char data_type_name[8];
		char rsrc_type_name[8];

		build_type_name_for_intr(data_type, data_type_name,
					 sizeof(data_type_name));
		build_type_name_for_intr(rsrc_type, rsrc_type_name,
					 sizeof(rsrc_type_name));
		snprintf(out_name, out_len, "%s.%s.%s.%s", base_name,
			 data_type_name, coords_type_name, rsrc_type_name);
	}
}
3716
/* Emit a TGSI LOAD instruction, dispatching on the source register file:
 * shared memory, shader buffer, buffer image, or regular image. */
static void load_emit(
		const struct lp_build_tgsi_action *action,
		struct lp_build_tgsi_context *bld_base,
		struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	char intrinsic_name[64];

	if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
		load_emit_memory(ctx, emit_data);
		return;
	}

	/* Volatile loads must not be reordered with earlier memory ops. */
	if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
		emit_waitcnt(ctx);

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		load_emit_buffer(ctx, emit_data);
		return;
	}

	if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, "llvm.amdgcn.buffer.load.format.v4f32", emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	} else {
		get_image_intr_name("llvm.amdgcn.image.load",
				    emit_data->dst_type, /* vdata */
				    LLVMTypeOf(emit_data->args[0]), /* coords */
				    LLVMTypeOf(emit_data->args[1]), /* rsrc */
				    intrinsic_name, sizeof(intrinsic_name));

		emit_data->output[emit_data->chan] =
			lp_build_intrinsic(
				builder, intrinsic_name, emit_data->dst_type,
				emit_data->args, emit_data->arg_count,
				LLVMReadOnlyAttribute);
	}
}
3761
/* Build the intrinsic argument list for TGSI STORE: the data vector first,
 * then resource/offset arguments for shader buffers or images. */
static void store_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct tgsi_full_src_register memory;
	LLVMValueRef chans[4];
	LLVMValueRef data;
	LLVMValueRef rsrc;
	unsigned chan;

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);

	/* Src[1] holds the value to store; gather all 4 channels. */
	for (chan = 0; chan < 4; ++chan) {
		chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
	}
	data = lp_build_gather_values(gallivm, chans, 4);

	emit_data->args[emit_data->arg_count++] = data;

	/* The destination register names the buffer/image being written. */
	memory = tgsi_full_src_register_from_dst(&inst->Dst[0]);

	if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;
		LLVMValueRef tmp;

		rsrc = shader_buffer_fetch_rsrc(ctx, &memory);

		/* Src[0].x holds the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, false);
	} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		coords = image_fetch_coords(bld_base, inst, 0);

		if (target == TGSI_TEXTURE_BUFFER) {
			image_fetch_rsrc(bld_base, &memory, false, &rsrc);

			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, false);
		} else {
			emit_data->args[1] = coords;
			/* dcc_off = true: image stores require DCC disabled. */
			image_fetch_rsrc(bld_base, &memory, true, &emit_data->args[2]);
			emit_data->args[3] = lp_build_const_int32(gallivm, 15); /* dmask */
			emit_data->arg_count = 4;

			image_append_args(ctx, emit_data, target, false);
		}
	}
}
3820
/**
 * Emit one or more llvm.amdgcn.buffer.store.* intrinsics for an SSBO store.
 *
 * The destination writemask is decomposed into runs of consecutive
 * components; each run is emitted as a separate 1-, 2- or 4-component
 * store with the byte offset adjusted for the run's start component.
 * Arg slots 0 (vdata) and 3 (offset) set up by store_fetch_args are
 * overwritten per-run; the other slots (rsrc, vindex, glc/slc) are reused.
 */
static void store_emit_buffer(
		struct si_shader_context *ctx,
		struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	struct lp_build_context *uint_bld = &ctx->soa.bld_base.uint_bld;
	LLVMValueRef base_data = emit_data->args[0];
	LLVMValueRef base_offset = emit_data->args[3];
	unsigned writemask = inst->Dst[0].Register.WriteMask;

	while (writemask) {
		int start, count;
		const char *intrinsic_name;
		LLVMValueRef data;
		LLVMValueRef offset;
		LLVMValueRef tmp;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation, split 3-element writes
		 * into a 2-element and a 1-element write. */
		if (count == 3) {
			/* Re-queue the third component for the next
			 * iteration of the writemask scan. */
			writemask |= 1 << (start + 2);
			count = 2;
		}

		if (count == 4) {
			/* Full write: the gathered vec4 can be stored as-is. */
			data = base_data;
			intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
		} else if (count == 2) {
			LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);

			/* Build a v2f32 from components [start, start+1]. */
			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			data = LLVMBuildInsertElement(
				builder, LLVMGetUndef(v2f32), tmp,
				uint_bld->zero, "");

			tmp = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start + 1), "");
			data = LLVMBuildInsertElement(
				builder, data, tmp, uint_bld->one, "");

			intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
		} else {
			assert(count == 1);
			data = LLVMBuildExtractElement(
				builder, base_data,
				lp_build_const_int32(gallivm, start), "");
			intrinsic_name = "llvm.amdgcn.buffer.store.f32";
		}

		/* Advance the byte offset to the run's first component. */
		offset = base_offset;
		if (start != 0) {
			offset = LLVMBuildAdd(
				builder, offset,
				lp_build_const_int32(gallivm, start * 4), "");
		}

		emit_data->args[0] = data;
		emit_data->args[3] = offset;

		lp_build_intrinsic(
			builder, intrinsic_name, emit_data->dst_type,
			emit_data->args, emit_data->arg_count, 0);
	}
}
3892
3893 static void store_emit_memory(
3894 struct si_shader_context *ctx,
3895 struct lp_build_emit_data *emit_data)
3896 {
3897 const struct tgsi_full_instruction *inst = emit_data->inst;
3898 struct gallivm_state *gallivm = &ctx->gallivm;
3899 struct lp_build_context *base = &ctx->soa.bld_base.base;
3900 LLVMBuilderRef builder = gallivm->builder;
3901 unsigned writemask = inst->Dst[0].Register.WriteMask;
3902 LLVMValueRef ptr, derived_ptr, data, index;
3903 int chan;
3904
3905 ptr = get_memory_ptr(ctx, inst, base->elem_type, 0);
3906
3907 for (chan = 0; chan < 4; ++chan) {
3908 if (!(writemask & (1 << chan))) {
3909 continue;
3910 }
3911 data = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 1, chan);
3912 index = lp_build_const_int32(gallivm, chan);
3913 derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
3914 LLVMBuildStore(builder, data, derived_ptr);
3915 }
3916 }
3917
3918 static void store_emit(
3919 const struct lp_build_tgsi_action *action,
3920 struct lp_build_tgsi_context *bld_base,
3921 struct lp_build_emit_data *emit_data)
3922 {
3923 struct si_shader_context *ctx = si_shader_context(bld_base);
3924 struct gallivm_state *gallivm = bld_base->base.gallivm;
3925 LLVMBuilderRef builder = gallivm->builder;
3926 const struct tgsi_full_instruction * inst = emit_data->inst;
3927 unsigned target = inst->Memory.Texture;
3928 char intrinsic_name[64];
3929
3930 if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
3931 store_emit_memory(ctx, emit_data);
3932 return;
3933 }
3934
3935 if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
3936 emit_waitcnt(ctx);
3937
3938 if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
3939 store_emit_buffer(ctx, emit_data);
3940 return;
3941 }
3942
3943 if (target == TGSI_TEXTURE_BUFFER) {
3944 emit_data->output[emit_data->chan] = lp_build_intrinsic(
3945 builder, "llvm.amdgcn.buffer.store.format.v4f32",
3946 emit_data->dst_type, emit_data->args,
3947 emit_data->arg_count, 0);
3948 } else {
3949 get_image_intr_name("llvm.amdgcn.image.store",
3950 LLVMTypeOf(emit_data->args[0]), /* vdata */
3951 LLVMTypeOf(emit_data->args[1]), /* coords */
3952 LLVMTypeOf(emit_data->args[2]), /* rsrc */
3953 intrinsic_name, sizeof(intrinsic_name));
3954
3955 emit_data->output[emit_data->chan] =
3956 lp_build_intrinsic(
3957 builder, intrinsic_name, emit_data->dst_type,
3958 emit_data->args, emit_data->arg_count, 0);
3959 }
3960 }
3961
/**
 * Fetch the arguments for a TGSI atomic instruction.
 *
 * Packs the data operand(s) first (for ATOMCAS, the compare value data2
 * precedes the source value data1 to match the hardware cmpswap order),
 * then appends the resource and addressing arguments for either the
 * buffer or the image atomic intrinsic.
 */
static void atomic_fetch_args(
		struct lp_build_tgsi_context * bld_base,
		struct lp_build_emit_data * emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	const struct tgsi_full_instruction * inst = emit_data->inst;
	LLVMValueRef data1, data2;
	LLVMValueRef rsrc;
	LLVMValueRef tmp;

	/* Atomics return the previous value as a scalar. */
	emit_data->dst_type = bld_base->base.elem_type;

	/* src 2 is the value operand; bitcast to i32. */
	tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
	data1 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
		/* src 3 is the comparison value for compare-and-swap. */
		tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
		data2 = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
	}

	/* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
	 * of arguments, which is reversed relative to TGSI (and GLSL)
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
		emit_data->args[emit_data->arg_count++] = data2;
	emit_data->args[emit_data->arg_count++] = data1;

	if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
		LLVMValueRef offset;

		rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0]);

		/* src 1 is the byte offset into the buffer. */
		tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
		offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");

		buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
				   offset, true);
	} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
		unsigned target = inst->Memory.Texture;
		LLVMValueRef coords;

		image_fetch_rsrc(bld_base, &inst->Src[0],
				 target != TGSI_TEXTURE_BUFFER, &rsrc);
		coords = image_fetch_coords(bld_base, inst, 1);

		if (target == TGSI_TEXTURE_BUFFER) {
			/* Buffer images use only the buffer-resource half
			 * of the image descriptor. */
			rsrc = extract_rsrc_top_half(ctx, rsrc);
			buffer_append_args(ctx, emit_data, rsrc, coords,
					   bld_base->uint_bld.zero, true);
		} else {
			emit_data->args[emit_data->arg_count++] = coords;
			emit_data->args[emit_data->arg_count++] = rsrc;

			image_append_args(ctx, emit_data, target, true);
		}
	}
}
4021
4022 static void atomic_emit_memory(struct si_shader_context *ctx,
4023 struct lp_build_emit_data *emit_data) {
4024 struct gallivm_state *gallivm = &ctx->gallivm;
4025 LLVMBuilderRef builder = gallivm->builder;
4026 const struct tgsi_full_instruction * inst = emit_data->inst;
4027 LLVMValueRef ptr, result, arg;
4028
4029 ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
4030
4031 arg = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 2, 0);
4032 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4033
4034 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
4035 LLVMValueRef new_data;
4036 new_data = lp_build_emit_fetch(&ctx->soa.bld_base,
4037 inst, 3, 0);
4038
4039 new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");
4040
4041 #if HAVE_LLVM >= 0x309
4042 result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
4043 LLVMAtomicOrderingSequentiallyConsistent,
4044 LLVMAtomicOrderingSequentiallyConsistent,
4045 false);
4046 #endif
4047
4048 result = LLVMBuildExtractValue(builder, result, 0, "");
4049 } else {
4050 LLVMAtomicRMWBinOp op;
4051
4052 switch(inst->Instruction.Opcode) {
4053 case TGSI_OPCODE_ATOMUADD:
4054 op = LLVMAtomicRMWBinOpAdd;
4055 break;
4056 case TGSI_OPCODE_ATOMXCHG:
4057 op = LLVMAtomicRMWBinOpXchg;
4058 break;
4059 case TGSI_OPCODE_ATOMAND:
4060 op = LLVMAtomicRMWBinOpAnd;
4061 break;
4062 case TGSI_OPCODE_ATOMOR:
4063 op = LLVMAtomicRMWBinOpOr;
4064 break;
4065 case TGSI_OPCODE_ATOMXOR:
4066 op = LLVMAtomicRMWBinOpXor;
4067 break;
4068 case TGSI_OPCODE_ATOMUMIN:
4069 op = LLVMAtomicRMWBinOpUMin;
4070 break;
4071 case TGSI_OPCODE_ATOMUMAX:
4072 op = LLVMAtomicRMWBinOpUMax;
4073 break;
4074 case TGSI_OPCODE_ATOMIMIN:
4075 op = LLVMAtomicRMWBinOpMin;
4076 break;
4077 case TGSI_OPCODE_ATOMIMAX:
4078 op = LLVMAtomicRMWBinOpMax;
4079 break;
4080 default:
4081 unreachable("unknown atomic opcode");
4082 }
4083
4084 result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
4085 LLVMAtomicOrderingSequentiallyConsistent,
4086 false);
4087 }
4088 emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
4089 }
4090
4091 static void atomic_emit(
4092 const struct lp_build_tgsi_action *action,
4093 struct lp_build_tgsi_context *bld_base,
4094 struct lp_build_emit_data *emit_data)
4095 {
4096 struct si_shader_context *ctx = si_shader_context(bld_base);
4097 struct gallivm_state *gallivm = bld_base->base.gallivm;
4098 LLVMBuilderRef builder = gallivm->builder;
4099 const struct tgsi_full_instruction * inst = emit_data->inst;
4100 char intrinsic_name[40];
4101 LLVMValueRef tmp;
4102
4103 if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
4104 atomic_emit_memory(ctx, emit_data);
4105 return;
4106 }
4107
4108 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
4109 inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
4110 snprintf(intrinsic_name, sizeof(intrinsic_name),
4111 "llvm.amdgcn.buffer.atomic.%s", action->intr_name);
4112 } else {
4113 LLVMValueRef coords;
4114 char coords_type[8];
4115
4116 if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
4117 coords = emit_data->args[2];
4118 else
4119 coords = emit_data->args[1];
4120
4121 build_type_name_for_intr(LLVMTypeOf(coords), coords_type, sizeof(coords_type));
4122 snprintf(intrinsic_name, sizeof(intrinsic_name),
4123 "llvm.amdgcn.image.atomic.%s.%s",
4124 action->intr_name, coords_type);
4125 }
4126
4127 tmp = lp_build_intrinsic(
4128 builder, intrinsic_name, bld_base->uint_bld.elem_type,
4129 emit_data->args, emit_data->arg_count, 0);
4130 emit_data->output[emit_data->chan] =
4131 LLVMBuildBitCast(builder, tmp, bld_base->base.elem_type, "");
4132 }
4133
4134 static void resq_fetch_args(
4135 struct lp_build_tgsi_context * bld_base,
4136 struct lp_build_emit_data * emit_data)
4137 {
4138 struct si_shader_context *ctx = si_shader_context(bld_base);
4139 struct gallivm_state *gallivm = bld_base->base.gallivm;
4140 const struct tgsi_full_instruction *inst = emit_data->inst;
4141 const struct tgsi_full_src_register *reg = &inst->Src[0];
4142
4143 emit_data->dst_type = ctx->v4i32;
4144
4145 if (reg->Register.File == TGSI_FILE_BUFFER) {
4146 emit_data->args[0] = shader_buffer_fetch_rsrc(ctx, reg);
4147 emit_data->arg_count = 1;
4148 } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
4149 image_fetch_rsrc(bld_base, reg, false, &emit_data->args[0]);
4150 emit_data->arg_count = 1;
4151 } else {
4152 emit_data->args[0] = bld_base->uint_bld.zero; /* mip level */
4153 image_fetch_rsrc(bld_base, reg, false, &emit_data->args[1]);
4154 emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
4155 emit_data->args[3] = bld_base->uint_bld.zero; /* unorm */
4156 emit_data->args[4] = bld_base->uint_bld.zero; /* r128 */
4157 emit_data->args[5] = tgsi_is_array_image(inst->Memory.Texture) ?
4158 bld_base->uint_bld.one : bld_base->uint_bld.zero; /* da */
4159 emit_data->args[6] = bld_base->uint_bld.zero; /* glc */
4160 emit_data->args[7] = bld_base->uint_bld.zero; /* slc */
4161 emit_data->args[8] = bld_base->uint_bld.zero; /* tfe */
4162 emit_data->args[9] = bld_base->uint_bld.zero; /* lwe */
4163 emit_data->arg_count = 10;
4164 }
4165 }
4166
4167 static void resq_emit(
4168 const struct lp_build_tgsi_action *action,
4169 struct lp_build_tgsi_context *bld_base,
4170 struct lp_build_emit_data *emit_data)
4171 {
4172 struct gallivm_state *gallivm = bld_base->base.gallivm;
4173 LLVMBuilderRef builder = gallivm->builder;
4174 const struct tgsi_full_instruction *inst = emit_data->inst;
4175 LLVMValueRef out;
4176
4177 if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
4178 out = LLVMBuildExtractElement(builder, emit_data->args[0],
4179 lp_build_const_int32(gallivm, 2), "");
4180 } else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
4181 out = get_buffer_size(bld_base, emit_data->args[0]);
4182 } else {
4183 out = lp_build_intrinsic(
4184 builder, "llvm.SI.getresinfo.i32", emit_data->dst_type,
4185 emit_data->args, emit_data->arg_count,
4186 LLVMReadNoneAttribute);
4187
4188 /* Divide the number of layers by 6 to get the number of cubes. */
4189 if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY) {
4190 LLVMValueRef imm2 = lp_build_const_int32(gallivm, 2);
4191 LLVMValueRef imm6 = lp_build_const_int32(gallivm, 6);
4192
4193 LLVMValueRef z = LLVMBuildExtractElement(builder, out, imm2, "");
4194 z = LLVMBuildSDiv(builder, z, imm6, "");
4195 out = LLVMBuildInsertElement(builder, out, z, imm2, "");
4196 }
4197 }
4198
4199 emit_data->output[emit_data->chan] = out;
4200 }
4201
/**
 * Pack the argument list for a texture intrinsic.
 *
 * The coordinate words in "param" are padded to a power-of-two vector
 * and stored as arg 0, followed by the resource, the sampler state
 * (except for TXF/TXQ, which are unsampled and return integers) and the
 * fixed trailing flags (dmask, unorm, r128, da, glc, slc, tfe, lwe).
 */
static void set_tex_fetch_args(struct si_shader_context *ctx,
			       struct lp_build_emit_data *emit_data,
			       unsigned opcode, unsigned target,
			       LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
			       LLVMValueRef *param, unsigned count,
			       unsigned dmask)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	unsigned num_args;
	unsigned is_rect = target == TGSI_TEXTURE_RECT;

	/* Pad to power of two vector */
	while (count < util_next_power_of_two(count))
		param[count++] = LLVMGetUndef(ctx->i32);

	/* Texture coordinates. */
	if (count > 1)
		emit_data->args[0] = lp_build_gather_values(gallivm, param, count);
	else
		emit_data->args[0] = param[0];

	/* Resource. */
	emit_data->args[1] = res_ptr;
	num_args = 2;

	if (opcode == TGSI_OPCODE_TXF || opcode == TGSI_OPCODE_TXQ)
		/* Unsampled fetches/queries return raw integer data. */
		emit_data->dst_type = ctx->v4i32;
	else {
		emit_data->dst_type = ctx->v4f32;

		emit_data->args[num_args++] = samp_ptr;
	}

	emit_data->args[num_args++] = lp_build_const_int32(gallivm, dmask);
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, is_rect); /* unorm */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* r128 */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm,
					  tgsi_is_array_sampler(target)); /* da */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* glc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* slc */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* tfe */
	emit_data->args[num_args++] = lp_build_const_int32(gallivm, 0); /* lwe */

	emit_data->arg_count = num_args;
}
4247
4248 static const struct lp_build_tgsi_action tex_action;
4249
/* Kind of descriptor to load from the per-shader sampler view list. */
enum desc_type {
	DESC_IMAGE,   /* image/texture resource descriptor */
	DESC_FMASK,   /* MSAA FMASK descriptor */
	DESC_SAMPLER  /* sampler state */
};
4255
4256 static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
4257 {
4258 return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
4259 CONST_ADDR_SPACE);
4260 }
4261
4262 /**
4263 * Load an image view, fmask view. or sampler state descriptor.
4264 */
4265 static LLVMValueRef load_sampler_desc_custom(struct si_shader_context *ctx,
4266 LLVMValueRef list, LLVMValueRef index,
4267 enum desc_type type)
4268 {
4269 struct gallivm_state *gallivm = &ctx->gallivm;
4270 LLVMBuilderRef builder = gallivm->builder;
4271
4272 switch (type) {
4273 case DESC_IMAGE:
4274 /* The image is at [0:7]. */
4275 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4276 break;
4277 case DESC_FMASK:
4278 /* The FMASK is at [8:15]. */
4279 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
4280 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
4281 break;
4282 case DESC_SAMPLER:
4283 /* The sampler state is at [12:15]. */
4284 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
4285 index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
4286 list = LLVMBuildPointerCast(builder, list,
4287 const_array(ctx->v4i32, 0), "");
4288 break;
4289 }
4290
4291 return build_indexed_load_const(ctx, list, index);
4292 }
4293
4294 static LLVMValueRef load_sampler_desc(struct si_shader_context *ctx,
4295 LLVMValueRef index, enum desc_type type)
4296 {
4297 LLVMValueRef list = LLVMGetParam(ctx->main_fn,
4298 SI_PARAM_SAMPLERS);
4299
4300 return load_sampler_desc_custom(ctx, list, index, type);
4301 }
4302
4303 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
4304 *
4305 * SI-CI:
4306 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
4307 * filtering manually. The driver sets img7 to a mask clearing
4308 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
4309 * s_and_b32 samp0, samp0, img7
4310 *
4311 * VI:
4312 * The ANISO_OVERRIDE sampler field enables this fix in TA.
4313 */
4314 static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
4315 LLVMValueRef res, LLVMValueRef samp)
4316 {
4317 LLVMBuilderRef builder = ctx->gallivm.builder;
4318 LLVMValueRef img7, samp0;
4319
4320 if (ctx->screen->b.chip_class >= VI)
4321 return samp;
4322
4323 img7 = LLVMBuildExtractElement(builder, res,
4324 LLVMConstInt(ctx->i32, 7, 0), "");
4325 samp0 = LLVMBuildExtractElement(builder, samp,
4326 LLVMConstInt(ctx->i32, 0, 0), "");
4327 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
4328 return LLVMBuildInsertElement(builder, samp, samp0,
4329 LLVMConstInt(ctx->i32, 0, 0), "");
4330 }
4331
4332 static void tex_fetch_ptrs(
4333 struct lp_build_tgsi_context *bld_base,
4334 struct lp_build_emit_data *emit_data,
4335 LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
4336 {
4337 struct si_shader_context *ctx = si_shader_context(bld_base);
4338 const struct tgsi_full_instruction *inst = emit_data->inst;
4339 unsigned target = inst->Texture.Texture;
4340 unsigned sampler_src;
4341 unsigned sampler_index;
4342 LLVMValueRef index;
4343
4344 sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
4345 sampler_index = emit_data->inst->Src[sampler_src].Register.Index;
4346
4347 if (emit_data->inst->Src[sampler_src].Register.Indirect) {
4348 const struct tgsi_full_src_register *reg = &emit_data->inst->Src[sampler_src];
4349
4350 index = get_bounded_indirect_index(ctx,
4351 &reg->Indirect,
4352 reg->Register.Index,
4353 SI_NUM_SAMPLERS);
4354 } else {
4355 index = LLVMConstInt(ctx->i32, sampler_index, 0);
4356 }
4357
4358 *res_ptr = load_sampler_desc(ctx, index, DESC_IMAGE);
4359
4360 if (target == TGSI_TEXTURE_2D_MSAA ||
4361 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
4362 if (samp_ptr)
4363 *samp_ptr = NULL;
4364 if (fmask_ptr)
4365 *fmask_ptr = load_sampler_desc(ctx, index, DESC_FMASK);
4366 } else {
4367 if (samp_ptr) {
4368 *samp_ptr = load_sampler_desc(ctx, index, DESC_SAMPLER);
4369 *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
4370 }
4371 if (fmask_ptr)
4372 *fmask_ptr = NULL;
4373 }
4374 }
4375
4376 static void txq_fetch_args(
4377 struct lp_build_tgsi_context *bld_base,
4378 struct lp_build_emit_data *emit_data)
4379 {
4380 struct si_shader_context *ctx = si_shader_context(bld_base);
4381 struct gallivm_state *gallivm = bld_base->base.gallivm;
4382 LLVMBuilderRef builder = gallivm->builder;
4383 const struct tgsi_full_instruction *inst = emit_data->inst;
4384 unsigned target = inst->Texture.Texture;
4385 LLVMValueRef res_ptr;
4386 LLVMValueRef address;
4387
4388 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, NULL, NULL);
4389
4390 if (target == TGSI_TEXTURE_BUFFER) {
4391 /* Read the size from the buffer descriptor directly. */
4392 LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4393 emit_data->args[0] = get_buffer_size(bld_base, res);
4394 return;
4395 }
4396
4397 /* Textures - set the mip level. */
4398 address = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
4399
4400 set_tex_fetch_args(ctx, emit_data, TGSI_OPCODE_TXQ, target, res_ptr,
4401 NULL, &address, 1, 0xf);
4402 }
4403
4404 static void txq_emit(const struct lp_build_tgsi_action *action,
4405 struct lp_build_tgsi_context *bld_base,
4406 struct lp_build_emit_data *emit_data)
4407 {
4408 struct lp_build_context *base = &bld_base->base;
4409 unsigned target = emit_data->inst->Texture.Texture;
4410
4411 if (target == TGSI_TEXTURE_BUFFER) {
4412 /* Just return the buffer size. */
4413 emit_data->output[emit_data->chan] = emit_data->args[0];
4414 return;
4415 }
4416
4417 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4418 base->gallivm->builder, "llvm.SI.getresinfo.i32",
4419 emit_data->dst_type, emit_data->args, emit_data->arg_count,
4420 LLVMReadNoneAttribute);
4421
4422 /* Divide the number of layers by 6 to get the number of cubes. */
4423 if (target == TGSI_TEXTURE_CUBE_ARRAY ||
4424 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4425 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
4426 LLVMValueRef two = lp_build_const_int32(bld_base->base.gallivm, 2);
4427 LLVMValueRef six = lp_build_const_int32(bld_base->base.gallivm, 6);
4428
4429 LLVMValueRef v4 = emit_data->output[emit_data->chan];
4430 LLVMValueRef z = LLVMBuildExtractElement(builder, v4, two, "");
4431 z = LLVMBuildSDiv(builder, z, six, "");
4432
4433 emit_data->output[emit_data->chan] =
4434 LLVMBuildInsertElement(builder, v4, z, two, "");
4435 }
4436 }
4437
4438 static void tex_fetch_args(
4439 struct lp_build_tgsi_context *bld_base,
4440 struct lp_build_emit_data *emit_data)
4441 {
4442 struct si_shader_context *ctx = si_shader_context(bld_base);
4443 struct gallivm_state *gallivm = bld_base->base.gallivm;
4444 const struct tgsi_full_instruction *inst = emit_data->inst;
4445 unsigned opcode = inst->Instruction.Opcode;
4446 unsigned target = inst->Texture.Texture;
4447 LLVMValueRef coords[5], derivs[6];
4448 LLVMValueRef address[16];
4449 unsigned num_coords = tgsi_util_get_texture_coord_dim(target);
4450 int ref_pos = tgsi_util_get_shadow_ref_src_index(target);
4451 unsigned count = 0;
4452 unsigned chan;
4453 unsigned num_deriv_channels = 0;
4454 bool has_offset = inst->Texture.NumOffsets > 0;
4455 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4456 unsigned dmask = 0xf;
4457
4458 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4459
4460 if (target == TGSI_TEXTURE_BUFFER) {
4461 LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
4462
4463 /* Bitcast and truncate v8i32 to v16i8. */
4464 LLVMValueRef res = res_ptr;
4465 res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
4466 res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
4467 res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");
4468
4469 emit_data->dst_type = ctx->v4f32;
4470 emit_data->args[0] = res;
4471 emit_data->args[1] = bld_base->uint_bld.zero;
4472 emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
4473 emit_data->arg_count = 3;
4474 return;
4475 }
4476
4477 /* Fetch and project texture coordinates */
4478 coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
4479 for (chan = 0; chan < 3; chan++ ) {
4480 coords[chan] = lp_build_emit_fetch(bld_base,
4481 emit_data->inst, 0,
4482 chan);
4483 if (opcode == TGSI_OPCODE_TXP)
4484 coords[chan] = lp_build_emit_llvm_binary(bld_base,
4485 TGSI_OPCODE_DIV,
4486 coords[chan],
4487 coords[3]);
4488 }
4489
4490 if (opcode == TGSI_OPCODE_TXP)
4491 coords[3] = bld_base->base.one;
4492
4493 /* Pack offsets. */
4494 if (has_offset && opcode != TGSI_OPCODE_TXF) {
4495 /* The offsets are six-bit signed integers packed like this:
4496 * X=[5:0], Y=[13:8], and Z=[21:16].
4497 */
4498 LLVMValueRef offset[3], pack;
4499
4500 assert(inst->Texture.NumOffsets == 1);
4501
4502 for (chan = 0; chan < 3; chan++) {
4503 offset[chan] = lp_build_emit_fetch_texoffset(bld_base,
4504 emit_data->inst, 0, chan);
4505 offset[chan] = LLVMBuildAnd(gallivm->builder, offset[chan],
4506 lp_build_const_int32(gallivm, 0x3f), "");
4507 if (chan)
4508 offset[chan] = LLVMBuildShl(gallivm->builder, offset[chan],
4509 lp_build_const_int32(gallivm, chan*8), "");
4510 }
4511
4512 pack = LLVMBuildOr(gallivm->builder, offset[0], offset[1], "");
4513 pack = LLVMBuildOr(gallivm->builder, pack, offset[2], "");
4514 address[count++] = pack;
4515 }
4516
4517 /* Pack LOD bias value */
4518 if (opcode == TGSI_OPCODE_TXB)
4519 address[count++] = coords[3];
4520 if (opcode == TGSI_OPCODE_TXB2)
4521 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4522
4523 /* Pack depth comparison value */
4524 if (tgsi_is_shadow_target(target) && opcode != TGSI_OPCODE_LODQ) {
4525 LLVMValueRef z;
4526
4527 if (target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
4528 z = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4529 } else {
4530 assert(ref_pos >= 0);
4531 z = coords[ref_pos];
4532 }
4533
4534 /* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
4535 * so the depth comparison value isn't clamped for Z16 and
4536 * Z24 anymore. Do it manually here.
4537 *
4538 * It's unnecessary if the original texture format was
4539 * Z32_FLOAT, but we don't know that here.
4540 */
4541 if (ctx->screen->b.chip_class == VI)
4542 z = si_llvm_saturate(bld_base, z);
4543
4544 address[count++] = z;
4545 }
4546
4547 /* Pack user derivatives */
4548 if (opcode == TGSI_OPCODE_TXD) {
4549 int param, num_src_deriv_channels;
4550
4551 switch (target) {
4552 case TGSI_TEXTURE_3D:
4553 num_src_deriv_channels = 3;
4554 num_deriv_channels = 3;
4555 break;
4556 case TGSI_TEXTURE_2D:
4557 case TGSI_TEXTURE_SHADOW2D:
4558 case TGSI_TEXTURE_RECT:
4559 case TGSI_TEXTURE_SHADOWRECT:
4560 case TGSI_TEXTURE_2D_ARRAY:
4561 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4562 num_src_deriv_channels = 2;
4563 num_deriv_channels = 2;
4564 break;
4565 case TGSI_TEXTURE_CUBE:
4566 case TGSI_TEXTURE_SHADOWCUBE:
4567 case TGSI_TEXTURE_CUBE_ARRAY:
4568 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
4569 /* Cube derivatives will be converted to 2D. */
4570 num_src_deriv_channels = 3;
4571 num_deriv_channels = 2;
4572 break;
4573 case TGSI_TEXTURE_1D:
4574 case TGSI_TEXTURE_SHADOW1D:
4575 case TGSI_TEXTURE_1D_ARRAY:
4576 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4577 num_src_deriv_channels = 1;
4578 num_deriv_channels = 1;
4579 break;
4580 default:
4581 unreachable("invalid target");
4582 }
4583
4584 for (param = 0; param < 2; param++)
4585 for (chan = 0; chan < num_src_deriv_channels; chan++)
4586 derivs[param * num_src_deriv_channels + chan] =
4587 lp_build_emit_fetch(bld_base, inst, param+1, chan);
4588 }
4589
4590 if (target == TGSI_TEXTURE_CUBE ||
4591 target == TGSI_TEXTURE_CUBE_ARRAY ||
4592 target == TGSI_TEXTURE_SHADOWCUBE ||
4593 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
4594 si_prepare_cube_coords(bld_base, emit_data, coords, derivs);
4595
4596 if (opcode == TGSI_OPCODE_TXD)
4597 for (int i = 0; i < num_deriv_channels * 2; i++)
4598 address[count++] = derivs[i];
4599
4600 /* Pack texture coordinates */
4601 address[count++] = coords[0];
4602 if (num_coords > 1)
4603 address[count++] = coords[1];
4604 if (num_coords > 2)
4605 address[count++] = coords[2];
4606
4607 /* Pack LOD or sample index */
4608 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
4609 address[count++] = coords[3];
4610 else if (opcode == TGSI_OPCODE_TXL2)
4611 address[count++] = lp_build_emit_fetch(bld_base, inst, 1, TGSI_CHAN_X);
4612
4613 if (count > 16) {
4614 assert(!"Cannot handle more than 16 texture address parameters");
4615 count = 16;
4616 }
4617
4618 for (chan = 0; chan < count; chan++ ) {
4619 address[chan] = LLVMBuildBitCast(gallivm->builder,
4620 address[chan], ctx->i32, "");
4621 }
4622
4623 /* Adjust the sample index according to FMASK.
4624 *
4625 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
4626 * which is the identity mapping. Each nibble says which physical sample
4627 * should be fetched to get that sample.
4628 *
4629 * For example, 0x11111100 means there are only 2 samples stored and
4630 * the second sample covers 3/4 of the pixel. When reading samples 0
4631 * and 1, return physical sample 0 (determined by the first two 0s
4632 * in FMASK), otherwise return physical sample 1.
4633 *
4634 * The sample index should be adjusted as follows:
4635 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
4636 */
4637 if (target == TGSI_TEXTURE_2D_MSAA ||
4638 target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
4639 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4640 struct lp_build_emit_data txf_emit_data = *emit_data;
4641 LLVMValueRef txf_address[4];
4642 unsigned txf_count = count;
4643 struct tgsi_full_instruction inst = {};
4644
4645 memcpy(txf_address, address, sizeof(txf_address));
4646
4647 if (target == TGSI_TEXTURE_2D_MSAA) {
4648 txf_address[2] = bld_base->uint_bld.zero;
4649 }
4650 txf_address[3] = bld_base->uint_bld.zero;
4651
4652 /* Read FMASK using TXF. */
4653 inst.Instruction.Opcode = TGSI_OPCODE_TXF;
4654 inst.Texture.Texture = target;
4655 txf_emit_data.inst = &inst;
4656 txf_emit_data.chan = 0;
4657 set_tex_fetch_args(ctx, &txf_emit_data, TGSI_OPCODE_TXF,
4658 target, fmask_ptr, NULL,
4659 txf_address, txf_count, 0xf);
4660 build_tex_intrinsic(&tex_action, bld_base, &txf_emit_data);
4661
4662 /* Initialize some constants. */
4663 LLVMValueRef four = LLVMConstInt(ctx->i32, 4, 0);
4664 LLVMValueRef F = LLVMConstInt(ctx->i32, 0xF, 0);
4665
4666 /* Apply the formula. */
4667 LLVMValueRef fmask =
4668 LLVMBuildExtractElement(gallivm->builder,
4669 txf_emit_data.output[0],
4670 uint_bld->zero, "");
4671
4672 unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
4673
4674 LLVMValueRef sample_index4 =
4675 LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
4676
4677 LLVMValueRef shifted_fmask =
4678 LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");
4679
4680 LLVMValueRef final_sample =
4681 LLVMBuildAnd(gallivm->builder, shifted_fmask, F, "");
4682
4683 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
4684 * resource descriptor is 0 (invalid),
4685 */
4686 LLVMValueRef fmask_desc =
4687 LLVMBuildBitCast(gallivm->builder, fmask_ptr,
4688 ctx->v8i32, "");
4689
4690 LLVMValueRef fmask_word1 =
4691 LLVMBuildExtractElement(gallivm->builder, fmask_desc,
4692 uint_bld->one, "");
4693
4694 LLVMValueRef word1_is_nonzero =
4695 LLVMBuildICmp(gallivm->builder, LLVMIntNE,
4696 fmask_word1, uint_bld->zero, "");
4697
4698 /* Replace the MSAA sample index. */
4699 address[sample_chan] =
4700 LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
4701 final_sample, address[sample_chan], "");
4702 }
4703
4704 if (opcode == TGSI_OPCODE_TXF) {
4705 /* add tex offsets */
4706 if (inst->Texture.NumOffsets) {
4707 struct lp_build_context *uint_bld = &bld_base->uint_bld;
4708 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
4709 const struct tgsi_texture_offset *off = inst->TexOffsets;
4710
4711 assert(inst->Texture.NumOffsets == 1);
4712
4713 switch (target) {
4714 case TGSI_TEXTURE_3D:
4715 address[2] = lp_build_add(uint_bld, address[2],
4716 bld->immediates[off->Index][off->SwizzleZ]);
4717 /* fall through */
4718 case TGSI_TEXTURE_2D:
4719 case TGSI_TEXTURE_SHADOW2D:
4720 case TGSI_TEXTURE_RECT:
4721 case TGSI_TEXTURE_SHADOWRECT:
4722 case TGSI_TEXTURE_2D_ARRAY:
4723 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4724 address[1] =
4725 lp_build_add(uint_bld, address[1],
4726 bld->immediates[off->Index][off->SwizzleY]);
4727 /* fall through */
4728 case TGSI_TEXTURE_1D:
4729 case TGSI_TEXTURE_SHADOW1D:
4730 case TGSI_TEXTURE_1D_ARRAY:
4731 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4732 address[0] =
4733 lp_build_add(uint_bld, address[0],
4734 bld->immediates[off->Index][off->SwizzleX]);
4735 break;
4736 /* texture offsets do not apply to other texture targets */
4737 }
4738 }
4739 }
4740
4741 if (opcode == TGSI_OPCODE_TG4) {
4742 unsigned gather_comp = 0;
4743
4744 /* DMASK was repurposed for GATHER4. 4 components are always
4745 * returned and DMASK works like a swizzle - it selects
4746 * the component to fetch. The only valid DMASK values are
4747 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4748 * (red,red,red,red) etc.) The ISA document doesn't mention
4749 * this.
4750 */
4751
4752 /* Get the component index from src1.x for Gather4. */
4753 if (!tgsi_is_shadow_target(target)) {
4754 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
4755 LLVMValueRef comp_imm;
4756 struct tgsi_src_register src1 = inst->Src[1].Register;
4757
4758 assert(src1.File == TGSI_FILE_IMMEDIATE);
4759
4760 comp_imm = imms[src1.Index][src1.SwizzleX];
4761 gather_comp = LLVMConstIntGetZExtValue(comp_imm);
4762 gather_comp = CLAMP(gather_comp, 0, 3);
4763 }
4764
4765 dmask = 1 << gather_comp;
4766 }
4767
4768 set_tex_fetch_args(ctx, emit_data, opcode, target, res_ptr,
4769 samp_ptr, address, count, dmask);
4770 }
4771
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 */
static void si_lower_gather4_integer(struct si_shader_context *ctx,
				     struct lp_build_emit_data *emit_data,
				     const char *intr_name,
				     unsigned coord_vgpr_index)
{
	LLVMBuilderRef builder = ctx->gallivm.builder;
	LLVMValueRef coord = emit_data->args[0];
	LLVMValueRef half_texel[2];
	int c;

	/* RECT targets use unnormalized coordinates, so the correction is a
	 * constant -0.5 texel. For all other targets it is -0.5 / size and
	 * the size has to be queried at runtime with TXQ.
	 */
	if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_RECT ||
	    emit_data->inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct tgsi_full_instruction txq_inst = {};
		struct lp_build_emit_data txq_emit_data = {};

		/* Query the texture size. */
		txq_inst.Texture.Texture = emit_data->inst->Texture.Texture;
		txq_emit_data.inst = &txq_inst;
		txq_emit_data.dst_type = ctx->v4i32;
		set_tex_fetch_args(ctx, &txq_emit_data, TGSI_OPCODE_TXQ,
				   txq_inst.Texture.Texture,
				   emit_data->args[1], NULL,
				   &ctx->soa.bld_base.uint_bld.zero,
				   1, 0xf);
		txq_emit(NULL, &ctx->soa.bld_base, &txq_emit_data);

		/* Compute -0.5 / size. */
		for (c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(builder, txq_emit_data.output[0],
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
			half_texel[c] =
				lp_build_emit_llvm_unary(&ctx->soa.bld_base,
							 TGSI_OPCODE_RCP, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}
	}

	/* Apply the correction to the X/Y coordinates: bitcast the i32
	 * vector element to float, add, and bitcast back so the coordinate
	 * vector keeps its integer element type.
	 */
	for (c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);

		tmp = LLVMBuildExtractElement(builder, coord, index, "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->f32, "");
		tmp = LLVMBuildFAdd(builder, tmp, half_texel[c], "");
		tmp = LLVMBuildBitCast(builder, tmp, ctx->i32, "");
		coord = LLVMBuildInsertElement(builder, coord, tmp, index, "");
	}

	/* Emit the gather intrinsic with the adjusted coordinates. */
	emit_data->args[0] = coord;
	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(builder, intr_name, emit_data->dst_type,
				   emit_data->args, emit_data->arg_count,
				   LLVMReadNoneAttribute);
}
4840
4841 static void build_tex_intrinsic(const struct lp_build_tgsi_action *action,
4842 struct lp_build_tgsi_context *bld_base,
4843 struct lp_build_emit_data *emit_data)
4844 {
4845 struct si_shader_context *ctx = si_shader_context(bld_base);
4846 struct lp_build_context *base = &bld_base->base;
4847 const struct tgsi_full_instruction *inst = emit_data->inst;
4848 unsigned opcode = inst->Instruction.Opcode;
4849 unsigned target = inst->Texture.Texture;
4850 char intr_name[127];
4851 bool has_offset = inst->Texture.NumOffsets > 0;
4852 bool is_shadow = tgsi_is_shadow_target(target);
4853 char type[64];
4854 const char *name = "llvm.SI.image.sample";
4855 const char *infix = "";
4856
4857 if (target == TGSI_TEXTURE_BUFFER) {
4858 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4859 base->gallivm->builder,
4860 "llvm.SI.vs.load.input", emit_data->dst_type,
4861 emit_data->args, emit_data->arg_count,
4862 LLVMReadNoneAttribute);
4863 return;
4864 }
4865
4866 switch (opcode) {
4867 case TGSI_OPCODE_TXF:
4868 name = target == TGSI_TEXTURE_2D_MSAA ||
4869 target == TGSI_TEXTURE_2D_ARRAY_MSAA ?
4870 "llvm.SI.image.load" :
4871 "llvm.SI.image.load.mip";
4872 is_shadow = false;
4873 has_offset = false;
4874 break;
4875 case TGSI_OPCODE_LODQ:
4876 name = "llvm.SI.getlod";
4877 is_shadow = false;
4878 has_offset = false;
4879 break;
4880 case TGSI_OPCODE_TEX:
4881 case TGSI_OPCODE_TEX2:
4882 case TGSI_OPCODE_TXP:
4883 if (ctx->type != PIPE_SHADER_FRAGMENT)
4884 infix = ".lz";
4885 break;
4886 case TGSI_OPCODE_TXB:
4887 case TGSI_OPCODE_TXB2:
4888 assert(ctx->type == PIPE_SHADER_FRAGMENT);
4889 infix = ".b";
4890 break;
4891 case TGSI_OPCODE_TXL:
4892 case TGSI_OPCODE_TXL2:
4893 infix = ".l";
4894 break;
4895 case TGSI_OPCODE_TXD:
4896 infix = ".d";
4897 break;
4898 case TGSI_OPCODE_TG4:
4899 name = "llvm.SI.gather4";
4900 infix = ".lz";
4901 break;
4902 default:
4903 assert(0);
4904 return;
4905 }
4906
4907 /* Add the type and suffixes .c, .o if needed. */
4908 build_type_name_for_intr(LLVMTypeOf(emit_data->args[0]), type, sizeof(type));
4909 sprintf(intr_name, "%s%s%s%s.%s",
4910 name, is_shadow ? ".c" : "", infix,
4911 has_offset ? ".o" : "", type);
4912
4913 /* The hardware needs special lowering for Gather4 with integer formats. */
4914 if (opcode == TGSI_OPCODE_TG4) {
4915 struct tgsi_shader_info *info = &ctx->shader->selector->info;
4916 /* This will also work with non-constant indexing because of how
4917 * glsl_to_tgsi works and we intent to preserve that behavior.
4918 */
4919 const unsigned src_idx = 2;
4920 unsigned sampler = inst->Src[src_idx].Register.Index;
4921
4922 assert(inst->Src[src_idx].Register.File == TGSI_FILE_SAMPLER);
4923
4924 if (info->sampler_type[sampler] == TGSI_RETURN_TYPE_SINT ||
4925 info->sampler_type[sampler] == TGSI_RETURN_TYPE_UINT) {
4926 /* Texture coordinates start after:
4927 * {offset, bias, z-compare, derivatives}
4928 * Only the offset and z-compare can occur here.
4929 */
4930 si_lower_gather4_integer(ctx, emit_data, intr_name,
4931 (int)has_offset + (int)is_shadow);
4932 return;
4933 }
4934 }
4935
4936 emit_data->output[emit_data->chan] = lp_build_intrinsic(
4937 base->gallivm->builder, intr_name, emit_data->dst_type,
4938 emit_data->args, emit_data->arg_count,
4939 LLVMReadNoneAttribute);
4940 }
4941
4942 static void si_llvm_emit_txqs(
4943 const struct lp_build_tgsi_action *action,
4944 struct lp_build_tgsi_context *bld_base,
4945 struct lp_build_emit_data *emit_data)
4946 {
4947 struct si_shader_context *ctx = si_shader_context(bld_base);
4948 struct gallivm_state *gallivm = bld_base->base.gallivm;
4949 LLVMBuilderRef builder = gallivm->builder;
4950 LLVMValueRef res, samples;
4951 LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL;
4952
4953 tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
4954
4955
4956 /* Read the samples from the descriptor directly. */
4957 res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
4958 samples = LLVMBuildExtractElement(
4959 builder, res,
4960 lp_build_const_int32(gallivm, 3), "");
4961 samples = LLVMBuildLShr(builder, samples,
4962 lp_build_const_int32(gallivm, 16), "");
4963 samples = LLVMBuildAnd(builder, samples,
4964 lp_build_const_int32(gallivm, 0xf), "");
4965 samples = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1),
4966 samples, "");
4967
4968 emit_data->output[emit_data->chan] = samples;
4969 }
4970
4971 /*
4972 * SI implements derivatives using the local data store (LDS)
4973 * All writes to the LDS happen in all executing threads at
4974 * the same time. TID is the Thread ID for the current
4975 * thread and is a value between 0 and 63, representing
4976 * the thread's position in the wavefront.
4977 *
 * For the pixel shader, threads are grouped into quads of four pixels.
4979 * The TIDs of the pixels of a quad are:
4980 *
4981 * +------+------+
4982 * |4n + 0|4n + 1|
4983 * +------+------+
4984 * |4n + 2|4n + 3|
4985 * +------+------+
4986 *
4987 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
4988 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
4989 * the current pixel's column, and masking with 0xfffffffe yields the TID
4990 * of the left pixel of the current pixel's row.
4991 *
4992 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
4993 * adding 2 yields the TID of the pixel below the top pixel.
4994 */
4995 /* masks for thread ID. */
4996 #define TID_MASK_TOP_LEFT 0xfffffffc
4997 #define TID_MASK_TOP 0xfffffffd
4998 #define TID_MASK_LEFT 0xfffffffe
4999
/* Emit DDX/DDY (coarse and _FINE variants) for one channel by
 * differencing the value across neighboring pixels of the quad.
 */
static void si_llvm_emit_ddxy(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	unsigned opcode = emit_data->info->opcode;
	LLVMValueRef thread_id, tl, trbl, tl_tid, trbl_tid, val, args[2];
	int idx;
	unsigned mask;

	thread_id = get_thread_id(ctx);

	/* Pick the base lane of the quad that the difference is taken from:
	 * the fine variants difference within the current row/column, the
	 * coarse ones always start from the top-left pixel (see the
	 * TID_MASK_* comment above).
	 */
	if (opcode == TGSI_OPCODE_DDX_FINE)
		mask = TID_MASK_LEFT;
	else if (opcode == TGSI_OPCODE_DDY_FINE)
		mask = TID_MASK_TOP;
	else
		mask = TID_MASK_TOP_LEFT;

	tl_tid = LLVMBuildAnd(gallivm->builder, thread_id,
			      lp_build_const_int32(gallivm, mask), "");

	/* For DDX we want the next X pixel, for DDY the next Y pixel. */
	idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
	trbl_tid = LLVMBuildAdd(gallivm->builder, tl_tid,
				lp_build_const_int32(gallivm, idx), "");

	val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");

	if (ctx->screen->has_ds_bpermute) {
		/* Exchange values between lanes directly; the intrinsic
		 * takes the lane index scaled by 4.
		 */
		args[0] = LLVMBuildMul(gallivm->builder, tl_tid,
				       lp_build_const_int32(gallivm, 4), "");
		args[1] = val;
		tl = lp_build_intrinsic(gallivm->builder,
					"llvm.amdgcn.ds.bpermute", ctx->i32,
					args, 2, LLVMReadNoneAttribute);

		args[0] = LLVMBuildMul(gallivm->builder, trbl_tid,
				       lp_build_const_int32(gallivm, 4), "");
		trbl = lp_build_intrinsic(gallivm->builder,
					  "llvm.amdgcn.ds.bpermute", ctx->i32,
					  args, 2, LLVMReadNoneAttribute);
	} else {
		/* No ds_bpermute: go through LDS. Every thread stores its
		 * value, then loads the values of the two lanes it needs.
		 */
		LLVMValueRef store_ptr, load_ptr0, load_ptr1;

		store_ptr = build_gep0(ctx, ctx->lds, thread_id);
		load_ptr0 = build_gep0(ctx, ctx->lds, tl_tid);
		load_ptr1 = build_gep0(ctx, ctx->lds, trbl_tid);

		LLVMBuildStore(gallivm->builder, val, store_ptr);
		tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
		trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
	}

	tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
	trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");

	/* The derivative is the difference between the two lanes' values. */
	emit_data->output[emit_data->chan] =
		LLVMBuildFSub(gallivm->builder, trbl, tl, "");
}
5062
5063 /*
5064 * this takes an I,J coordinate pair,
5065 * and works out the X and Y derivatives.
5066 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
5067 */
5068 static LLVMValueRef si_llvm_emit_ddxy_interp(
5069 struct lp_build_tgsi_context *bld_base,
5070 LLVMValueRef interp_ij)
5071 {
5072 struct si_shader_context *ctx = si_shader_context(bld_base);
5073 struct gallivm_state *gallivm = bld_base->base.gallivm;
5074 LLVMValueRef result[4], a;
5075 unsigned i;
5076
5077 for (i = 0; i < 2; i++) {
5078 a = LLVMBuildExtractElement(gallivm->builder, interp_ij,
5079 LLVMConstInt(ctx->i32, i, 0), "");
5080 result[i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDX, a);
5081 result[2+i] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_DDY, a);
5082 }
5083
5084 return lp_build_gather_values(gallivm, result, 4);
5085 }
5086
5087 static void interp_fetch_args(
5088 struct lp_build_tgsi_context *bld_base,
5089 struct lp_build_emit_data *emit_data)
5090 {
5091 struct si_shader_context *ctx = si_shader_context(bld_base);
5092 struct gallivm_state *gallivm = bld_base->base.gallivm;
5093 const struct tgsi_full_instruction *inst = emit_data->inst;
5094
5095 if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
5096 /* offset is in second src, first two channels */
5097 emit_data->args[0] = lp_build_emit_fetch(bld_base,
5098 emit_data->inst, 1,
5099 TGSI_CHAN_X);
5100 emit_data->args[1] = lp_build_emit_fetch(bld_base,
5101 emit_data->inst, 1,
5102 TGSI_CHAN_Y);
5103 emit_data->arg_count = 2;
5104 } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
5105 LLVMValueRef sample_position;
5106 LLVMValueRef sample_id;
5107 LLVMValueRef halfval = lp_build_const_float(gallivm, 0.5f);
5108
5109 /* fetch sample ID, then fetch its sample position,
5110 * and place into first two channels.
5111 */
5112 sample_id = lp_build_emit_fetch(bld_base,
5113 emit_data->inst, 1, TGSI_CHAN_X);
5114 sample_id = LLVMBuildBitCast(gallivm->builder, sample_id,
5115 ctx->i32, "");
5116 sample_position = load_sample_position(ctx, sample_id);
5117
5118 emit_data->args[0] = LLVMBuildExtractElement(gallivm->builder,
5119 sample_position,
5120 lp_build_const_int32(gallivm, 0), "");
5121
5122 emit_data->args[0] = LLVMBuildFSub(gallivm->builder, emit_data->args[0], halfval, "");
5123 emit_data->args[1] = LLVMBuildExtractElement(gallivm->builder,
5124 sample_position,
5125 lp_build_const_int32(gallivm, 1), "");
5126 emit_data->args[1] = LLVMBuildFSub(gallivm->builder, emit_data->args[1], halfval, "");
5127 emit_data->arg_count = 2;
5128 }
5129 }
5130
/* Emit INTERP_CENTROID / INTERP_OFFSET / INTERP_SAMPLE: interpolate a
 * fragment shader input at a caller-chosen location.
 */
static void build_interp_intrinsic(const struct lp_build_tgsi_action *action,
				   struct lp_build_tgsi_context *bld_base,
				   struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct si_shader *shader = ctx->shader;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef interp_param;
	const struct tgsi_full_instruction *inst = emit_data->inst;
	const char *intr_name;
	int input_index = inst->Src[0].Register.Index;
	int chan;
	int i;
	LLVMValueRef attr_number;
	LLVMValueRef params = LLVMGetParam(ctx->main_fn, SI_PARAM_PRIM_MASK);
	int interp_param_idx;
	unsigned interp = shader->selector->info.input_interpolate[input_index];
	unsigned location;

	assert(inst->Src[0].Register.File == TGSI_FILE_INPUT);

	/* OFFSET and SAMPLE start from the pixel-center I,J (the explicit
	 * offset / sample position is applied on top of it below);
	 * INTERP_CENTROID uses the centroid I,J directly.
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE)
		location = TGSI_INTERPOLATE_LOC_CENTER;
	else
		location = TGSI_INTERPOLATE_LOC_CENTROID;

	interp_param_idx = lookup_interp_param_index(interp, location);
	if (interp_param_idx == -1)
		return;
	else if (interp_param_idx)
		interp_param = get_interp_param(ctx, interp_param_idx);
	else
		interp_param = NULL; /* flat input: no I,J, use fs.constant below */

	attr_number = lp_build_const_int32(gallivm, input_index);

	if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
	    inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
		LLVMValueRef ij_out[2];
		LLVMValueRef ddxy_out = si_llvm_emit_ddxy_interp(bld_base, interp_param);

		/*
		 * take the I then J parameters, and the DDX/Y for it, and
		 * calculate the IJ inputs for the interpolator.
		 * temp1 = ddx * offset/sample.x + I;
		 * interp_param.I = ddy * offset/sample.y + temp1;
		 * temp1 = ddx * offset/sample.x + J;
		 * interp_param.J = ddy * offset/sample.y + temp1;
		 */
		for (i = 0; i < 2; i++) {
			LLVMValueRef ix_ll = lp_build_const_int32(gallivm, i);
			LLVMValueRef iy_ll = lp_build_const_int32(gallivm, i + 2);
			LLVMValueRef ddx_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, ix_ll, "");
			LLVMValueRef ddy_el = LLVMBuildExtractElement(gallivm->builder,
								      ddxy_out, iy_ll, "");
			LLVMValueRef interp_el = LLVMBuildExtractElement(gallivm->builder,
									 interp_param, ix_ll, "");
			LLVMValueRef temp1, temp2;

			interp_el = LLVMBuildBitCast(gallivm->builder, interp_el,
						     ctx->f32, "");

			temp1 = LLVMBuildFMul(gallivm->builder, ddx_el, emit_data->args[0], "");

			temp1 = LLVMBuildFAdd(gallivm->builder, temp1, interp_el, "");

			temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");

			temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");

			/* The interp VGPRs carry i32 bit patterns; cast back. */
			ij_out[i] = LLVMBuildBitCast(gallivm->builder,
						     temp2, ctx->i32, "");
		}
		interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
	}

	/* Interpolate each of the 4 destination channels with the swizzled
	 * source channel; flat inputs use fs.constant (no I,J argument).
	 */
	intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef args[4];
		LLVMValueRef llvm_chan;
		unsigned schan;

		schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
		llvm_chan = lp_build_const_int32(gallivm, schan);

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = interp_param;

		emit_data->output[chan] =
			lp_build_intrinsic(gallivm->builder, intr_name,
					   ctx->f32, args, args[3] ? 4 : 3,
					   LLVMReadNoneAttribute);
	}
}
5229
5230 static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
5231 struct lp_build_emit_data *emit_data)
5232 {
5233 LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
5234 struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
5235 unsigned stream;
5236
5237 assert(src0.File == TGSI_FILE_IMMEDIATE);
5238
5239 stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
5240 return stream;
5241 }
5242
/* Emit one vertex from the geometry shader: store all of its outputs to
 * the GSVS ring buffer and send the EMIT message.
 */
static void si_llvm_emit_vertex(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context *bld_base,
	struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);
	struct lp_build_context *uint = &bld_base->uint_bld;
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
					    SI_PARAM_GS2VS_OFFSET);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit, kill;
	LLVMValueRef args[2];
	unsigned chan;
	int i;
	unsigned stream;

	stream = si_llvm_get_stream(bld_base, emit_data);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(gallivm->builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
				 lp_build_const_int32(gallivm,
						      shader->selector->gs_max_out_vertices), "");
	kill = lp_build_select(&bld_base->base, can_emit,
			       lp_build_const_float(gallivm, 1.0f),
			       lp_build_const_float(gallivm, -1.0f));

	lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
			   ctx->voidt, &kill, 1, 0);

	/* Store every component of every output to the ring. Each
	 * (output, channel) pair occupies gs_max_out_vertices contiguous
	 * dword slots, indexed by the current vertex counter.
	 */
	for (i = 0; i < info->num_outputs; i++) {
		LLVMValueRef *out_ptr =
			ctx->soa.outputs[i];

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
			LLVMValueRef voffset =
				lp_build_const_int32(gallivm, (i * 4 + chan) *
						     shader->selector->gs_max_out_vertices);

			voffset = lp_build_add(uint, voffset, gs_next_vertex);
			voffset = lp_build_mul_imm(uint, voffset, 4); /* dwords -> bytes */

			out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");

			build_tbuffer_store(ctx,
					    ctx->gsvs_ring[stream],
					    out_val, 1,
					    voffset, soffset, 0,
					    V_008F0C_BUF_DATA_FORMAT_32,
					    V_008F0C_BUF_NUM_FORMAT_UINT,
					    1, 0, 1, 1, 0);
		}
	}

	/* Advance the per-stream emitted-vertex counter. */
	gs_next_vertex = lp_build_add(uint, gs_next_vertex,
				      lp_build_const_int32(gallivm, 1));

	LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission */
	args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
	args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
	lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
			   ctx->voidt, args, 2, 0);
}
5320
5321 /* Cut one primitive from the geometry shader */
5322 static void si_llvm_emit_primitive(
5323 const struct lp_build_tgsi_action *action,
5324 struct lp_build_tgsi_context *bld_base,
5325 struct lp_build_emit_data *emit_data)
5326 {
5327 struct si_shader_context *ctx = si_shader_context(bld_base);
5328 struct gallivm_state *gallivm = bld_base->base.gallivm;
5329 LLVMValueRef args[2];
5330 unsigned stream;
5331
5332 /* Signal primitive cut */
5333 stream = si_llvm_get_stream(bld_base, emit_data);
5334 args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
5335 args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
5336 lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
5337 ctx->voidt, args, 2, 0);
5338 }
5339
5340 static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
5341 struct lp_build_tgsi_context *bld_base,
5342 struct lp_build_emit_data *emit_data)
5343 {
5344 struct si_shader_context *ctx = si_shader_context(bld_base);
5345 struct gallivm_state *gallivm = bld_base->base.gallivm;
5346
5347 /* The real barrier instruction isn’t needed, because an entire patch
5348 * always fits into a single wave.
5349 */
5350 if (ctx->type == PIPE_SHADER_TESS_CTRL) {
5351 emit_optimization_barrier(ctx);
5352 return;
5353 }
5354
5355 lp_build_intrinsic(gallivm->builder,
5356 HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.s.barrier"
5357 : "llvm.AMDGPU.barrier.local",
5358 ctx->voidt, NULL, 0, 0);
5359 }
5360
/* TGSI action shared by all texture sampling/fetch opcodes. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = build_tex_intrinsic,
};
5365
/* TGSI action shared by the INTERP_* opcodes. */
static const struct lp_build_tgsi_action interp_action = {
	.fetch_args = interp_fetch_args,
	.emit = build_interp_intrinsic,
};
5370
/* Declare the shader's main LLVM function and annotate its arguments.
 *
 * \param returns      return types of the function (may be empty)
 * \param num_returns  number of entries in \p returns
 * \param params       argument types
 * \param num_params   number of entries in \p params
 * \param last_sgpr    index of the last SGPR argument; arguments 0..last_sgpr
 *                     are tagged ByVal/dereferenceable (pointers) or InReg
 *                     (scalars), later arguments are VGPRs
 */
static void si_create_function(struct si_shader_context *ctx,
			       const char *name,
			       LLVMTypeRef *returns, unsigned num_returns,
			       LLVMTypeRef *params, unsigned num_params,
			       int last_sgpr)
{
	int i;

	si_llvm_create_func(ctx, name, returns, num_returns,
			    params, num_params);
	si_llvm_shader_type(ctx->main_fn, ctx->type);
	/* The aggregated return value starts as undef; its parts are
	 * filled in later.
	 */
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	for (i = 0; i <= last_sgpr; ++i) {
		LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);

		/* The combination of:
		 * - ByVal
		 * - dereferenceable
		 * - invariant.load
		 * allows the optimization passes to move loads and reduces
		 * SGPR spilling significantly.
		 */
		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			LLVMAddAttribute(P, LLVMByValAttribute);
			lp_add_attr_dereferenceable(P, UINT64_MAX);
		} else
			LLVMAddAttribute(P, LLVMInRegAttribute);
	}

	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
						   "unsafe-fp-math",
						   "true");
	}
}
5417
5418 static void create_meta_data(struct si_shader_context *ctx)
5419 {
5420 struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
5421
5422 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5423 "invariant.load", 14);
5424 ctx->range_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5425 "range", 5);
5426 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
5427 "amdgpu.uniform", 14);
5428
5429 ctx->empty_md = LLVMMDNodeInContext(gallivm->context, NULL, 0);
5430 }
5431
5432 static void declare_streamout_params(struct si_shader_context *ctx,
5433 struct pipe_stream_output_info *so,
5434 LLVMTypeRef *params, LLVMTypeRef i32,
5435 unsigned *num_params)
5436 {
5437 int i;
5438
5439 /* Streamout SGPRs. */
5440 if (so->num_outputs) {
5441 if (ctx->type != PIPE_SHADER_TESS_EVAL)
5442 params[ctx->param_streamout_config = (*num_params)++] = i32;
5443 else
5444 ctx->param_streamout_config = ctx->param_tess_offchip;
5445
5446 params[ctx->param_streamout_write_index = (*num_params)++] = i32;
5447 }
5448 /* A streamout buffer offset is loaded if the stride is non-zero. */
5449 for (i = 0; i < 4; i++) {
5450 if (!so->stride[i])
5451 continue;
5452
5453 params[ctx->param_streamout_offset[i] = (*num_params)++] = i32;
5454 }
5455 }
5456
5457 static unsigned llvm_get_type_size(LLVMTypeRef type)
5458 {
5459 LLVMTypeKind kind = LLVMGetTypeKind(type);
5460
5461 switch (kind) {
5462 case LLVMIntegerTypeKind:
5463 return LLVMGetIntTypeWidth(type) / 8;
5464 case LLVMFloatTypeKind:
5465 return 4;
5466 case LLVMPointerTypeKind:
5467 return 8;
5468 case LLVMVectorTypeKind:
5469 return LLVMGetVectorSize(type) *
5470 llvm_get_type_size(LLVMGetElementType(type));
5471 default:
5472 assert(0);
5473 return 0;
5474 }
5475 }
5476
5477 static void declare_tess_lds(struct si_shader_context *ctx)
5478 {
5479 struct gallivm_state *gallivm = &ctx->gallivm;
5480 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
5481 struct lp_build_context *uint = &bld_base->uint_bld;
5482
5483 unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
5484 ctx->lds = LLVMBuildIntToPtr(gallivm->builder, uint->zero,
5485 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
5486 "tess_lds");
5487 }
5488
5489 static void create_function(struct si_shader_context *ctx)
5490 {
5491 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
5492 struct gallivm_state *gallivm = bld_base->base.gallivm;
5493 struct si_shader *shader = ctx->shader;
5494 LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
5495 LLVMTypeRef returns[16+32*4];
5496 unsigned i, last_sgpr, num_params, num_return_sgprs;
5497 unsigned num_returns = 0;
5498 unsigned num_prolog_vgprs = 0;
5499
5500 v3i32 = LLVMVectorType(ctx->i32, 3);
5501
5502 params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
5503 params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
5504 params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
5505 params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
5506 params[SI_PARAM_SHADER_BUFFERS] = const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);
5507
5508 switch (ctx->type) {
5509 case PIPE_SHADER_VERTEX:
5510 params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
5511 params[SI_PARAM_BASE_VERTEX] = ctx->i32;
5512 params[SI_PARAM_START_INSTANCE] = ctx->i32;
5513 params[SI_PARAM_DRAWID] = ctx->i32;
5514 num_params = SI_PARAM_DRAWID+1;
5515
5516 if (shader->key.vs.as_es) {
5517 params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
5518 } else if (shader->key.vs.as_ls) {
5519 params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
5520 num_params = SI_PARAM_LS_OUT_LAYOUT+1;
5521 } else {
5522 if (ctx->is_gs_copy_shader) {
5523 num_params = SI_PARAM_RW_BUFFERS+1;
5524 } else {
5525 params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
5526 num_params = SI_PARAM_VS_STATE_BITS+1;
5527 }
5528
5529 /* The locations of the other parameters are assigned dynamically. */
5530 declare_streamout_params(ctx, &shader->selector->so,
5531 params, ctx->i32, &num_params);
5532 }
5533
5534 last_sgpr = num_params-1;
5535
5536 /* VGPRs */
5537 params[ctx->param_vertex_id = num_params++] = ctx->i32;
5538 params[ctx->param_rel_auto_id = num_params++] = ctx->i32;
5539 params[ctx->param_vs_prim_id = num_params++] = ctx->i32;
5540 params[ctx->param_instance_id = num_params++] = ctx->i32;
5541
5542 if (!ctx->no_prolog &&
5543 !ctx->is_gs_copy_shader) {
5544 /* Vertex load indices. */
5545 ctx->param_vertex_index0 = num_params;
5546
5547 for (i = 0; i < shader->selector->info.num_inputs; i++)
5548 params[num_params++] = ctx->i32;
5549
5550 num_prolog_vgprs += shader->selector->info.num_inputs;
5551 }
5552
5553 if (!ctx->no_epilog &&
5554 !ctx->is_gs_copy_shader) {
5555 /* PrimitiveID output. */
5556 if (!shader->key.vs.as_es && !shader->key.vs.as_ls)
5557 for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
5558 returns[num_returns++] = ctx->f32;
5559 }
5560 break;
5561
5562 case PIPE_SHADER_TESS_CTRL:
5563 params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
5564 params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
5565 params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
5566 params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
5567 params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
5568 params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
5569 last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
5570
5571 /* VGPRs */
5572 params[SI_PARAM_PATCH_ID] = ctx->i32;
5573 params[SI_PARAM_REL_IDS] = ctx->i32;
5574 num_params = SI_PARAM_REL_IDS+1;
5575
5576 if (!ctx->no_epilog) {
5577 /* SI_PARAM_TCS_OC_LDS and PARAM_TESS_FACTOR_OFFSET are
5578 * placed after the user SGPRs.
5579 */
5580 for (i = 0; i < SI_TCS_NUM_USER_SGPR + 2; i++)
5581 returns[num_returns++] = ctx->i32; /* SGPRs */
5582
5583 for (i = 0; i < 3; i++)
5584 returns[num_returns++] = ctx->f32; /* VGPRs */
5585 }
5586 break;
5587
5588 case PIPE_SHADER_TESS_EVAL:
5589 params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
5590 num_params = SI_PARAM_TCS_OFFCHIP_LAYOUT+1;
5591
5592 if (shader->key.tes.as_es) {
5593 params[ctx->param_oc_lds = num_params++] = ctx->i32;
5594 params[ctx->param_tess_offchip = num_params++] = ctx->i32;
5595 params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
5596 } else {
5597 params[ctx->param_tess_offchip = num_params++] = ctx->i32;
5598 declare_streamout_params(ctx, &shader->selector->so,
5599 params, ctx->i32, &num_params);
5600 params[ctx->param_oc_lds = num_params++] = ctx->i32;
5601 }
5602 last_sgpr = num_params - 1;
5603
5604 /* VGPRs */
5605 params[ctx->param_tes_u = num_params++] = ctx->f32;
5606 params[ctx->param_tes_v = num_params++] = ctx->f32;
5607 params[ctx->param_tes_rel_patch_id = num_params++] = ctx->i32;
5608 params[ctx->param_tes_patch_id = num_params++] = ctx->i32;
5609
5610 /* PrimitiveID output. */
5611 if (!ctx->no_epilog && !shader->key.tes.as_es)
5612 for (i = 0; i <= VS_EPILOG_PRIMID_LOC; i++)
5613 returns[num_returns++] = ctx->f32;
5614 break;
5615
5616 case PIPE_SHADER_GEOMETRY:
5617 params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
5618 params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
5619 last_sgpr = SI_PARAM_GS_WAVE_ID;
5620
5621 /* VGPRs */
5622 params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
5623 params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
5624 params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
5625 params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
5626 params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
5627 params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
5628 params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
5629 params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
5630 num_params = SI_PARAM_GS_INSTANCE_ID+1;
5631 break;
5632
5633 case PIPE_SHADER_FRAGMENT:
5634 params[SI_PARAM_ALPHA_REF] = ctx->f32;
5635 params[SI_PARAM_PRIM_MASK] = ctx->i32;
5636 last_sgpr = SI_PARAM_PRIM_MASK;
5637 params[SI_PARAM_PERSP_SAMPLE] = ctx->v2i32;
5638 params[SI_PARAM_PERSP_CENTER] = ctx->v2i32;
5639 params[SI_PARAM_PERSP_CENTROID] = ctx->v2i32;
5640 params[SI_PARAM_PERSP_PULL_MODEL] = v3i32;
5641 params[SI_PARAM_LINEAR_SAMPLE] = ctx->v2i32;
5642 params[SI_PARAM_LINEAR_CENTER] = ctx->v2i32;
5643 params[SI_PARAM_LINEAR_CENTROID] = ctx->v2i32;
5644 params[SI_PARAM_LINE_STIPPLE_TEX] = ctx->f32;
5645 params[SI_PARAM_POS_X_FLOAT] = ctx->f32;
5646 params[SI_PARAM_POS_Y_FLOAT] = ctx->f32;
5647 params[SI_PARAM_POS_Z_FLOAT] = ctx->f32;
5648 params[SI_PARAM_POS_W_FLOAT] = ctx->f32;
5649 params[SI_PARAM_FRONT_FACE] = ctx->i32;
5650 shader->info.face_vgpr_index = 20;
5651 params[SI_PARAM_ANCILLARY] = ctx->i32;
5652 params[SI_PARAM_SAMPLE_COVERAGE] = ctx->f32;
5653 params[SI_PARAM_POS_FIXED_PT] = ctx->i32;
5654 num_params = SI_PARAM_POS_FIXED_PT+1;
5655
5656 if (!ctx->no_prolog) {
5657 /* Color inputs from the prolog. */
5658 if (shader->selector->info.colors_read) {
5659 unsigned num_color_elements =
5660 util_bitcount(shader->selector->info.colors_read);
5661
5662 assert(num_params + num_color_elements <= ARRAY_SIZE(params));
5663 for (i = 0; i < num_color_elements; i++)
5664 params[num_params++] = ctx->f32;
5665
5666 num_prolog_vgprs += num_color_elements;
5667 }
5668 }
5669
5670 if (!ctx->no_epilog) {
5671 /* Outputs for the epilog. */
5672 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
5673 num_returns =
5674 num_return_sgprs +
5675 util_bitcount(shader->selector->info.colors_written) * 4 +
5676 shader->selector->info.writes_z +
5677 shader->selector->info.writes_stencil +
5678 shader->selector->info.writes_samplemask +
5679 1 /* SampleMaskIn */;
5680
5681 num_returns = MAX2(num_returns,
5682 num_return_sgprs +
5683 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
5684
5685 for (i = 0; i < num_return_sgprs; i++)
5686 returns[i] = ctx->i32;
5687 for (; i < num_returns; i++)
5688 returns[i] = ctx->f32;
5689 }
5690 break;
5691
5692 case PIPE_SHADER_COMPUTE:
5693 params[SI_PARAM_GRID_SIZE] = v3i32;
5694 params[SI_PARAM_BLOCK_SIZE] = v3i32;
5695 params[SI_PARAM_BLOCK_ID] = v3i32;
5696 last_sgpr = SI_PARAM_BLOCK_ID;
5697
5698 params[SI_PARAM_THREAD_ID] = v3i32;
5699 num_params = SI_PARAM_THREAD_ID + 1;
5700 break;
5701 default:
5702 assert(0 && "unimplemented shader");
5703 return;
5704 }
5705
5706 assert(num_params <= ARRAY_SIZE(params));
5707
5708 si_create_function(ctx, "main", returns, num_returns, params,
5709 num_params, last_sgpr);
5710
5711 /* Reserve register locations for VGPR inputs the PS prolog may need. */
5712 if (ctx->type == PIPE_SHADER_FRAGMENT &&
5713 ctx->separate_prolog) {
5714 si_llvm_add_attribute(ctx->main_fn,
5715 "InitialPSInputAddr",
5716 S_0286D0_PERSP_SAMPLE_ENA(1) |
5717 S_0286D0_PERSP_CENTER_ENA(1) |
5718 S_0286D0_PERSP_CENTROID_ENA(1) |
5719 S_0286D0_LINEAR_SAMPLE_ENA(1) |
5720 S_0286D0_LINEAR_CENTER_ENA(1) |
5721 S_0286D0_LINEAR_CENTROID_ENA(1) |
5722 S_0286D0_FRONT_FACE_ENA(1) |
5723 S_0286D0_POS_FIXED_PT_ENA(1));
5724 } else if (ctx->type == PIPE_SHADER_COMPUTE) {
5725 const unsigned *properties = shader->selector->info.properties;
5726 unsigned max_work_group_size =
5727 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
5728 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
5729 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
5730
5731 if (!max_work_group_size) {
5732 /* This is a variable group size compute shader,
5733 * compile it for the maximum possible group size.
5734 */
5735 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
5736 }
5737
5738 si_llvm_add_attribute(ctx->main_fn,
5739 "amdgpu-max-work-group-size",
5740 max_work_group_size);
5741 }
5742
5743 shader->info.num_input_sgprs = 0;
5744 shader->info.num_input_vgprs = 0;
5745
5746 for (i = 0; i <= last_sgpr; ++i)
5747 shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;
5748
5749 for (; i < num_params; ++i)
5750 shader->info.num_input_vgprs += llvm_get_type_size(params[i]) / 4;
5751
5752 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
5753 shader->info.num_input_vgprs -= num_prolog_vgprs;
5754
5755 if (!ctx->screen->has_ds_bpermute &&
5756 bld_base->info &&
5757 (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
5758 bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0 ||
5759 bld_base->info->opcode_count[TGSI_OPCODE_DDX_FINE] > 0 ||
5760 bld_base->info->opcode_count[TGSI_OPCODE_DDY_FINE] > 0 ||
5761 bld_base->info->opcode_count[TGSI_OPCODE_INTERP_OFFSET] > 0 ||
5762 bld_base->info->opcode_count[TGSI_OPCODE_INTERP_SAMPLE] > 0))
5763 ctx->lds =
5764 LLVMAddGlobalInAddressSpace(gallivm->module,
5765 LLVMArrayType(ctx->i32, 64),
5766 "ddxy_lds",
5767 LOCAL_ADDR_SPACE);
5768
5769 if ((ctx->type == PIPE_SHADER_VERTEX && shader->key.vs.as_ls) ||
5770 ctx->type == PIPE_SHADER_TESS_CTRL ||
5771 ctx->type == PIPE_SHADER_TESS_EVAL)
5772 declare_tess_lds(ctx);
5773 }
5774
5775 /**
5776 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
5777 * for later use.
5778 */
5779 static void preload_ring_buffers(struct si_shader_context *ctx)
5780 {
5781 struct gallivm_state *gallivm =
5782 ctx->soa.bld_base.base.gallivm;
5783
5784 LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
5785 SI_PARAM_RW_BUFFERS);
5786
5787 if ((ctx->type == PIPE_SHADER_VERTEX &&
5788 ctx->shader->key.vs.as_es) ||
5789 (ctx->type == PIPE_SHADER_TESS_EVAL &&
5790 ctx->shader->key.tes.as_es) ||
5791 ctx->type == PIPE_SHADER_GEOMETRY) {
5792 unsigned ring =
5793 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
5794 : SI_ES_RING_ESGS;
5795 LLVMValueRef offset = lp_build_const_int32(gallivm, ring);
5796
5797 ctx->esgs_ring =
5798 build_indexed_load_const(ctx, buf_ptr, offset);
5799 }
5800
5801 if (ctx->is_gs_copy_shader) {
5802 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_VS_RING_GSVS);
5803
5804 ctx->gsvs_ring[0] =
5805 build_indexed_load_const(ctx, buf_ptr, offset);
5806 }
5807 if (ctx->type == PIPE_SHADER_GEOMETRY) {
5808 int i;
5809 for (i = 0; i < 4; i++) {
5810 LLVMValueRef offset = lp_build_const_int32(gallivm, SI_GS_RING_GSVS0 + i);
5811
5812 ctx->gsvs_ring[i] =
5813 build_indexed_load_const(ctx, buf_ptr, offset);
5814 }
5815 }
5816 }
5817
5818 static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
5819 LLVMValueRef param_rw_buffers,
5820 unsigned param_pos_fixed_pt)
5821 {
5822 struct lp_build_tgsi_context *bld_base =
5823 &ctx->soa.bld_base;
5824 struct gallivm_state *gallivm = bld_base->base.gallivm;
5825 LLVMBuilderRef builder = gallivm->builder;
5826 LLVMValueRef slot, desc, offset, row, bit, address[2];
5827
5828 /* Use the fixed-point gl_FragCoord input.
5829 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
5830 * per coordinate to get the repeating effect.
5831 */
5832 address[0] = unpack_param(ctx, param_pos_fixed_pt, 0, 5);
5833 address[1] = unpack_param(ctx, param_pos_fixed_pt, 16, 5);
5834
5835 /* Load the buffer descriptor. */
5836 slot = lp_build_const_int32(gallivm, SI_PS_CONST_POLY_STIPPLE);
5837 desc = build_indexed_load_const(ctx, param_rw_buffers, slot);
5838
5839 /* The stipple pattern is 32x32, each row has 32 bits. */
5840 offset = LLVMBuildMul(builder, address[1],
5841 LLVMConstInt(ctx->i32, 4, 0), "");
5842 row = buffer_load_const(ctx, desc, offset);
5843 row = LLVMBuildBitCast(builder, row, ctx->i32, "");
5844 bit = LLVMBuildLShr(builder, row, address[0], "");
5845 bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
5846
5847 /* The intrinsic kills the thread if arg < 0. */
5848 bit = LLVMBuildSelect(builder, bit, LLVMConstReal(ctx->f32, 0),
5849 LLVMConstReal(ctx->f32, -1), "");
5850 lp_build_intrinsic(builder, "llvm.AMDGPU.kill", ctx->voidt, &bit, 1, 0);
5851 }
5852
/* Parse the compiler-emitted configuration words that follow a shader
 * symbol in the ELF binary and accumulate them into *conf (register
 * values, SGPR/VGPR counts, LDS and scratch usage).
 *
 * The config data is a list of (register offset, value) dword pairs.
 */
void si_shader_binary_read_config(struct radeon_shader_binary *binary,
				  struct si_shader_config *conf,
				  unsigned symbol_offset)
{
	unsigned i;
	const unsigned char *config =
		radeon_shader_binary_config_start(binary, symbol_offset);
	bool really_needs_scratch = false;

	/* LLVM adds SGPR spills to the scratch size.
	 * Find out if we really need the scratch buffer.
	 */
	for (i = 0; i < binary->reloc_count; i++) {
		const struct radeon_shader_reloc *reloc = &binary->relocs[i];

		/* The scratch rsrc symbols are only referenced when the
		 * shader actually reads/writes scratch memory.
		 */
		if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name) ||
		    !strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
			really_needs_scratch = true;
			break;
		}
	}

	/* XXX: We may be able to emit some of these values directly rather than
	 * extracting fields to be emitted later.
	 */

	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
		unsigned reg = util_le32_to_cpu(*(uint32_t*)(config + i));
		unsigned value = util_le32_to_cpu(*(uint32_t*)(config + i + 4));
		switch (reg) {
		case R_00B028_SPI_SHADER_PGM_RSRC1_PS:
		case R_00B128_SPI_SHADER_PGM_RSRC1_VS:
		case R_00B228_SPI_SHADER_PGM_RSRC1_GS:
		case R_00B848_COMPUTE_PGM_RSRC1:
			/* The SGPRS/VGPRS fields are in units of 8/4
			 * registers; MAX2 because several symbols can share
			 * one conf.
			 */
			conf->num_sgprs = MAX2(conf->num_sgprs, (G_00B028_SGPRS(value) + 1) * 8);
			conf->num_vgprs = MAX2(conf->num_vgprs, (G_00B028_VGPRS(value) + 1) * 4);
			conf->float_mode = G_00B028_FLOAT_MODE(value);
			conf->rsrc1 = value;
			break;
		case R_00B02C_SPI_SHADER_PGM_RSRC2_PS:
			conf->lds_size = MAX2(conf->lds_size, G_00B02C_EXTRA_LDS_SIZE(value));
			break;
		case R_00B84C_COMPUTE_PGM_RSRC2:
			conf->lds_size = MAX2(conf->lds_size, G_00B84C_LDS_SIZE(value));
			conf->rsrc2 = value;
			break;
		case R_0286CC_SPI_PS_INPUT_ENA:
			conf->spi_ps_input_ena = value;
			break;
		case R_0286D0_SPI_PS_INPUT_ADDR:
			conf->spi_ps_input_addr = value;
			break;
		case R_0286E8_SPI_TMPRING_SIZE:
		case R_00B860_COMPUTE_TMPRING_SIZE:
			/* WAVESIZE is in units of 256 dwords. */
			if (really_needs_scratch)
				conf->scratch_bytes_per_wave =
					G_00B860_WAVESIZE(value) * 256 * 4;
			break;
		case 0x4: /* SPILLED_SGPRS */
			conf->spilled_sgprs = value;
			break;
		case 0x8: /* SPILLED_VGPRS */
			conf->spilled_vgprs = value;
			break;
		default:
		{
			/* Warn once per process about registers we don't
			 * know how to interpret, then stay silent.
			 */
			static bool printed;

			if (!printed) {
				fprintf(stderr, "Warning: LLVM emitted unknown "
					"config register: 0x%x\n", reg);
				printed = true;
			}
		}
			break;
		}
	}

	/* Fall back to INPUT_ENA if the compiler didn't emit INPUT_ADDR. */
	if (!conf->spi_ps_input_addr)
		conf->spi_ps_input_addr = conf->spi_ps_input_ena;
}
5935
5936 void si_shader_apply_scratch_relocs(struct si_context *sctx,
5937 struct si_shader *shader,
5938 struct si_shader_config *config,
5939 uint64_t scratch_va)
5940 {
5941 unsigned i;
5942 uint32_t scratch_rsrc_dword0 = scratch_va;
5943 uint32_t scratch_rsrc_dword1 =
5944 S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
5945
5946 /* Enable scratch coalescing if LLVM sets ELEMENT_SIZE & INDEX_STRIDE
5947 * correctly.
5948 */
5949 if (HAVE_LLVM >= 0x0309)
5950 scratch_rsrc_dword1 |= S_008F04_SWIZZLE_ENABLE(1);
5951 else
5952 scratch_rsrc_dword1 |=
5953 S_008F04_STRIDE(config->scratch_bytes_per_wave / 64);
5954
5955 for (i = 0 ; i < shader->binary.reloc_count; i++) {
5956 const struct radeon_shader_reloc *reloc =
5957 &shader->binary.relocs[i];
5958 if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
5959 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5960 &scratch_rsrc_dword0, 4);
5961 } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
5962 util_memcpy_cpu_to_le32(shader->binary.code + reloc->offset,
5963 &scratch_rsrc_dword1, 4);
5964 }
5965 }
5966 }
5967
5968 static unsigned si_get_shader_binary_size(struct si_shader *shader)
5969 {
5970 unsigned size = shader->binary.code_size;
5971
5972 if (shader->prolog)
5973 size += shader->prolog->binary.code_size;
5974 if (shader->epilog)
5975 size += shader->epilog->binary.code_size;
5976 return size;
5977 }
5978
5979 int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader)
5980 {
5981 const struct radeon_shader_binary *prolog =
5982 shader->prolog ? &shader->prolog->binary : NULL;
5983 const struct radeon_shader_binary *epilog =
5984 shader->epilog ? &shader->epilog->binary : NULL;
5985 const struct radeon_shader_binary *mainb = &shader->binary;
5986 unsigned bo_size = si_get_shader_binary_size(shader) +
5987 (!epilog ? mainb->rodata_size : 0);
5988 unsigned char *ptr;
5989
5990 assert(!prolog || !prolog->rodata_size);
5991 assert((!prolog && !epilog) || !mainb->rodata_size);
5992 assert(!epilog || !epilog->rodata_size);
5993
5994 r600_resource_reference(&shader->bo, NULL);
5995 shader->bo = (struct r600_resource*)
5996 pipe_buffer_create(&sscreen->b.b, 0,
5997 PIPE_USAGE_IMMUTABLE, bo_size);
5998 if (!shader->bo)
5999 return -ENOMEM;
6000
6001 /* Upload. */
6002 ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
6003 PIPE_TRANSFER_READ_WRITE);
6004
6005 if (prolog) {
6006 util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
6007 ptr += prolog->code_size;
6008 }
6009
6010 util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
6011 ptr += mainb->code_size;
6012
6013 if (epilog)
6014 util_memcpy_cpu_to_le32(ptr, epilog->code, epilog->code_size);
6015 else if (mainb->rodata_size > 0)
6016 util_memcpy_cpu_to_le32(ptr, mainb->rodata, mainb->rodata_size);
6017
6018 sscreen->b.ws->buffer_unmap(shader->bo->buf);
6019 return 0;
6020 }
6021
6022 static void si_shader_dump_disassembly(const struct radeon_shader_binary *binary,
6023 struct pipe_debug_callback *debug,
6024 const char *name, FILE *file)
6025 {
6026 char *line, *p;
6027 unsigned i, count;
6028
6029 if (binary->disasm_string) {
6030 fprintf(file, "Shader %s disassembly:\n", name);
6031 fprintf(file, "%s", binary->disasm_string);
6032
6033 if (debug && debug->debug_message) {
6034 /* Very long debug messages are cut off, so send the
6035 * disassembly one line at a time. This causes more
6036 * overhead, but on the plus side it simplifies
6037 * parsing of resulting logs.
6038 */
6039 pipe_debug_message(debug, SHADER_INFO,
6040 "Shader Disassembly Begin");
6041
6042 line = binary->disasm_string;
6043 while (*line) {
6044 p = util_strchrnul(line, '\n');
6045 count = p - line;
6046
6047 if (count) {
6048 pipe_debug_message(debug, SHADER_INFO,
6049 "%.*s", count, line);
6050 }
6051
6052 if (!*p)
6053 break;
6054 line = p + 1;
6055 }
6056
6057 pipe_debug_message(debug, SHADER_INFO,
6058 "Shader Disassembly End");
6059 }
6060 } else {
6061 fprintf(file, "Shader %s binary:\n", name);
6062 for (i = 0; i < binary->code_size; i += 4) {
6063 fprintf(file, "@0x%x: %02x%02x%02x%02x\n", i,
6064 binary->code[i + 3], binary->code[i + 2],
6065 binary->code[i + 1], binary->code[i]);
6066 }
6067 }
6068 }
6069
/* Print shader statistics (register usage, LDS, scratch, estimated wave
 * occupancy) to "file" and emit a one-line summary via the debug callback.
 */
static void si_shader_dump_stats(struct si_screen *sscreen,
				 struct si_shader_config *conf,
				 unsigned num_inputs,
				 unsigned code_size,
				 struct pipe_debug_callback *debug,
				 unsigned processor,
				 FILE *file)
{
	unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
	unsigned lds_per_wave = 0;
	/* Start at the per-SIMD wave limit and clamp it down below based on
	 * the resources this shader consumes.
	 */
	unsigned max_simd_waves = 10;

	/* Compute LDS usage for PS. */
	if (processor == PIPE_SHADER_FRAGMENT) {
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		/* SGPR budget per SIMD: 512 pre-VI, 800 on VI and newer. */
		if (sscreen->b.chip_class >= VI)
			max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
		else
			max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
	}

	/* The VGPR file holds 256 registers per lane. */
	if (conf->num_vgprs)
		max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (file != stderr ||
	    r600_can_dump_shader(&sscreen->b, processor)) {
		if (processor == PIPE_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}

		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}

	/* Always report the stats to the debug callback, even when nothing
	 * was printed above.
	 */
	pipe_debug_message(debug, SHADER_INFO,
			   "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
			   "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
			   "Spilled VGPRs: %d",
			   conf->num_sgprs, conf->num_vgprs, code_size,
			   conf->lds_size, conf->scratch_bytes_per_wave,
			   max_simd_waves, conf->spilled_sgprs,
			   conf->spilled_vgprs);
}
6149
6150 static const char *si_get_shader_name(struct si_shader *shader,
6151 unsigned processor)
6152 {
6153 switch (processor) {
6154 case PIPE_SHADER_VERTEX:
6155 if (shader->key.vs.as_es)
6156 return "Vertex Shader as ES";
6157 else if (shader->key.vs.as_ls)
6158 return "Vertex Shader as LS";
6159 else
6160 return "Vertex Shader as VS";
6161 case PIPE_SHADER_TESS_CTRL:
6162 return "Tessellation Control Shader";
6163 case PIPE_SHADER_TESS_EVAL:
6164 if (shader->key.tes.as_es)
6165 return "Tessellation Evaluation Shader as ES";
6166 else
6167 return "Tessellation Evaluation Shader as VS";
6168 case PIPE_SHADER_GEOMETRY:
6169 if (shader->gs_copy_shader == NULL)
6170 return "GS Copy Shader as VS";
6171 else
6172 return "Geometry Shader";
6173 case PIPE_SHADER_FRAGMENT:
6174 return "Pixel Shader";
6175 case PIPE_SHADER_COMPUTE:
6176 return "Compute Shader";
6177 default:
6178 return "Unknown Shader";
6179 }
6180 }
6181
6182 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
6183 struct pipe_debug_callback *debug, unsigned processor,
6184 FILE *file)
6185 {
6186 if (file != stderr ||
6187 r600_can_dump_shader(&sscreen->b, processor))
6188 si_dump_shader_key(processor, &shader->key, file);
6189
6190 if (file != stderr && shader->binary.llvm_ir_string) {
6191 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
6192 si_get_shader_name(shader, processor));
6193 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
6194 }
6195
6196 if (file != stderr ||
6197 (r600_can_dump_shader(&sscreen->b, processor) &&
6198 !(sscreen->b.debug_flags & DBG_NO_ASM))) {
6199 fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
6200
6201 if (shader->prolog)
6202 si_shader_dump_disassembly(&shader->prolog->binary,
6203 debug, "prolog", file);
6204
6205 si_shader_dump_disassembly(&shader->binary, debug, "main", file);
6206
6207 if (shader->epilog)
6208 si_shader_dump_disassembly(&shader->epilog->binary,
6209 debug, "epilog", file);
6210 fprintf(file, "\n");
6211 }
6212
6213 si_shader_dump_stats(sscreen, &shader->config,
6214 shader->selector ? shader->selector->info.num_inputs : 0,
6215 si_get_shader_binary_size(shader), debug, processor,
6216 file);
6217 }
6218
/* Compile an LLVM module to a shader binary, parse its config registers
 * into *conf, and apply float-mode fixups. Returns 0 on success or a
 * negative error code.
 */
int si_compile_llvm(struct si_screen *sscreen,
		    struct radeon_shader_binary *binary,
		    struct si_shader_config *conf,
		    LLVMTargetMachineRef tm,
		    LLVMModuleRef mod,
		    struct pipe_debug_callback *debug,
		    unsigned processor,
		    const char *name)
{
	int r = 0;
	unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);

	if (r600_can_dump_shader(&sscreen->b, processor)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			LLVMDumpModule(mod);
			fprintf(stderr, "\n");
		}
	}

	/* Keep a copy of the IR for later dumping, if requested. */
	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* si_replace_shader presumably substitutes a pre-built binary for
	 * debugging; only compile when it didn't — TODO confirm.
	 */
	if (!si_replace_shader(count, binary)) {
		r = si_llvm_compile(mod, binary, tm, debug);
		if (r)
			return r;
	}

	si_shader_binary_read_config(binary, conf, 0);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	/* The parsed config and symbol offsets are no longer needed. */
	FREE(binary->config);
	FREE(binary->global_symbol_offsets);
	binary->config = NULL;
	binary->global_symbol_offsets = NULL;

	/* Some shaders can't have rodata because their binaries can be
	 * concatenated.
	 */
	if (binary->rodata_size &&
	    (processor == PIPE_SHADER_VERTEX ||
	     processor == PIPE_SHADER_TESS_CTRL ||
	     processor == PIPE_SHADER_TESS_EVAL ||
	     processor == PIPE_SHADER_FRAGMENT)) {
		fprintf(stderr, "radeonsi: The shader can't have rodata.");
		return -EINVAL;
	}

	return r;
}
6288
6289 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
6290 {
6291 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
6292 LLVMBuildRetVoid(ctx->gallivm.builder);
6293 else
6294 LLVMBuildRet(ctx->gallivm.builder, ret);
6295 }
6296
6297 /* Generate code for the hardware VS shader stage to go with a geometry shader */
6298 static int si_generate_gs_copy_shader(struct si_screen *sscreen,
6299 struct si_shader_context *ctx,
6300 struct si_shader *gs,
6301 struct pipe_debug_callback *debug)
6302 {
6303 struct gallivm_state *gallivm = &ctx->gallivm;
6304 struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
6305 struct lp_build_context *uint = &bld_base->uint_bld;
6306 struct si_shader_output_values *outputs;
6307 struct tgsi_shader_info *gsinfo = &gs->selector->info;
6308 LLVMValueRef args[9];
6309 int i, r;
6310
6311 outputs = MALLOC(gsinfo->num_outputs * sizeof(outputs[0]));
6312
6313 si_init_shader_ctx(ctx, sscreen, ctx->shader, ctx->tm);
6314 ctx->type = PIPE_SHADER_VERTEX;
6315 ctx->is_gs_copy_shader = true;
6316
6317 create_meta_data(ctx);
6318 create_function(ctx);
6319 preload_ring_buffers(ctx);
6320
6321 args[0] = ctx->gsvs_ring[0];
6322 args[1] = lp_build_mul_imm(uint,
6323 LLVMGetParam(ctx->main_fn,
6324 ctx->param_vertex_id),
6325 4);
6326 args[3] = uint->zero;
6327 args[4] = uint->one; /* OFFEN */
6328 args[5] = uint->zero; /* IDXEN */
6329 args[6] = uint->one; /* GLC */
6330 args[7] = uint->one; /* SLC */
6331 args[8] = uint->zero; /* TFE */
6332
6333 /* Fetch vertex data from GSVS ring */
6334 for (i = 0; i < gsinfo->num_outputs; ++i) {
6335 unsigned chan;
6336
6337 outputs[i].name = gsinfo->output_semantic_name[i];
6338 outputs[i].sid = gsinfo->output_semantic_index[i];
6339
6340 for (chan = 0; chan < 4; chan++) {
6341 args[2] = lp_build_const_int32(gallivm,
6342 (i * 4 + chan) *
6343 gs->selector->gs_max_out_vertices * 16 * 4);
6344
6345 outputs[i].values[chan] =
6346 LLVMBuildBitCast(gallivm->builder,
6347 lp_build_intrinsic(gallivm->builder,
6348 "llvm.SI.buffer.load.dword.i32.i32",
6349 ctx->i32, args, 9,
6350 LLVMReadOnlyAttribute),
6351 ctx->f32, "");
6352 }
6353 }
6354
6355 si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
6356
6357 LLVMBuildRetVoid(gallivm->builder);
6358
6359 /* Dump LLVM IR before any optimization passes */
6360 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
6361 r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6362 LLVMDumpModule(bld_base->base.gallivm->module);
6363
6364 si_llvm_finalize_module(ctx,
6365 r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_GEOMETRY));
6366
6367 r = si_compile_llvm(sscreen, &ctx->shader->binary,
6368 &ctx->shader->config, ctx->tm,
6369 bld_base->base.gallivm->module,
6370 debug, PIPE_SHADER_GEOMETRY,
6371 "GS Copy Shader");
6372 if (!r) {
6373 if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
6374 fprintf(stderr, "GS Copy Shader:\n");
6375 si_shader_dump(sscreen, ctx->shader, debug,
6376 PIPE_SHADER_GEOMETRY, stderr);
6377 r = si_shader_binary_upload(sscreen, ctx->shader);
6378 }
6379
6380 si_llvm_dispose(ctx);
6381
6382 FREE(outputs);
6383 return r;
6384 }
6385
/* Print the shader key — the pipeline state this shader variant was
 * compiled for — in a stable, greppable text form.
 */
static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
			       FILE *f)
{
	int i;

	fprintf(f, "SHADER KEY\n");

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		fprintf(f, "  instance_divisors = {");
		for (i = 0; i < ARRAY_SIZE(key->vs.prolog.instance_divisors); i++)
			fprintf(f, !i ? "%u" : ", %u",
				key->vs.prolog.instance_divisors[i]);
		fprintf(f, "}\n");
		fprintf(f, "  as_es = %u\n", key->vs.as_es);
		fprintf(f, "  as_ls = %u\n", key->vs.as_ls);
		fprintf(f, "  export_prim_id = %u\n", key->vs.epilog.export_prim_id);
		break;

	case PIPE_SHADER_TESS_CTRL:
		fprintf(f, "  prim_mode = %u\n", key->tcs.epilog.prim_mode);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->tes.as_es);
		fprintf(f, "  export_prim_id = %u\n", key->tes.epilog.export_prim_id);
		break;

	/* GS and CS variants carry no key state worth printing. */
	case PIPE_SHADER_GEOMETRY:
	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  prolog.color_two_side = %u\n", key->ps.prolog.color_two_side);
		fprintf(f, "  prolog.flatshade_colors = %u\n", key->ps.prolog.flatshade_colors);
		fprintf(f, "  prolog.poly_stipple = %u\n", key->ps.prolog.poly_stipple);
		fprintf(f, "  prolog.force_persp_sample_interp = %u\n", key->ps.prolog.force_persp_sample_interp);
		fprintf(f, "  prolog.force_linear_sample_interp = %u\n", key->ps.prolog.force_linear_sample_interp);
		fprintf(f, "  prolog.force_persp_center_interp = %u\n", key->ps.prolog.force_persp_center_interp);
		fprintf(f, "  prolog.force_linear_center_interp = %u\n", key->ps.prolog.force_linear_center_interp);
		fprintf(f, "  prolog.bc_optimize_for_persp = %u\n", key->ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  prolog.bc_optimize_for_linear = %u\n", key->ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  epilog.spi_shader_col_format = 0x%x\n", key->ps.epilog.spi_shader_col_format);
		fprintf(f, "  epilog.color_is_int8 = 0x%X\n", key->ps.epilog.color_is_int8);
		fprintf(f, "  epilog.last_cbuf = %u\n", key->ps.epilog.last_cbuf);
		fprintf(f, "  epilog.alpha_func = %u\n", key->ps.epilog.alpha_func);
		fprintf(f, "  epilog.alpha_to_one = %u\n", key->ps.epilog.alpha_to_one);
		fprintf(f, "  epilog.poly_line_smoothing = %u\n", key->ps.epilog.poly_line_smoothing);
		fprintf(f, "  epilog.clamp_color = %u\n", key->ps.epilog.clamp_color);
		break;

	default:
		assert(0);
	}
}
6441
/* Initialize the per-compilation shader context: LLVM context, cached
 * LLVM types, and the TGSI opcode action dispatch tables.
 */
static void si_init_shader_ctx(struct si_shader_context *ctx,
			       struct si_screen *sscreen,
			       struct si_shader *shader,
			       LLVMTargetMachineRef tm)
{
	struct lp_build_tgsi_context *bld_base;
	struct lp_build_tgsi_action tmpl = {};

	memset(ctx, 0, sizeof(*ctx));
	si_llvm_context_init(
		ctx, "amdgcn--",
		(shader && shader->selector) ? &shader->selector->info : NULL,
		(shader && shader->selector) ? shader->selector->tokens : NULL);
	si_shader_context_init_alu(&ctx->soa.bld_base);
	ctx->tm = tm;
	ctx->screen = sscreen;
	/* ctx->type stays -1 for shader-part compilations that have no
	 * selector (e.g. prologs/epilogs).
	 */
	if (shader && shader->selector)
		ctx->type = shader->selector->info.processor;
	else
		ctx->type = -1;
	ctx->shader = shader;

	/* Commonly used LLVM types, cached for convenience. */
	ctx->voidt = LLVMVoidTypeInContext(ctx->gallivm.context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->gallivm.context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->gallivm.context);
	ctx->i32 = LLVMInt32TypeInContext(ctx->gallivm.context);
	ctx->i64 = LLVMInt64TypeInContext(ctx->gallivm.context);
	ctx->i128 = LLVMIntTypeInContext(ctx->gallivm.context, 128);
	ctx->f32 = LLVMFloatTypeInContext(ctx->gallivm.context);
	ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	bld_base = &ctx->soa.bld_base;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;

	/* Interpolation opcodes (fragment shaders). */
	bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_SAMPLE] = interp_action;
	bld_base->op_actions[TGSI_OPCODE_INTERP_OFFSET] = interp_action;

	/* Texturing opcodes. */
	bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TEX2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXB2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXD] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXF] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXL2] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXP] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQ].fetch_args = txq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
	bld_base->op_actions[TGSI_OPCODE_TG4] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_LODQ] = tex_action;
	bld_base->op_actions[TGSI_OPCODE_TXQS].emit = si_llvm_emit_txqs;

	/* Buffer/image load, store and query opcodes. */
	bld_base->op_actions[TGSI_OPCODE_LOAD].fetch_args = load_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_LOAD].emit = load_emit;
	bld_base->op_actions[TGSI_OPCODE_STORE].fetch_args = store_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
	bld_base->op_actions[TGSI_OPCODE_RESQ].fetch_args = resq_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;

	/* Atomics all share the same fetch_args/emit callbacks; only the
	 * intrinsic name differs per opcode.
	 */
	tmpl.fetch_args = atomic_fetch_args;
	tmpl.emit = atomic_emit;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
	bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
	bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
	bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";

	bld_base->op_actions[TGSI_OPCODE_MEMBAR].emit = membar_emit;

	/* Derivative opcodes. */
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDX_FINE].emit = si_llvm_emit_ddxy;
	bld_base->op_actions[TGSI_OPCODE_DDY_FINE].emit = si_llvm_emit_ddxy;

	/* Geometry shader vertex/primitive emission and barriers. */
	bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
	bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
	bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
6540
6541 /* Return true if the PARAM export has been eliminated. */
6542 static bool si_eliminate_const_output(struct si_shader_context *ctx,
6543 LLVMValueRef inst, unsigned offset)
6544 {
6545 struct si_shader *shader = ctx->shader;
6546 unsigned num_outputs = shader->selector->info.num_outputs;
6547 unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
6548 bool is_zero[4] = {}, is_one[4] = {};
6549
6550 for (i = 0; i < 4; i++) {
6551 LLVMBool loses_info;
6552 LLVMValueRef p = LLVMGetOperand(inst, 5 + i);
6553
6554 /* It's a constant expression. Undef outputs are eliminated too. */
6555 if (LLVMIsUndef(p)) {
6556 is_zero[i] = true;
6557 is_one[i] = true;
6558 } else if (LLVMIsAConstantFP(p)) {
6559 double a = LLVMConstRealGetDouble(p, &loses_info);
6560
6561 if (a == 0)
6562 is_zero[i] = true;
6563 else if (a == 1)
6564 is_one[i] = true;
6565 else
6566 return false; /* other constant */
6567 } else
6568 return false;
6569 }
6570
6571 /* Only certain combinations of 0 and 1 can be eliminated. */
6572 if (is_zero[0] && is_zero[1] && is_zero[2])
6573 default_val = is_zero[3] ? 0 : 1;
6574 else if (is_one[0] && is_one[1] && is_one[2])
6575 default_val = is_zero[3] ? 2 : 3;
6576 else
6577 return false;
6578
6579 /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
6580 LLVMInstructionEraseFromParent(inst);
6581
6582 /* Change OFFSET to DEFAULT_VAL. */
6583 for (i = 0; i < num_outputs; i++) {
6584 if (shader->info.vs_output_param_offset[i] == offset) {
6585 shader->info.vs_output_param_offset[i] =
6586 EXP_PARAM_DEFAULT_VAL_0000 + default_val;
6587 break;
6588 }
6589 }
6590 return true;
6591 }
6592
/* PARAM exports that survived constant elimination, remembered so their
 * offsets can be renumbered afterwards to close any holes. */
struct si_vs_exports {
	unsigned num;                         /* number of valid entries below */
	unsigned offset[SI_MAX_VS_OUTPUTS];   /* PARAM offset of each export */
	LLVMValueRef inst[SI_MAX_VS_OUTPUTS]; /* the export call instructions */
};
6598
/* Scan the main function for llvm.SI.export calls, remove PARAM exports
 * whose values are compile-time 0/1/undef (they can be replaced by
 * SPI_PS_INPUT_CNTL default values), and renumber the remaining PARAM
 * exports so the export space has no holes. */
static void si_eliminate_const_vs_outputs(struct si_shader_context *ctx)
{
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBasicBlockRef bb;
	struct si_vs_exports exports;
	bool removed_any = false;

	exports.num = 0;

	/* Only applies when this stage produces the hardware VS outputs;
	 * VS-as-ES/LS and TES-as-ES variants have no PARAM exports. */
	if ((ctx->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es || shader->key.vs.as_ls)) ||
	    (ctx->type == PIPE_SHADER_TESS_EVAL && shader->key.tes.as_es))
		return;

	/* Process all LLVM instructions. */
	bb = LLVMGetFirstBasicBlock(ctx->main_fn);
	while (bb) {
		LLVMValueRef inst = LLVMGetFirstInstruction(bb);

		while (inst) {
			/* Advance the iterator first, because "cur" may be
			 * erased by si_eliminate_const_output below. */
			LLVMValueRef cur = inst;
			inst = LLVMGetNextInstruction(inst);

			if (LLVMGetInstructionOpcode(cur) != LLVMCall)
				continue;

			LLVMValueRef callee = lp_get_called_value(cur);

			if (!lp_is_function(callee))
				continue;

			const char *name = LLVMGetValueName(callee);
			unsigned num_args = LLVMCountParams(callee);

			/* Check if this is an export instruction. */
			if (num_args != 9 || strcmp(name, "llvm.SI.export"))
				continue;

			/* Operand 3 of llvm.SI.export is the export target. */
			LLVMValueRef arg = LLVMGetOperand(cur, 3);
			unsigned target = LLVMConstIntGetZExtValue(arg);

			if (target < V_008DFC_SQ_EXP_PARAM)
				continue;

			target -= V_008DFC_SQ_EXP_PARAM;

			/* Eliminate constant value PARAM exports. */
			if (si_eliminate_const_output(ctx, cur, target)) {
				removed_any = true;
			} else {
				/* Remember survivors for renumbering. */
				exports.offset[exports.num] = target;
				exports.inst[exports.num] = cur;
				exports.num++;
			}
		}
		bb = LLVMGetNextBasicBlock(bb);
	}

	/* Remove holes in export memory due to removed PARAM exports.
	 * This is done by renumbering all PARAM exports.
	 */
	if (removed_any) {
		ubyte current_offset[SI_MAX_VS_OUTPUTS];
		unsigned new_count = 0;
		unsigned out, i;

		/* Make a copy of the offsets. We need the old version while
		 * we are modifying some of them. */
		assert(sizeof(current_offset) ==
		       sizeof(shader->info.vs_output_param_offset));
		memcpy(current_offset, shader->info.vs_output_param_offset,
		       sizeof(current_offset));

		for (i = 0; i < exports.num; i++) {
			unsigned offset = exports.offset[i];

			/* Find the output using this offset and assign it the
			 * next compact slot, patching the export call too. */
			for (out = 0; out < info->num_outputs; out++) {
				if (current_offset[out] != offset)
					continue;

				LLVMSetOperand(exports.inst[i], 3,
					       LLVMConstInt(ctx->i32,
							    V_008DFC_SQ_EXP_PARAM + new_count, 0));
				shader->info.vs_output_param_offset[out] = new_count;
				new_count++;
				break;
			}
		}
		shader->info.nr_param_exports = new_count;
	}
}
6691
/* Translate the selector's TGSI tokens into LLVM IR for the main shader
 * part: wire up per-stage fetch/store/epilogue callbacks, create the LLVM
 * function, then run the TGSI->LLVM translator. Returns false on failure. */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;
	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;

	/* Select input-load and epilogue callbacks per shader stage.
	 * This must happen before create_function/lp_build_tgsi_llvm. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		ctx->load_input = declare_input_vs;
		if (shader->key.vs.as_ls)
			bld_base->emit_epilogue = si_llvm_emit_ls_epilogue;
		else if (shader->key.vs.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_TESS_CTRL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tcs;
		bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = fetch_output_tcs;
		bld_base->emit_store = store_output_tcs;
		bld_base->emit_epilogue = si_llvm_emit_tcs_epilogue;
		break;
	case PIPE_SHADER_TESS_EVAL:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_tes;
		if (shader->key.tes.as_es)
			bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
		bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->load_input = declare_input_fs;
		if (ctx->no_epilog)
			bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
		else
			bld_base->emit_epilogue = si_llvm_return_fs_outputs;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->declare_memory_region = declare_compute_memory;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	create_meta_data(ctx);
	create_function(ctx);
	preload_ring_buffers(ctx);

	/* Monolithic fragment shaders emit the polygon stipple test inline
	 * instead of in a separate prolog part. */
	if (ctx->no_prolog && sel->type == PIPE_SHADER_FRAGMENT &&
	    shader->key.ps.prolog.poly_stipple) {
		LLVMValueRef list = LLVMGetParam(ctx->main_fn,
						 SI_PARAM_RW_BUFFERS);
		si_llvm_emit_polygon_stipple(ctx, list,
					     SI_PARAM_POS_FIXED_PT);
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		int i;
		/* Allocate 4 emitted-vertex counters — presumably one per
		 * GS vertex stream; confirm against si_llvm_emit_vertex. */
		for (i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				lp_build_alloca(bld_base->base.gallivm,
						ctx->i32, "");
		}
	}

	if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
		fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
		return false;
	}

	/* Return the aggregated return value built up by the epilogue. */
	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
6769
/**
 * Compute the PS prolog key, which contains all the information needed to
 * build the PS prolog function, and set related bits in shader->config.
 */
static void si_get_ps_prolog_key(struct si_shader *shader,
				 union si_shader_part_key *key)
{
	struct tgsi_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* The prolog needs WQM execution when the main part uses derivatives
	 * and the prolog interpolates colors or overrides interpolants. */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		for (unsigned i = 0; i < 2; i++) {
			/* NOTE(review): color[i] is dereferenced before the
			 * colors_read check below — assumes color_attr_index
			 * holds a valid index even for unread colors; verify. */
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1: no interpolation VGPR is consumed. */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR index selects which (i,j) barycentric
				 * pair the prolog reads; each pair occupies
				 * 2 VGPRs, perspective pairs come first. */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					shader->config.spi_ps_input_ena |=
						S_0286CC_PERSP_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* Linear pairs follow the 3 perspective pairs
				 * (indices 6/8/10). */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 6;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_SAMPLE_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 8;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTER_ENA(1);
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 10;
					shader->config.spi_ps_input_ena |=
						S_0286CC_LINEAR_CENTROID_ENA(1);
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
6881
6882 /**
6883 * Check whether a PS prolog is required based on the key.
6884 */
6885 static bool si_need_ps_prolog(const union si_shader_part_key *key)
6886 {
6887 return key->ps_prolog.colors_read ||
6888 key->ps_prolog.states.force_persp_sample_interp ||
6889 key->ps_prolog.states.force_linear_sample_interp ||
6890 key->ps_prolog.states.force_persp_center_interp ||
6891 key->ps_prolog.states.force_linear_center_interp ||
6892 key->ps_prolog.states.bc_optimize_for_persp ||
6893 key->ps_prolog.states.bc_optimize_for_linear ||
6894 key->ps_prolog.states.poly_stipple;
6895 }
6896
6897 /**
6898 * Compute the PS epilog key, which contains all the information needed to
6899 * build the PS epilog function.
6900 */
6901 static void si_get_ps_epilog_key(struct si_shader *shader,
6902 union si_shader_part_key *key)
6903 {
6904 struct tgsi_shader_info *info = &shader->selector->info;
6905 memset(key, 0, sizeof(*key));
6906 key->ps_epilog.colors_written = info->colors_written;
6907 key->ps_epilog.writes_z = info->writes_z;
6908 key->ps_epilog.writes_stencil = info->writes_stencil;
6909 key->ps_epilog.writes_samplemask = info->writes_samplemask;
6910 key->ps_epilog.states = shader->key.ps.epilog;
6911 }
6912
/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 *
 * \param parts      the part functions; parts[0] defines the SGPR/VGPR
 *                   layout, parts[main_part] supplies the parameter types
 * \param num_parts  number of entries in \p parts
 * \param main_part  index of the main shader part within \p parts
 */
static void si_build_wrapper_function(struct si_shader_context *ctx,
				      LLVMValueRef *parts,
				      unsigned num_parts,
				      unsigned main_part)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMBuilderRef builder = ctx->gallivm.builder;
	/* PS epilog has one arg per color component */
	LLVMTypeRef param_types[48];
	LLVMValueRef out[48];
	LLVMTypeRef function_type;
	unsigned num_params;
	unsigned num_out_sgpr, num_out;
	unsigned num_sgprs, num_vgprs;
	unsigned last_sgpr_param;
	unsigned gprs;

	/* All parts get inlined into the wrapper. */
	for (unsigned i = 0; i < num_parts; ++i) {
		LLVMAddFunctionAttr(parts[i], LLVMAlwaysInlineAttribute);
		LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
	}

	/* The parameters of the wrapper function correspond to those of the
	 * first part in terms of SGPRs and VGPRs, but we use the types of the
	 * main part to get the right types. This is relevant for the
	 * dereferenceable attribute on descriptor table pointers.
	 */
	num_sgprs = 0;
	num_vgprs = 0;

	function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
	num_params = LLVMCountParamTypes(function_type);

	/* Count the GPRs used by the first part (in 4-byte units). */
	for (unsigned i = 0; i < num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(parts[0], i);

		if (ac_is_sgpr_param(param)) {
			/* All SGPR args must precede all VGPR args. */
			assert(num_vgprs == 0);
			num_sgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
		} else {
			num_vgprs += llvm_get_type_size(LLVMTypeOf(param)) / 4;
		}
	}
	assert(num_vgprs + num_sgprs <= ARRAY_SIZE(param_types));

	/* Build the wrapper's parameter list from the main part's parameter
	 * types until the first part's GPR count is covered. */
	num_params = 0;
	last_sgpr_param = 0;
	gprs = 0;
	while (gprs < num_sgprs + num_vgprs) {
		LLVMValueRef param = LLVMGetParam(parts[main_part], num_params);
		unsigned size;

		param_types[num_params] = LLVMTypeOf(param);
		if (gprs < num_sgprs)
			last_sgpr_param = num_params;
		size = llvm_get_type_size(param_types[num_params]) / 4;
		num_params++;

		/* The SGPR/VGPR split must agree between the two parts, and
		 * no parameter may straddle the SGPR/VGPR boundary. */
		assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
		assert(gprs + size <= num_sgprs + num_vgprs &&
		       (gprs >= num_sgprs || gprs + size <= num_sgprs));

		gprs += size;
	}

	si_create_function(ctx, "wrapper", NULL, 0, param_types, num_params, last_sgpr_param);

	/* Record the arguments of the function as if they were an output of
	 * a previous part.
	 */
	num_out = 0;
	num_out_sgpr = 0;

	for (unsigned i = 0; i < num_params; ++i) {
		LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
		LLVMTypeRef param_type = LLVMTypeOf(param);
		/* Outputs are tracked as scalars: i32 for SGPRs, f32 for VGPRs. */
		LLVMTypeRef out_type = i <= last_sgpr_param ? ctx->i32 : ctx->f32;
		unsigned size = llvm_get_type_size(param_type) / 4;

		if (size == 1) {
			if (param_type != out_type)
				param = LLVMBuildBitCast(builder, param, out_type, "");
			out[num_out++] = param;
		} else {
			LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

			/* Pointers are split into scalars via i64. */
			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
				param_type = ctx->i64;
			}

			if (param_type != vector_type)
				param = LLVMBuildBitCast(builder, param, vector_type, "");

			for (unsigned j = 0; j < size; ++j)
				out[num_out++] = LLVMBuildExtractElement(
					builder, param, LLVMConstInt(ctx->i32, j, 0), "");
		}

		if (i <= last_sgpr_param)
			num_out_sgpr = num_out;
	}

	/* Now chain the parts. */
	for (unsigned part = 0; part < num_parts; ++part) {
		LLVMValueRef in[48];
		LLVMValueRef ret;
		LLVMTypeRef ret_type;
		unsigned out_idx = 0;

		num_params = LLVMCountParams(parts[part]);
		assert(num_params <= ARRAY_SIZE(param_types));

		/* Derive arguments for the next part from outputs of the
		 * previous one.
		 */
		for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
			LLVMValueRef param;
			LLVMTypeRef param_type;
			bool is_sgpr;
			unsigned param_size;
			LLVMValueRef arg = NULL;

			param = LLVMGetParam(parts[part], param_idx);
			param_type = LLVMTypeOf(param);
			param_size = llvm_get_type_size(param_type) / 4;
			is_sgpr = ac_is_sgpr_param(param);

			if (is_sgpr) {
				/* Replace byval with inreg — presumably so the
				 * backend keeps the value in an SGPR after
				 * inlining; NOTE(review): confirm. */
				LLVMRemoveAttribute(param, LLVMByValAttribute);
				LLVMAddAttribute(param, LLVMInRegAttribute);
			}

			assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
			assert(is_sgpr || out_idx >= num_out_sgpr);

			if (param_size == 1)
				arg = out[out_idx];
			else
				arg = lp_build_gather_values(gallivm, &out[out_idx], param_size);

			if (LLVMTypeOf(arg) != param_type) {
				if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
					/* Rebuild the pointer from an i64. */
					arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
					arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
				} else {
					arg = LLVMBuildBitCast(builder, arg, param_type, "");
				}
			}

			in[param_idx] = arg;
			out_idx += param_size;
		}

		ret = LLVMBuildCall(builder, parts[part], in, num_params, "");
		ret_type = LLVMTypeOf(ret);

		/* Extract the returned GPRs. */
		num_out = 0;
		num_out_sgpr = 0;

		if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
			assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

			unsigned ret_size = LLVMCountStructElementTypes(ret_type);

			for (unsigned i = 0; i < ret_size; ++i) {
				LLVMValueRef val =
					LLVMBuildExtractValue(builder, ret, i, "");

				out[num_out++] = val;

				/* i32 elements are SGPR outputs; the assert
				 * checks they all precede VGPR (f32) ones. */
				if (LLVMTypeOf(val) == ctx->i32) {
					assert(num_out_sgpr + 1 == num_out);
					num_out_sgpr = num_out;
				}
			}
		}
	}

	LLVMBuildRetVoid(builder);
}
7099
7100 int si_compile_tgsi_shader(struct si_screen *sscreen,
7101 LLVMTargetMachineRef tm,
7102 struct si_shader *shader,
7103 bool is_monolithic,
7104 struct pipe_debug_callback *debug)
7105 {
7106 struct si_shader_selector *sel = shader->selector;
7107 struct si_shader_context ctx;
7108 struct lp_build_tgsi_context *bld_base;
7109 LLVMModuleRef mod;
7110 int r = -1;
7111
7112 /* Dump TGSI code before doing TGSI->LLVM conversion in case the
7113 * conversion fails. */
7114 if (r600_can_dump_shader(&sscreen->b, sel->info.processor) &&
7115 !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
7116 tgsi_dump(sel->tokens, 0);
7117 si_dump_streamout(&sel->so);
7118 }
7119
7120 si_init_shader_ctx(&ctx, sscreen, shader, tm);
7121 ctx.no_prolog = is_monolithic;
7122 ctx.no_epilog = is_monolithic;
7123 ctx.separate_prolog = !is_monolithic;
7124
7125 if (ctx.type == PIPE_SHADER_FRAGMENT)
7126 ctx.no_epilog = false;
7127
7128 memset(shader->info.vs_output_param_offset, 0xff,
7129 sizeof(shader->info.vs_output_param_offset));
7130
7131 shader->info.uses_instanceid = sel->info.uses_instanceid;
7132
7133 bld_base = &ctx.soa.bld_base;
7134 ctx.load_system_value = declare_system_value;
7135
7136 if (!si_compile_tgsi_main(&ctx, shader)) {
7137 si_llvm_dispose(&ctx);
7138 return -1;
7139 }
7140
7141 if (is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
7142 LLVMValueRef parts[2];
7143 union si_shader_part_key epilog_key;
7144
7145 parts[0] = ctx.main_fn;
7146
7147 si_get_ps_epilog_key(shader, &epilog_key);
7148 si_build_ps_epilog_function(&ctx, &epilog_key);
7149 parts[1] = ctx.main_fn;
7150
7151 si_build_wrapper_function(&ctx, parts, 2, 0);
7152 }
7153
7154 mod = bld_base->base.gallivm->module;
7155
7156 /* Dump LLVM IR before any optimization passes */
7157 if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
7158 r600_can_dump_shader(&sscreen->b, ctx.type))
7159 LLVMDumpModule(mod);
7160
7161 si_llvm_finalize_module(&ctx,
7162 r600_extra_shader_checks(&sscreen->b, ctx.type));
7163
7164 /* Post-optimization transformations. */
7165 si_eliminate_const_vs_outputs(&ctx);
7166
7167 /* Compile to bytecode. */
7168 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
7169 mod, debug, ctx.type, "TGSI shader");
7170 si_llvm_dispose(&ctx);
7171 if (r) {
7172 fprintf(stderr, "LLVM failed to compile shader\n");
7173 return r;
7174 }
7175
7176 /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
7177 * LLVM 3.9svn has this bug.
7178 */
7179 if (sel->type == PIPE_SHADER_COMPUTE) {
7180 unsigned *props = sel->info.properties;
7181 unsigned wave_size = 64;
7182 unsigned max_vgprs = 256;
7183 unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
7184 unsigned max_sgprs_per_wave = 128;
7185 unsigned max_block_threads;
7186
7187 if (props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH])
7188 max_block_threads = props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
7189 props[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
7190 props[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
7191 else
7192 max_block_threads = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
7193
7194 unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
7195 unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);
7196
7197 max_vgprs = max_vgprs / min_waves_per_simd;
7198 max_sgprs = MIN2(max_sgprs / min_waves_per_simd, max_sgprs_per_wave);
7199
7200 if (shader->config.num_sgprs > max_sgprs ||
7201 shader->config.num_vgprs > max_vgprs) {
7202 fprintf(stderr, "LLVM failed to compile a shader correctly: "
7203 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
7204 shader->config.num_sgprs, shader->config.num_vgprs,
7205 max_sgprs, max_vgprs);
7206
7207 /* Just terminate the process, because dependent
7208 * shaders can hang due to bad input data, but use
7209 * the env var to allow shader-db to work.
7210 */
7211 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
7212 abort();
7213 }
7214 }
7215
7216 /* Add the scratch offset to input SGPRs. */
7217 if (shader->config.scratch_bytes_per_wave)
7218 shader->info.num_input_sgprs += 1; /* scratch byte offset */
7219
7220 /* Calculate the number of fragment input VGPRs. */
7221 if (ctx.type == PIPE_SHADER_FRAGMENT) {
7222 shader->info.num_input_vgprs = 0;
7223 shader->info.face_vgpr_index = -1;
7224
7225 if (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_addr))
7226 shader->info.num_input_vgprs += 2;
7227 if (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr))
7228 shader->info.num_input_vgprs += 2;
7229 if (G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_addr))
7230 shader->info.num_input_vgprs += 2;
7231 if (G_0286CC_PERSP_PULL_MODEL_ENA(shader->config.spi_ps_input_addr))
7232 shader->info.num_input_vgprs += 3;
7233 if (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_addr))
7234 shader->info.num_input_vgprs += 2;
7235 if (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr))
7236 shader->info.num_input_vgprs += 2;
7237 if (G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_addr))
7238 shader->info.num_input_vgprs += 2;
7239 if (G_0286CC_LINE_STIPPLE_TEX_ENA(shader->config.spi_ps_input_addr))
7240 shader->info.num_input_vgprs += 1;
7241 if (G_0286CC_POS_X_FLOAT_ENA(shader->config.spi_ps_input_addr))
7242 shader->info.num_input_vgprs += 1;
7243 if (G_0286CC_POS_Y_FLOAT_ENA(shader->config.spi_ps_input_addr))
7244 shader->info.num_input_vgprs += 1;
7245 if (G_0286CC_POS_Z_FLOAT_ENA(shader->config.spi_ps_input_addr))
7246 shader->info.num_input_vgprs += 1;
7247 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_addr))
7248 shader->info.num_input_vgprs += 1;
7249 if (G_0286CC_FRONT_FACE_ENA(shader->config.spi_ps_input_addr)) {
7250 shader->info.face_vgpr_index = shader->info.num_input_vgprs;
7251 shader->info.num_input_vgprs += 1;
7252 }
7253 if (G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr))
7254 shader->info.num_input_vgprs += 1;
7255 if (G_0286CC_SAMPLE_COVERAGE_ENA(shader->config.spi_ps_input_addr))
7256 shader->info.num_input_vgprs += 1;
7257 if (G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr))
7258 shader->info.num_input_vgprs += 1;
7259 }
7260
7261 if (ctx.type == PIPE_SHADER_GEOMETRY) {
7262 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
7263 shader->gs_copy_shader->selector = shader->selector;
7264 ctx.shader = shader->gs_copy_shader;
7265 r = si_generate_gs_copy_shader(sscreen, &ctx,
7266 shader, debug);
7267 if (r) {
7268 free(shader->gs_copy_shader);
7269 shader->gs_copy_shader = NULL;
7270 return r;
7271 }
7272 }
7273
7274 return 0;
7275 }
7276
7277 /**
7278 * Create, compile and return a shader part (prolog or epilog).
7279 *
7280 * \param sscreen screen
7281 * \param list list of shader parts of the same category
7282 * \param key shader part key
7283 * \param tm LLVM target machine
7284 * \param debug debug callback
7285 * \param compile the callback responsible for compilation
7286 * \return non-NULL on success
7287 */
7288 static struct si_shader_part *
7289 si_get_shader_part(struct si_screen *sscreen,
7290 struct si_shader_part **list,
7291 union si_shader_part_key *key,
7292 LLVMTargetMachineRef tm,
7293 struct pipe_debug_callback *debug,
7294 bool (*compile)(struct si_screen *,
7295 LLVMTargetMachineRef,
7296 struct pipe_debug_callback *,
7297 struct si_shader_part *))
7298 {
7299 struct si_shader_part *result;
7300
7301 pipe_mutex_lock(sscreen->shader_parts_mutex);
7302
7303 /* Find existing. */
7304 for (result = *list; result; result = result->next) {
7305 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
7306 pipe_mutex_unlock(sscreen->shader_parts_mutex);
7307 return result;
7308 }
7309 }
7310
7311 /* Compile a new one. */
7312 result = CALLOC_STRUCT(si_shader_part);
7313 result->key = *key;
7314 if (!compile(sscreen, tm, debug, result)) {
7315 FREE(result);
7316 pipe_mutex_unlock(sscreen->shader_parts_mutex);
7317 return NULL;
7318 }
7319
7320 result->next = *list;
7321 *list = result;
7322 pipe_mutex_unlock(sscreen->shader_parts_mutex);
7323 return result;
7324 }
7325
/**
 * Create a vertex shader prolog.
 *
 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
 * All inputs are returned unmodified. The vertex load indices are
 * stored after them, which will used by the API VS for fetching inputs.
 *
 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
 *   input_v0,
 *   input_v1,
 *   input_v2,
 *   input_v3,
 *   (VertexID + BaseVertex),
 *   (InstanceID + StartInstance),
 *   (InstanceID / 2 + StartInstance)
 */
static bool si_compile_vs_prolog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	struct gallivm_state *gallivm = &ctx.gallivm;
	LLVMTypeRef *params, *returns;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_VERTEX;
	/* VGPR positions of the VertexID and InstanceID system values,
	 * right after the input SGPRs. */
	ctx.param_vertex_id = key->vs_prolog.num_input_sgprs;
	ctx.param_instance_id = key->vs_prolog.num_input_sgprs + 3;

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	params = alloca((key->vs_prolog.num_input_sgprs + 4) *
			sizeof(LLVMTypeRef));
	returns = alloca((key->vs_prolog.num_input_sgprs + 4 +
			  key->vs_prolog.last_input + 1) *
			 sizeof(LLVMTypeRef));
	/* NOTE(review): num_params is re-initialized below; this first
	 * assignment is redundant. */
	num_params = 0;
	num_returns = 0;

	/* Declare input and output SGPRs. */
	num_params = 0;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.i32;
	}
	last_sgpr = num_params - 1;

	/* 4 preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < 4; i++) {
		params[num_params++] = ctx.i32;
		returns[num_returns++] = ctx.f32;
	}

	/* Vertex load indices. */
	for (i = 0; i <= key->vs_prolog.last_input; i++)
		returns[num_returns++] = ctx.f32;

	/* Create the function. */
	si_create_function(&ctx, "vs_prolog", returns, num_returns, params,
			   num_params, last_sgpr);
	func = ctx.main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx.return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}
	/* The 4 VGPR inputs: bitcast i32 -> f32 for the return struct. */
	for (i = num_params - 4; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		p = LLVMBuildBitCast(gallivm->builder, p, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	for (i = 0; i <= key->vs_prolog.last_input; i++) {
		unsigned divisor = key->vs_prolog.states.instance_divisors[i];
		LLVMValueRef index;

		if (divisor) {
			/* InstanceID / Divisor + StartInstance */
			index = get_instance_index_for_fetch(&ctx,
							     SI_SGPR_START_INSTANCE,
							     divisor);
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(gallivm->builder,
					     LLVMGetParam(func, ctx.param_vertex_id),
					     LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
		}

		/* Indices are returned as f32; num_params keeps counting up
		 * so the insert positions follow the 4 VGPR outputs. */
		index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
		ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
					   num_params++, "");
	}

	/* Compile. */
	si_llvm_build_ret(&ctx, ret);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Prolog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7442
7443 /**
7444 * Compile the vertex shader epilog. This is also used by the tessellation
7445 * evaluation shader compiled as VS.
7446 *
7447 * The input is PrimitiveID.
7448 *
7449 * If PrimitiveID is required by the pixel shader, export it.
7450 * Otherwise, do nothing.
7451 */
static bool si_compile_vs_epilog(struct si_screen *sscreen,
				 LLVMTargetMachineRef tm,
				 struct pipe_debug_callback *debug,
				 struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader_context ctx;
	/* Taking these addresses before si_init_shader_ctx is fine: the
	 * members live inside ctx and are only dereferenced after init. */
	struct gallivm_state *gallivm = &ctx.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
	LLVMTypeRef params[5];
	int num_params, i;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, NULL, tm);
	ctx.type = PIPE_SHADER_VERTEX;

	/* Declare input VGPRs. Only PrimitiveID (at VS_EPILOG_PRIMID_LOC) is
	 * consumed, and only when it has to be exported; otherwise the
	 * function takes no parameters at all. */
	num_params = key->vs_epilog.states.export_prim_id ?
			(VS_EPILOG_PRIMID_LOC + 1) : 0;
	assert(num_params <= ARRAY_SIZE(params));

	for (i = 0; i < num_params; i++)
		params[i] = ctx.f32;

	/* Create the function (no return values, no last-SGPR marker). */
	si_create_function(&ctx, "vs_epilog", NULL, 0, params, num_params, -1);

	/* Emit exports. */
	if (key->vs_epilog.states.export_prim_id) {
		struct lp_build_context *base = &bld_base->base;
		struct lp_build_context *uint = &bld_base->uint_bld;
		LLVMValueRef args[9];

		/* Export PrimitiveID in the X channel of the PARAM slot
		 * assigned by si_get_vs_epilog(). */
		args[0] = lp_build_const_int32(base->gallivm, 0x0); /* enabled channels */
		args[1] = uint->zero; /* whether the EXEC mask is valid */
		args[2] = uint->zero; /* DONE bit */
		args[3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_PARAM +
					       key->vs_epilog.prim_id_param_offset);
		args[4] = uint->zero; /* COMPR flag (0 = 32-bit export) */
		args[5] = LLVMGetParam(ctx.main_fn,
				       VS_EPILOG_PRIMID_LOC); /* X */
		args[6] = base->undef; /* Y */
		args[7] = base->undef; /* Z */
		args[8] = base->undef; /* W */

		lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
				   LLVMVoidTypeInContext(base->gallivm->context),
				   args, 9, 0);
	}

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Vertex Shader Epilog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7515
7516 /**
7517 * Create & compile a vertex shader epilog. This a helper used by VS and TES.
7518 */
7519 static bool si_get_vs_epilog(struct si_screen *sscreen,
7520 LLVMTargetMachineRef tm,
7521 struct si_shader *shader,
7522 struct pipe_debug_callback *debug,
7523 struct si_vs_epilog_bits *states)
7524 {
7525 union si_shader_part_key epilog_key;
7526
7527 memset(&epilog_key, 0, sizeof(epilog_key));
7528 epilog_key.vs_epilog.states = *states;
7529
7530 /* Set up the PrimitiveID output. */
7531 if (shader->key.vs.epilog.export_prim_id) {
7532 unsigned index = shader->selector->info.num_outputs;
7533 unsigned offset = shader->info.nr_param_exports++;
7534
7535 epilog_key.vs_epilog.prim_id_param_offset = offset;
7536 assert(index < ARRAY_SIZE(shader->info.vs_output_param_offset));
7537 shader->info.vs_output_param_offset[index] = offset;
7538 }
7539
7540 shader->epilog = si_get_shader_part(sscreen, &sscreen->vs_epilogs,
7541 &epilog_key, tm, debug,
7542 si_compile_vs_epilog);
7543 return shader->epilog != NULL;
7544 }
7545
7546 /**
7547 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
7548 */
7549 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
7550 LLVMTargetMachineRef tm,
7551 struct si_shader *shader,
7552 struct pipe_debug_callback *debug)
7553 {
7554 struct tgsi_shader_info *info = &shader->selector->info;
7555 union si_shader_part_key prolog_key;
7556 unsigned i;
7557
7558 /* Get the prolog. */
7559 memset(&prolog_key, 0, sizeof(prolog_key));
7560 prolog_key.vs_prolog.states = shader->key.vs.prolog;
7561 prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
7562 prolog_key.vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
7563
7564 /* The prolog is a no-op if there are no inputs. */
7565 if (info->num_inputs) {
7566 shader->prolog =
7567 si_get_shader_part(sscreen, &sscreen->vs_prologs,
7568 &prolog_key, tm, debug,
7569 si_compile_vs_prolog);
7570 if (!shader->prolog)
7571 return false;
7572 }
7573
7574 /* Get the epilog. */
7575 if (!shader->key.vs.as_es && !shader->key.vs.as_ls &&
7576 !si_get_vs_epilog(sscreen, tm, shader, debug,
7577 &shader->key.vs.epilog))
7578 return false;
7579
7580 /* Set the instanceID flag. */
7581 for (i = 0; i < info->num_inputs; i++)
7582 if (prolog_key.vs_prolog.states.instance_divisors[i])
7583 shader->info.uses_instanceid = true;
7584
7585 return true;
7586 }
7587
7588 /**
7589 * Select and compile (or reuse) TES parts (epilog).
7590 */
7591 static bool si_shader_select_tes_parts(struct si_screen *sscreen,
7592 LLVMTargetMachineRef tm,
7593 struct si_shader *shader,
7594 struct pipe_debug_callback *debug)
7595 {
7596 if (shader->key.tes.as_es)
7597 return true;
7598
7599 /* TES compiled as VS. */
7600 return si_get_vs_epilog(sscreen, tm, shader, debug,
7601 &shader->key.tes.epilog);
7602 }
7603
7604 /**
 * Compile the TCS epilog. This writes tessellation factors to memory based on
 * the output primitive type of the tessellator (determined by TES).
7607 */
static bool si_compile_tcs_epilog(struct si_screen *sscreen,
				  LLVMTargetMachineRef tm,
				  struct pipe_debug_callback *debug,
				  struct si_shader_part *out)
{
	union si_shader_part_key *key = &out->key;
	struct si_shader shader = {};
	struct si_shader_context ctx;
	/* Addresses taken before init; only dereferenced after init. */
	struct gallivm_state *gallivm = &ctx.gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
	LLVMTypeRef params[16];
	LLVMValueRef func;
	int last_sgpr, num_params;
	bool status = true;

	si_init_shader_ctx(&ctx, sscreen, &shader, tm);
	ctx.type = PIPE_SHADER_TESS_CTRL;
	shader.key.tcs.epilog = key->tcs_epilog.states;

	/* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used;
	 * the other SGPRs are declared so the register layout matches the
	 * main TCS part this epilog is concatenated with. */
	params[SI_PARAM_RW_BUFFERS] = const_array(ctx.v16i8, SI_NUM_RW_BUFFERS);
	params[SI_PARAM_CONST_BUFFERS] = ctx.i64;
	params[SI_PARAM_SAMPLERS] = ctx.i64;
	params[SI_PARAM_IMAGES] = ctx.i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx.i64;
	params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_OUT_OFFSETS] = ctx.i32;
	params[SI_PARAM_TCS_OUT_LAYOUT] = ctx.i32;
	params[SI_PARAM_TCS_IN_LAYOUT] = ctx.i32;
	params[ctx.param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx.i32;
	params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx.i32;
	last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
	num_params = last_sgpr + 1;

	/* Three input VGPRs follow the SGPRs. */
	params[num_params++] = ctx.i32; /* patch index within the wave (REL_PATCH_ID) */
	params[num_params++] = ctx.i32; /* invocation ID within the patch */
	params[num_params++] = ctx.i32; /* LDS offset where tess factors should be loaded from */

	/* Create the function. */
	si_create_function(&ctx, "tcs_epilog", NULL, 0, params, num_params, last_sgpr);
	declare_tess_lds(&ctx);
	func = ctx.main_fn;

	/* Load the tess factors from LDS and store them to memory. */
	si_write_tess_factors(bld_base,
			      LLVMGetParam(func, last_sgpr + 1),
			      LLVMGetParam(func, last_sgpr + 2),
			      LLVMGetParam(func, last_sgpr + 3));

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
	si_llvm_finalize_module(&ctx,
		r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_TESS_CTRL));

	if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
			    gallivm->module, debug, ctx.type,
			    "Tessellation Control Shader Epilog"))
		status = false;

	si_llvm_dispose(&ctx);
	return status;
}
7669
7670 /**
7671 * Select and compile (or reuse) TCS parts (epilog).
7672 */
7673 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
7674 LLVMTargetMachineRef tm,
7675 struct si_shader *shader,
7676 struct pipe_debug_callback *debug)
7677 {
7678 union si_shader_part_key epilog_key;
7679
7680 /* Get the epilog. */
7681 memset(&epilog_key, 0, sizeof(epilog_key));
7682 epilog_key.tcs_epilog.states = shader->key.tcs.epilog;
7683
7684 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
7685 &epilog_key, tm, debug,
7686 si_compile_tcs_epilog);
7687 return shader->epilog != NULL;
7688 }
7689
7690 /**
7691 * Build the pixel shader prolog function. This handles:
7692 * - two-side color selection and interpolation
7693 * - overriding interpolation parameters for the API PS
7694 * - polygon stippling
7695 *
7696 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
7698 * Interpolated colors are stored after the preloaded VGPRs.
7699 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	LLVMTypeRef *params;
	LLVMValueRef ret, func;
	int last_sgpr, num_params, num_returns, i, num_color_channels;

	assert(si_need_ps_prolog(key));

	/* Number of inputs + 8 color elements. */
	params = alloca((key->ps_prolog.num_input_sgprs +
			 key->ps_prolog.num_input_vgprs + 8) *
			sizeof(LLVMTypeRef));

	/* Declare inputs. */
	num_params = 0;
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++)
		params[num_params++] = ctx->i32;
	last_sgpr = num_params - 1;

	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++)
		params[num_params++] = ctx->f32;

	/* Declare outputs (same as inputs + add colors if needed) */
	num_returns = num_params;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	for (i = 0; i < num_color_channels; i++)
		params[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", params, num_returns, params,
			   num_params, last_sgpr);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < num_params; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(gallivm->builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		/* POS_FIXED_PT is always last. */
		unsigned pos = key->ps_prolog.num_input_sgprs +
			       key->ps_prolog.num_input_vgprs - 1;
		LLVMValueRef ptr[2], list;

		/* Get the pointer to rw buffers. */
		ptr[0] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS);
		ptr[1] = LLVMGetParam(func, SI_SGPR_RW_BUFFERS_HI);
		list = lp_build_gather_values(gallivm, ptr, 2);
		list = LLVMBuildBitCast(gallivm->builder, list, ctx->i64, "");
		list = LLVMBuildIntToPtr(gallivm->builder, list,
					 const_array(ctx->v16i8, SI_NUM_RW_BUFFERS), "");

		si_llvm_emit_polygon_stipple(ctx, list, pos);
	}

	/* The interpolation VGPRs used below are laid out after the SGPRs,
	 * in this order (2 VGPRs each, as used by the offsets in this code):
	 *   base+0:  PERSP_SAMPLE,   base+2:  PERSP_CENTER,
	 *   base+4:  PERSP_CENTROID, base+6:  LINEAR_SAMPLE,
	 *   base+8:  LINEAR_CENTER,  base+10: LINEAR_CENTROID.
	 */
	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		bc_optimize = LLVMBuildLShr(gallivm->builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(gallivm->builder, bc_optimize,
					     ctx->i1, "");

		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(gallivm->builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(gallivm->builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(gallivm->builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors (two COLOR semantics at most). */
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = lp_build_gather_values(gallivm, interp, 2);
			interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
						     ctx->v2i32, "");
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = LLVMBuildBitCast(gallivm->builder, face, ctx->i32, "");
		}

		interp_fs_input(ctx,
				key->ps_prolog.color_attr_index[i],
				TGSI_SEMANTIC_COLOR, i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Append the interpolated color channels after the
		 * passed-through inputs in the return value. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(gallivm->builder, ret, color[chan],
						   num_params++, "");
		}
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
				"amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
7935
7936 /**
7937 * Compile the pixel shader prolog.
7938 */
7939 static bool si_compile_ps_prolog(struct si_screen *sscreen,
7940 LLVMTargetMachineRef tm,
7941 struct pipe_debug_callback *debug,
7942 struct si_shader_part *out)
7943 {
7944 union si_shader_part_key *key = &out->key;
7945 struct si_shader shader = {};
7946 struct si_shader_context ctx;
7947 struct gallivm_state *gallivm = &ctx.gallivm;
7948 bool status = true;
7949
7950 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
7951 ctx.type = PIPE_SHADER_FRAGMENT;
7952 shader.key.ps.prolog = key->ps_prolog.states;
7953
7954 si_build_ps_prolog_function(&ctx, key);
7955
7956 /* Compile. */
7957 si_llvm_finalize_module(&ctx,
7958 r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
7959
7960 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
7961 gallivm->module, debug, ctx.type,
7962 "Fragment Shader Prolog"))
7963 status = false;
7964
7965 si_llvm_dispose(&ctx);
7966 return status;
7967 }
7968
7969 /**
7970 * Build the pixel shader epilog function. This handles everything that must be
7971 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
7972 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	struct gallivm_state *gallivm = &ctx->gallivm;
	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
	/* Worst case: 16 SGPRs + 8 MRTs * 4 channels + Z, stencil, samplemask. */
	LLVMTypeRef params[16+8*4+3];
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int last_sgpr, num_params, i;
	struct si_ps_exports exp = {};

	/* Declare input SGPRs. Only ALPHA_REF is read here; the rest keep
	 * the register layout in sync with the main shader part. */
	params[SI_PARAM_RW_BUFFERS] = ctx->i64;
	params[SI_PARAM_CONST_BUFFERS] = ctx->i64;
	params[SI_PARAM_SAMPLERS] = ctx->i64;
	params[SI_PARAM_IMAGES] = ctx->i64;
	params[SI_PARAM_SHADER_BUFFERS] = ctx->i64;
	params[SI_PARAM_ALPHA_REF] = ctx->f32;
	last_sgpr = SI_PARAM_ALPHA_REF;

	/* Declare input VGPRs: 4 per written color buffer, plus one each for
	 * depth, stencil and sample mask when they are written. */
	num_params = (last_sgpr + 1) +
		     util_bitcount(key->ps_epilog.colors_written) * 4 +
		     key->ps_epilog.writes_z +
		     key->ps_epilog.writes_stencil +
		     key->ps_epilog.writes_samplemask;

	/* Ensure the sample-mask VGPR slot always exists. */
	num_params = MAX2(num_params,
			  last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	assert(num_params <= ARRAY_SIZE(params));

	for (i = last_sgpr + 1; i < num_params; i++)
		params[i] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, params, num_params, last_sgpr);
	/* Disable elimination of unused inputs. */
	si_llvm_add_attribute(ctx->main_fn,
			      "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = last_sgpr + 1;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export, which must carry the DONE bit when
	 * there is no Z/stencil/samplemask export. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1llu << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Consume 4 VGPRs per written MRT and emit its export. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		si_export_mrt_color(bld_base, color, mrt,
				    num_params - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	/* A PS must export at least one value; fall back to a null export. */
	if (depth || stencil || samplemask)
		si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		si_export_null(bld_base);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(gallivm->builder);
}
8069
8070
8071 /**
8072 * Compile the pixel shader epilog to a binary for concatenation.
8073 */
8074 static bool si_compile_ps_epilog(struct si_screen *sscreen,
8075 LLVMTargetMachineRef tm,
8076 struct pipe_debug_callback *debug,
8077 struct si_shader_part *out)
8078 {
8079 union si_shader_part_key *key = &out->key;
8080 struct si_shader shader = {};
8081 struct si_shader_context ctx;
8082 struct gallivm_state *gallivm = &ctx.gallivm;
8083 bool status = true;
8084
8085 si_init_shader_ctx(&ctx, sscreen, &shader, tm);
8086 ctx.type = PIPE_SHADER_FRAGMENT;
8087 shader.key.ps.epilog = key->ps_epilog.states;
8088
8089 si_build_ps_epilog_function(&ctx, key);
8090
8091 /* Compile. */
8092 si_llvm_finalize_module(&ctx,
8093 r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
8094
8095 if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
8096 gallivm->module, debug, ctx.type,
8097 "Fragment Shader Epilog"))
8098 status = false;
8099
8100 si_llvm_dispose(&ctx);
8101 return status;
8102 }
8103
8104 /**
8105 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
8106 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      LLVMTargetMachineRef tm,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   &prolog_key, tm, debug,
					   si_compile_ps_prolog);
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always required (it does the exports). */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   &epilog_key, tm, debug,
				   si_compile_ps_epilog);
	if (!shader->epilog)
		return false;

	/* The rest of this function patches SPI_PS_INPUT_ENA so the hardware
	 * loads exactly the inputs the selected prolog expects. */

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed. */
	if (shader->key.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
8196
8197 static void si_fix_num_sgprs(struct si_shader *shader)
8198 {
8199 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
8200
8201 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
8202 }
8203
/**
 * Create a shader variant: either compile a monolithic shader or assemble
 * a pre-compiled main part with selected prolog/epilog parts, then upload
 * the final binary.
 *
 * Returns 0 on success, non-zero on failure.
 */
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
		     struct si_shader *shader,
		     struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = sel->main_shader_part;
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 */
	if (!mainp ||
	    (sel->type == PIPE_SHADER_VERTEX &&
	     (shader->key.vs.as_es != mainp->key.vs.as_es ||
	      shader->key.vs.as_ls != mainp->key.vs.as_ls)) ||
	    (sel->type == PIPE_SHADER_TESS_EVAL &&
	     shader->key.tes.as_es != mainp->key.tes.as_es) ||
	    (sel->type == PIPE_SHADER_TESS_CTRL &&
	     shader->key.tcs.epilog.inputs_to_copy) ||
	    sel->type == PIPE_SHADER_COMPUTE) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_tgsi_shader(sscreen, tm, shader, true, debug);
		if (r)
			return r;
	} else {
		/* The shader consists of 2-3 parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 */

		/* Copy the compiled TGSI shader data over. */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (!si_shader_select_tes_parts(sscreen, tm, shader, debug))
				return -1;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, tm, shader, debug))
				return -1;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		}

		/* Update SGPR and VGPR counts: the combined binary needs the
		 * maximum of all three parts. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
	}

	si_fix_num_sgprs(shader);
	si_shader_dump(sscreen, shader, debug, sel->info.processor,
		       stderr);

	/* Upload. */
	r = si_shader_binary_upload(sscreen, shader);
	if (r) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return r;
	}

	return 0;
}
8310
8311 void si_shader_destroy(struct si_shader *shader)
8312 {
8313 if (shader->gs_copy_shader) {
8314 si_shader_destroy(shader->gs_copy_shader);
8315 FREE(shader->gs_copy_shader);
8316 }
8317
8318 if (shader->scratch_bo)
8319 r600_resource_reference(&shader->scratch_bo, NULL);
8320
8321 r600_resource_reference(&shader->bo, NULL);
8322
8323 if (!shader->is_binary_shared)
8324 radeon_shader_binary_clean(&shader->binary);
8325
8326 free(shader->shader_log);
8327 }