amd/common: Restore v4i32 suffix for llvm.SI.load.const intrinsic
[mesa.git] / src / amd / common / ac_llvm_build.c
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25 /* based on pieces from si_pipe.c and radeon_llvm_emit.c */
26 #include "ac_llvm_build.h"
27
28 #include <llvm-c/Core.h>
29
30 #include "c11/threads.h"
31
32 #include <assert.h>
33 #include <stdio.h>
34
35 #include "ac_llvm_util.h"
36 #include "ac_exp_param.h"
37 #include "util/bitscan.h"
38 #include "util/macros.h"
39 #include "util/u_atomic.h"
40 #include "util/u_math.h"
41 #include "sid.h"
42
43 #include "shader_enums.h"
44
45 #define AC_LLVM_INITIAL_CF_DEPTH 4
46
47 /* Data for if/else/endif and bgnloop/endloop control flow structures.
48 */
49 struct ac_llvm_flow {
50 /* Loop exit or next part of if/else/endif. */
51 LLVMBasicBlockRef next_block;
52 LLVMBasicBlockRef loop_entry_block;
53 };
54
55 /* Initialize module-independent parts of the context.
56 *
57 * The caller is responsible for initializing ctx::module and ctx::builder.
58 */
59 void
60 ac_llvm_context_init(struct ac_llvm_context *ctx,
61 enum chip_class chip_class, enum radeon_family family)
62 {
63 LLVMValueRef args[1];
64
65 ctx->context = LLVMContextCreate();
66
67 ctx->chip_class = chip_class;
68 ctx->family = family;
69 ctx->module = NULL;
70 ctx->builder = NULL;
71
72 ctx->voidt = LLVMVoidTypeInContext(ctx->context);
73 ctx->i1 = LLVMInt1TypeInContext(ctx->context);
74 ctx->i8 = LLVMInt8TypeInContext(ctx->context);
75 ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
76 ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
77 ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
78 ctx->intptr = ctx->i32;
79 ctx->f16 = LLVMHalfTypeInContext(ctx->context);
80 ctx->f32 = LLVMFloatTypeInContext(ctx->context);
81 ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
82 ctx->v2i16 = LLVMVectorType(ctx->i16, 2);
83 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
84 ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
85 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
86 ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
87 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
88 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
89
90 ctx->i16_0 = LLVMConstInt(ctx->i16, 0, false);
91 ctx->i16_1 = LLVMConstInt(ctx->i16, 1, false);
92 ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
93 ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
94 ctx->i64_0 = LLVMConstInt(ctx->i64, 0, false);
95 ctx->i64_1 = LLVMConstInt(ctx->i64, 1, false);
96 ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
97 ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);
98 ctx->f64_0 = LLVMConstReal(ctx->f64, 0.0);
99 ctx->f64_1 = LLVMConstReal(ctx->f64, 1.0);
100
101 ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
102 ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
103
104 ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context,
105 "range", 5);
106
107 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
108 "invariant.load", 14);
109
110 ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);
111
112 args[0] = LLVMConstReal(ctx->f32, 2.5);
113 ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);
114
115 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
116 "amdgpu.uniform", 14);
117
118 ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
119 }
120
121 void
122 ac_llvm_context_dispose(struct ac_llvm_context *ctx)
123 {
124 free(ctx->flow);
125 ctx->flow = NULL;
126 ctx->flow_depth_max = 0;
127 }
128
129 int
130 ac_get_llvm_num_components(LLVMValueRef value)
131 {
132 LLVMTypeRef type = LLVMTypeOf(value);
133 unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind
134 ? LLVMGetVectorSize(type)
135 : 1;
136 return num_components;
137 }
138
139 LLVMValueRef
140 ac_llvm_extract_elem(struct ac_llvm_context *ac,
141 LLVMValueRef value,
142 int index)
143 {
144 if (LLVMGetTypeKind(LLVMTypeOf(value)) != LLVMVectorTypeKind) {
145 assert(index == 0);
146 return value;
147 }
148
149 return LLVMBuildExtractElement(ac->builder, value,
150 LLVMConstInt(ac->i32, index, false), "");
151 }
152
153 int
154 ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
155 {
156 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
157 type = LLVMGetElementType(type);
158
159 if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind)
160 return LLVMGetIntTypeWidth(type);
161
162 if (type == ctx->f16)
163 return 16;
164 if (type == ctx->f32)
165 return 32;
166 if (type == ctx->f64)
167 return 64;
168
169 unreachable("Unhandled type kind in get_elem_bits");
170 }
171
172 unsigned
173 ac_get_type_size(LLVMTypeRef type)
174 {
175 LLVMTypeKind kind = LLVMGetTypeKind(type);
176
177 switch (kind) {
178 case LLVMIntegerTypeKind:
179 return LLVMGetIntTypeWidth(type) / 8;
180 case LLVMHalfTypeKind:
181 return 2;
182 case LLVMFloatTypeKind:
183 return 4;
184 case LLVMDoubleTypeKind:
185 return 8;
186 case LLVMPointerTypeKind:
187 if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_CONST_32BIT)
188 return 4;
189 return 8;
190 case LLVMVectorTypeKind:
191 return LLVMGetVectorSize(type) *
192 ac_get_type_size(LLVMGetElementType(type));
193 case LLVMArrayTypeKind:
194 return LLVMGetArrayLength(type) *
195 ac_get_type_size(LLVMGetElementType(type));
196 default:
197 assert(0);
198 return 0;
199 }
200 }
201
202 static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
203 {
204 if (t == ctx->f16 || t == ctx->i16)
205 return ctx->i16;
206 else if (t == ctx->f32 || t == ctx->i32)
207 return ctx->i32;
208 else if (t == ctx->f64 || t == ctx->i64)
209 return ctx->i64;
210 else
211 unreachable("Unhandled integer size");
212 }
213
214 LLVMTypeRef
215 ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
216 {
217 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
218 LLVMTypeRef elem_type = LLVMGetElementType(t);
219 return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
220 LLVMGetVectorSize(t));
221 }
222 return to_integer_type_scalar(ctx, t);
223 }
224
225 LLVMValueRef
226 ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
227 {
228 LLVMTypeRef type = LLVMTypeOf(v);
229 return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
230 }
231
232 LLVMValueRef
233 ac_to_integer_or_pointer(struct ac_llvm_context *ctx, LLVMValueRef v)
234 {
235 LLVMTypeRef type = LLVMTypeOf(v);
236 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
237 return v;
238 return ac_to_integer(ctx, v);
239 }
240
241 static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
242 {
243 if (t == ctx->i16 || t == ctx->f16)
244 return ctx->f16;
245 else if (t == ctx->i32 || t == ctx->f32)
246 return ctx->f32;
247 else if (t == ctx->i64 || t == ctx->f64)
248 return ctx->f64;
249 else
250 unreachable("Unhandled float size");
251 }
252
253 LLVMTypeRef
254 ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
255 {
256 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
257 LLVMTypeRef elem_type = LLVMGetElementType(t);
258 return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
259 LLVMGetVectorSize(t));
260 }
261 return to_float_type_scalar(ctx, t);
262 }
263
264 LLVMValueRef
265 ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
266 {
267 LLVMTypeRef type = LLVMTypeOf(v);
268 return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
269 }
270
271
272 LLVMValueRef
273 ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
274 LLVMTypeRef return_type, LLVMValueRef *params,
275 unsigned param_count, unsigned attrib_mask)
276 {
277 LLVMValueRef function, call;
278 bool set_callsite_attrs = !(attrib_mask & AC_FUNC_ATTR_LEGACY);
279
280 function = LLVMGetNamedFunction(ctx->module, name);
281 if (!function) {
282 LLVMTypeRef param_types[32], function_type;
283 unsigned i;
284
285 assert(param_count <= 32);
286
287 for (i = 0; i < param_count; ++i) {
288 assert(params[i]);
289 param_types[i] = LLVMTypeOf(params[i]);
290 }
291 function_type =
292 LLVMFunctionType(return_type, param_types, param_count, 0);
293 function = LLVMAddFunction(ctx->module, name, function_type);
294
295 LLVMSetFunctionCallConv(function, LLVMCCallConv);
296 LLVMSetLinkage(function, LLVMExternalLinkage);
297
298 if (!set_callsite_attrs)
299 ac_add_func_attributes(ctx->context, function, attrib_mask);
300 }
301
302 call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
303 if (set_callsite_attrs)
304 ac_add_func_attributes(ctx->context, call, attrib_mask);
305 return call;
306 }
307
308 /**
309 * Given the scalar or vector type \p type (e.g. i32, vNi32, f32), generate
310 * the textual name used to overload intrinsic names.
311 */
312 void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
313 {
314 LLVMTypeRef elem_type = type;
315
316 assert(bufsize >= 8);
317
318 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
319 int ret = snprintf(buf, bufsize, "v%u",
320 LLVMGetVectorSize(type));
321 if (ret < 0) {
322 char *type_name = LLVMPrintTypeToString(type);
323 fprintf(stderr, "Error building type name for: %s\n",
324 type_name);
325 return;
326 }
327 elem_type = LLVMGetElementType(type);
328 buf += ret;
329 bufsize -= ret;
330 }
331 switch (LLVMGetTypeKind(elem_type)) {
332 default: break;
333 case LLVMIntegerTypeKind:
334 snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
335 break;
336 case LLVMHalfTypeKind:
337 snprintf(buf, bufsize, "f16");
338 break;
339 case LLVMFloatTypeKind:
340 snprintf(buf, bufsize, "f32");
341 break;
342 case LLVMDoubleTypeKind:
343 snprintf(buf, bufsize, "f64");
344 break;
345 }
346 }
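/* Example (a sketch): for ctx->v4i32 this writes "v4i32" into buf; for a
 * scalar type such as ctx->f32 it writes just "f32".
 */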
347
348 /**
349 * Helper function that builds an LLVM IR PHI node and immediately adds
350 * incoming edges.
351 */
352 LLVMValueRef
353 ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type,
354 unsigned count_incoming, LLVMValueRef *values,
355 LLVMBasicBlockRef *blocks)
356 {
357 LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
358 LLVMAddIncoming(phi, values, blocks, count_incoming);
359 return phi;
360 }
361
362 void ac_build_s_barrier(struct ac_llvm_context *ctx)
363 {
364 ac_build_intrinsic(ctx, "llvm.amdgcn.s.barrier", ctx->voidt, NULL,
365 0, AC_FUNC_ATTR_CONVERGENT);
366 }
367
368 /* Prevent optimizations (at least of memory accesses) across the current
369 * point in the program by emitting empty inline assembly that is marked as
370 * having side effects.
371 *
372 * Optionally, a value can be passed through the inline assembly to prevent
373 * LLVM from hoisting calls to ReadNone functions.
374 */
375 void
376 ac_build_optimization_barrier(struct ac_llvm_context *ctx,
377 LLVMValueRef *pvgpr)
378 {
379 static int counter = 0;
380
381 LLVMBuilderRef builder = ctx->builder;
382 char code[16];
383
384 snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));
385
386 if (!pvgpr) {
387 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
388 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
389 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
390 } else {
391 LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
392 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
393 LLVMValueRef vgpr = *pvgpr;
394 LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
395 unsigned vgpr_size = ac_get_type_size(vgpr_type);
396 LLVMValueRef vgpr0;
397
398 assert(vgpr_size % 4 == 0);
399
400 vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
401 vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
402 vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
403 vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
404 vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
405
406 *pvgpr = vgpr;
407 }
408 }
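/* Minimal usage sketch: thread a live value through the barrier so that
 * LLVM cannot hoist a ReadNone call across this point (this is exactly how
 * ac_build_ballot below uses it):
 *
 *    LLVMValueRef v = ...;
 *    ac_build_optimization_barrier(ctx, &v); // v is pinned here
 */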
409
410 LLVMValueRef
411 ac_build_shader_clock(struct ac_llvm_context *ctx)
412 {
413 LLVMValueRef tmp = ac_build_intrinsic(ctx, "llvm.readcyclecounter",
414 ctx->i64, NULL, 0, 0);
415 return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
416 }
417
418 LLVMValueRef
419 ac_build_ballot(struct ac_llvm_context *ctx,
420 LLVMValueRef value)
421 {
422 LLVMValueRef args[3] = {
423 value,
424 ctx->i32_0,
425 LLVMConstInt(ctx->i32, LLVMIntNE, 0)
426 };
427
428 /* We currently have no other way to prevent LLVM from lifting the icmp
429 * calls to a dominating basic block.
430 */
431 ac_build_optimization_barrier(ctx, &args[0]);
432
433 args[0] = ac_to_integer(ctx, args[0]);
434
435 return ac_build_intrinsic(ctx,
436 "llvm.amdgcn.icmp.i32",
437 ctx->i64, args, 3,
438 AC_FUNC_ATTR_NOUNWIND |
439 AC_FUNC_ATTR_READNONE |
440 AC_FUNC_ATTR_CONVERGENT);
441 }
442
443 LLVMValueRef
444 ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
445 {
446 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
447 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
448 return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
449 }
450
451 LLVMValueRef
452 ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
453 {
454 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
455 return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set,
456 LLVMConstInt(ctx->i64, 0, 0), "");
457 }
458
459 LLVMValueRef
460 ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
461 {
462 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
463 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
464
465 LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
466 vote_set, active_set, "");
467 LLVMValueRef none = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
468 vote_set,
469 LLVMConstInt(ctx->i64, 0, 0), "");
470 return LLVMBuildOr(ctx->builder, all, none, "");
471 }
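/* Sanity check of the logic above: vote_eq(value) is true exactly when all
 * active lanes agree on value, i.e. the ballot of value equals either the
 * full active set ("all") or the empty set ("none").
 */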
472
473 LLVMValueRef
474 ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
475 unsigned value_count, unsigned component)
476 {
477 LLVMValueRef vec = NULL;
478
479 if (value_count == 1) {
480 return values[component];
481 } else if (!value_count)
482 unreachable("value_count is 0");
483
484 for (unsigned i = component; i < value_count + component; i++) {
485 LLVMValueRef value = values[i];
486
487 if (i == component)
488 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
489 LLVMValueRef index = LLVMConstInt(ctx->i32, i - component, false);
490 vec = LLVMBuildInsertElement(ctx->builder, vec, value, index, "");
491 }
492 return vec;
493 }
494
495 LLVMValueRef
496 ac_build_gather_values_extended(struct ac_llvm_context *ctx,
497 LLVMValueRef *values,
498 unsigned value_count,
499 unsigned value_stride,
500 bool load,
501 bool always_vector)
502 {
503 LLVMBuilderRef builder = ctx->builder;
504 LLVMValueRef vec = NULL;
505 unsigned i;
506
507 if (value_count == 1 && !always_vector) {
508 if (load)
509 return LLVMBuildLoad(builder, values[0], "");
510 return values[0];
511 } else if (!value_count)
512 unreachable("value_count is 0");
513
514 for (i = 0; i < value_count; i++) {
515 LLVMValueRef value = values[i * value_stride];
516 if (load)
517 value = LLVMBuildLoad(builder, value, "");
518
519 if (!i)
520 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
521 LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
522 vec = LLVMBuildInsertElement(builder, vec, value, index, "");
523 }
524 return vec;
525 }
526
527 LLVMValueRef
528 ac_build_gather_values(struct ac_llvm_context *ctx,
529 LLVMValueRef *values,
530 unsigned value_count)
531 {
532 return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
533 }
534
535 /* Expand a scalar or vector to <dst_channels x type> by filling the remaining
536 * channels with undef. Extract at most src_channels components from the input.
537 */
538 LLVMValueRef ac_build_expand(struct ac_llvm_context *ctx,
539 LLVMValueRef value,
540 unsigned src_channels,
541 unsigned dst_channels)
542 {
543 LLVMTypeRef elemtype;
544 LLVMValueRef chan[dst_channels];
545
546 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
547 unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
548
549 if (src_channels == dst_channels && vec_size == dst_channels)
550 return value;
551
552 src_channels = MIN2(src_channels, vec_size);
553
554 for (unsigned i = 0; i < src_channels; i++)
555 chan[i] = ac_llvm_extract_elem(ctx, value, i);
556
557 elemtype = LLVMGetElementType(LLVMTypeOf(value));
558 } else {
559 if (src_channels) {
560 assert(src_channels == 1);
561 chan[0] = value;
562 }
563 elemtype = LLVMTypeOf(value);
564 }
565
566 for (unsigned i = src_channels; i < dst_channels; i++)
567 chan[i] = LLVMGetUndef(elemtype);
568
569 return ac_build_gather_values(ctx, chan, dst_channels);
570 }
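/* Example: ac_build_expand(ctx, some_v2f32, 2, 4) keeps channels 0-1 and
 * fills channels 2-3 with undef; ac_build_expand_to_vec4 below is the
 * common dst_channels == 4 shorthand.
 */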
571
572 /* Expand a scalar or vector to <4 x type> by filling the remaining channels
573 * with undef. Extract at most num_channels components from the input.
574 */
575 LLVMValueRef ac_build_expand_to_vec4(struct ac_llvm_context *ctx,
576 LLVMValueRef value,
577 unsigned num_channels)
578 {
579 return ac_build_expand(ctx, value, num_channels, 4);
580 }
581
582 LLVMValueRef ac_build_round(struct ac_llvm_context *ctx, LLVMValueRef value)
583 {
584 unsigned type_size = ac_get_type_size(LLVMTypeOf(value));
585 const char *name;
586
587 if (type_size == 2)
588 name = "llvm.rint.f16";
589 else if (type_size == 4)
590 name = "llvm.rint.f32";
591 else
592 name = "llvm.rint.f64";
593
594 return ac_build_intrinsic(ctx, name, LLVMTypeOf(value), &value, 1,
595 AC_FUNC_ATTR_READNONE);
596 }
597
598 LLVMValueRef
599 ac_build_fdiv(struct ac_llvm_context *ctx,
600 LLVMValueRef num,
601 LLVMValueRef den)
602 {
603 /* If we do (num / den), LLVM >= 7.0 does:
604 * return num * v_rcp_f32(den * (fabs(den) > 0x1.0p+96f ? 0x1.0p-32f : 1.0f));
605 *
606 * If we do (num * (1 / den)), LLVM does:
607 * return num * v_rcp_f32(den);
608 */
609 LLVMValueRef one = LLVMTypeOf(num) == ctx->f64 ? ctx->f64_1 : ctx->f32_1;
610 LLVMValueRef rcp = LLVMBuildFDiv(ctx->builder, one, den, "");
611 LLVMValueRef ret = LLVMBuildFMul(ctx->builder, num, rcp, "");
612
613 /* Use v_rcp_f32 instead of precise division. */
614 if (!LLVMIsConstant(ret))
615 LLVMSetMetadata(ret, ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
616 return ret;
617 }
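/* The metadata shows up in the IR roughly as (a sketch):
 *
 *    %ret = fmul float %num, %rcp, !fpmath !0
 *    !0 = !{float 2.500000e+00}
 *
 * which tells the backend that a 2.5 ULP error is acceptable, allowing it
 * to select v_rcp_f32 instead of a precise division sequence.
 */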
618
619 /* See fast_idiv_by_const.h. */
620 /* Set: increment = util_fast_udiv_info::increment ? multiplier : 0; */
621 LLVMValueRef ac_build_fast_udiv(struct ac_llvm_context *ctx,
622 LLVMValueRef num,
623 LLVMValueRef multiplier,
624 LLVMValueRef pre_shift,
625 LLVMValueRef post_shift,
626 LLVMValueRef increment)
627 {
628 LLVMBuilderRef builder = ctx->builder;
629
630 num = LLVMBuildLShr(builder, num, pre_shift, "");
631 num = LLVMBuildMul(builder,
632 LLVMBuildZExt(builder, num, ctx->i64, ""),
633 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
634 num = LLVMBuildAdd(builder, num,
635 LLVMBuildZExt(builder, increment, ctx->i64, ""), "");
636 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
637 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
638 return LLVMBuildLShr(builder, num, post_shift, "");
639 }
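/* Sketch of the expected setup for the magic constants (assuming the
 * util_compute_fast_udiv_info() helper from util/fast_idiv_by_const.h):
 *
 *    struct util_fast_udiv_info info =
 *       util_compute_fast_udiv_info(divisor, num_bits, 32);
 *    multiplier = LLVMConstInt(ctx->i32, info.multiplier, 0);
 *    pre_shift  = LLVMConstInt(ctx->i32, info.pre_shift, 0);
 *    post_shift = LLVMConstInt(ctx->i32, info.post_shift, 0);
 *    increment  = LLVMConstInt(ctx->i32,
 *                              info.increment ? info.multiplier : 0, 0);
 */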
640
641 /* See fast_idiv_by_const.h. */
642 /* If num != UINT_MAX, this more efficient version can be used. */
643 /* Set: increment = util_fast_udiv_info::increment; */
644 LLVMValueRef ac_build_fast_udiv_nuw(struct ac_llvm_context *ctx,
645 LLVMValueRef num,
646 LLVMValueRef multiplier,
647 LLVMValueRef pre_shift,
648 LLVMValueRef post_shift,
649 LLVMValueRef increment)
650 {
651 LLVMBuilderRef builder = ctx->builder;
652
653 num = LLVMBuildLShr(builder, num, pre_shift, "");
654 num = LLVMBuildNUWAdd(builder, num, increment, "");
655 num = LLVMBuildMul(builder,
656 LLVMBuildZExt(builder, num, ctx->i64, ""),
657 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
658 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
659 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
660 return LLVMBuildLShr(builder, num, post_shift, "");
661 }
662
663 /* See fast_idiv_by_const.h. */
664 /* Both operands must fit in 31 bits and the divisor must not be 1. */
665 LLVMValueRef ac_build_fast_udiv_u31_d_not_one(struct ac_llvm_context *ctx,
666 LLVMValueRef num,
667 LLVMValueRef multiplier,
668 LLVMValueRef post_shift)
669 {
670 LLVMBuilderRef builder = ctx->builder;
671
672 num = LLVMBuildMul(builder,
673 LLVMBuildZExt(builder, num, ctx->i64, ""),
674 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
675 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
676 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
677 return LLVMBuildLShr(builder, num, post_shift, "");
678 }
679
680 /* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
681 * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
682 * already multiplied by two. id is the cube face number.
683 */
684 struct cube_selection_coords {
685 LLVMValueRef stc[2];
686 LLVMValueRef ma;
687 LLVMValueRef id;
688 };
689
690 static void
691 build_cube_intrinsic(struct ac_llvm_context *ctx,
692 LLVMValueRef in[3],
693 struct cube_selection_coords *out)
694 {
695 LLVMTypeRef f32 = ctx->f32;
696
697 out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc",
698 f32, in, 3, AC_FUNC_ATTR_READNONE);
699 out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc",
700 f32, in, 3, AC_FUNC_ATTR_READNONE);
701 out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema",
702 f32, in, 3, AC_FUNC_ATTR_READNONE);
703 out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid",
704 f32, in, 3, AC_FUNC_ATTR_READNONE);
705 }
706
707 /**
708 * Build a manual selection sequence for cube face sc/tc coordinates and
709 * major axis vector (multiplied by 2 for consistency) for the given
710 * vec3 \p coords, for the face implied by \p selcoords.
711 *
712 * For the major axis, we always adjust the sign to be in the direction of
713 * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
714 * the selcoords major axis.
715 */
716 static void build_cube_select(struct ac_llvm_context *ctx,
717 const struct cube_selection_coords *selcoords,
718 const LLVMValueRef *coords,
719 LLVMValueRef *out_st,
720 LLVMValueRef *out_ma)
721 {
722 LLVMBuilderRef builder = ctx->builder;
723 LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
724 LLVMValueRef is_ma_positive;
725 LLVMValueRef sgn_ma;
726 LLVMValueRef is_ma_z, is_not_ma_z;
727 LLVMValueRef is_ma_y;
728 LLVMValueRef is_ma_x;
729 LLVMValueRef sgn;
730 LLVMValueRef tmp;
731
732 is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE,
733 selcoords->ma, LLVMConstReal(f32, 0.0), "");
734 sgn_ma = LLVMBuildSelect(builder, is_ma_positive,
735 LLVMConstReal(f32, 1.0), LLVMConstReal(f32, -1.0), "");
736
737 is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
738 is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
739 is_ma_y = LLVMBuildAnd(builder, is_not_ma_z,
740 LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
741 is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");
742
743 /* Select sc */
744 tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
745 sgn = LLVMBuildSelect(builder, is_ma_y, LLVMConstReal(f32, 1.0),
746 LLVMBuildSelect(builder, is_ma_z, sgn_ma,
747 LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
748 out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");
749
750 /* Select tc */
751 tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
752 sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma,
753 LLVMConstReal(f32, -1.0), "");
754 out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");
755
756 /* Select ma */
757 tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
758 LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
759 tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32",
760 ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
761 *out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
762 }
763
764 void
765 ac_prepare_cube_coords(struct ac_llvm_context *ctx,
766 bool is_deriv, bool is_array, bool is_lod,
767 LLVMValueRef *coords_arg,
768 LLVMValueRef *derivs_arg)
769 {
770
771 LLVMBuilderRef builder = ctx->builder;
772 struct cube_selection_coords selcoords;
773 LLVMValueRef coords[3];
774 LLVMValueRef invma;
775
776 if (is_array && !is_lod) {
777 LLVMValueRef tmp = ac_build_round(ctx, coords_arg[3]);
778
779 /* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
780 *
781 * "For Array forms, the array layer used will be
782 *
783 * max(0, min(d−1, floor(layer+0.5)))
784 *
785 * where d is the depth of the texture array and layer
786 * comes from the component indicated in the tables below."
787 * This works around an issue where the layer is taken from a
788 * helper invocation which happens to fall on a different
789 * layer due to extrapolation.
790 *
791 * VI and earlier attempt to implement this in hardware by
792 * clamping the value of coords[2] = (8 * layer) + face.
793 * Unfortunately, this means that we end up with the wrong
794 * face when clamping occurs.
795 *
796 * Clamp the layer earlier to work around the issue.
797 */
798 if (ctx->chip_class <= VI) {
799 LLVMValueRef ge0;
800 ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
801 tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
802 }
803
804 coords_arg[3] = tmp;
805 }
806
807 build_cube_intrinsic(ctx, coords_arg, &selcoords);
808
809 invma = ac_build_intrinsic(ctx, "llvm.fabs.f32",
810 ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
811 invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);
812
813 for (int i = 0; i < 2; ++i)
814 coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");
815
816 coords[2] = selcoords.id;
817
818 if (is_deriv && derivs_arg) {
819 LLVMValueRef derivs[4];
820 int axis;
821
822 /* Convert cube derivatives to 2D derivatives. */
823 for (axis = 0; axis < 2; axis++) {
824 LLVMValueRef deriv_st[2];
825 LLVMValueRef deriv_ma;
826
827 /* Transform the derivative alongside the texture
828 * coordinate. Mathematically, the correct formula is
829 * as follows. Assume we're projecting onto the +Z face
830 * and denote by dx/dh the derivative of the (original)
831 * X texture coordinate with respect to horizontal
832 * window coordinates. The projection onto the +Z face
833 * plane is:
834 *
835 * f(x,z) = x/z
836 *
837 * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
838 * = 1/z * dx/dh - x/z * 1/z * dz/dh.
839 *
840 * This motivates the implementation below.
841 *
842 * Whether this actually gives the expected results for
843 * apps that might feed in derivatives obtained via
844 * finite differences is anyone's guess. The OpenGL spec
845 * seems awfully quiet about how textureGrad for cube
846 * maps should be handled.
847 */
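/* In the code below, invma plays the role of 1/z (the factors of two
 * on ma and deriv_ma cancel), deriv_st is dx/dh, deriv_ma is dz/dh
 * and coords[i] already holds x/z, so
 *
 *    derivs = deriv_st * invma - (deriv_ma * invma) * coords[i]
 *
 * is df/dh from the derivation above.
 */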
848 build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3],
849 deriv_st, &deriv_ma);
850
851 deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");
852
853 for (int i = 0; i < 2; ++i)
854 derivs[axis * 2 + i] =
855 LLVMBuildFSub(builder,
856 LLVMBuildFMul(builder, deriv_st[i], invma, ""),
857 LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
858 }
859
860 memcpy(derivs_arg, derivs, sizeof(derivs));
861 }
862
863 /* Shift the texture coordinate. This must be applied after the
864 * derivative calculation.
865 */
866 for (int i = 0; i < 2; ++i)
867 coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");
868
869 if (is_array) {
870 /* for cube arrays coord.z = coord.w(array_index) * 8 + face */
871 /* coords_arg.w component - array_index for cube arrays */
872 coords[2] = ac_build_fmad(ctx, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), coords[2]);
873 }
874
875 memcpy(coords_arg, coords, sizeof(coords));
876 }
877
878
879 LLVMValueRef
880 ac_build_fs_interp(struct ac_llvm_context *ctx,
881 LLVMValueRef llvm_chan,
882 LLVMValueRef attr_number,
883 LLVMValueRef params,
884 LLVMValueRef i,
885 LLVMValueRef j)
886 {
887 LLVMValueRef args[5];
888 LLVMValueRef p1;
889
890 args[0] = i;
891 args[1] = llvm_chan;
892 args[2] = attr_number;
893 args[3] = params;
894
895 p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
896 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
897
898 args[0] = p1;
899 args[1] = j;
900 args[2] = llvm_chan;
901 args[3] = attr_number;
902 args[4] = params;
903
904 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
905 ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
906 }
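/* A sketch of the hardware semantics behind the two-step expansion above:
 * v_interp_p1_f32 computes p1 = i * dAttr/dx + attr_p0 and v_interp_p2_f32
 * then yields p1 + j * dAttr/dy, i.e. barycentric interpolation of the
 * attribute (attr_p0, dAttr/dx and dAttr/dy are illustrative names for the
 * per-primitive parameters, not ISA terms).
 */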
907
908 LLVMValueRef
909 ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
910 LLVMValueRef parameter,
911 LLVMValueRef llvm_chan,
912 LLVMValueRef attr_number,
913 LLVMValueRef params)
914 {
915 LLVMValueRef args[4];
916
917 args[0] = parameter;
918 args[1] = llvm_chan;
919 args[2] = attr_number;
920 args[3] = params;
921
922 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov",
923 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
924 }
925
926 LLVMValueRef
927 ac_build_gep0(struct ac_llvm_context *ctx,
928 LLVMValueRef base_ptr,
929 LLVMValueRef index)
930 {
931 LLVMValueRef indices[2] = {
932 ctx->i32_0,
933 index,
934 };
935 return LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
936 }
937
938 LLVMValueRef ac_build_pointer_add(struct ac_llvm_context *ctx, LLVMValueRef ptr,
939 LLVMValueRef index)
940 {
941 return LLVMBuildPointerCast(ctx->builder,
942 ac_build_gep0(ctx, ptr, index),
943 LLVMTypeOf(ptr), "");
944 }
945
946 void
947 ac_build_indexed_store(struct ac_llvm_context *ctx,
948 LLVMValueRef base_ptr, LLVMValueRef index,
949 LLVMValueRef value)
950 {
951 LLVMBuildStore(ctx->builder, value,
952 ac_build_gep0(ctx, base_ptr, index));
953 }
954
955 /**
956 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
957 * It's equivalent to doing a load from &base_ptr[index].
958 *
959 * \param base_ptr Where the array starts.
960 * \param index The element index into the array.
961 * \param uniform Whether the base_ptr and index can be assumed to be
962 * dynamically uniform (i.e. load to an SGPR)
963 * \param invariant Whether the load is invariant (no other opcodes affect it)
964 * \param no_unsigned_wraparound
965 * For all possible re-associations and re-distributions of an expression
966 * "base_ptr + index * elemsize" into "addr + offset" (excluding GEPs
967 * without inbounds in base_ptr), this parameter is true if "addr + offset"
968 * does not result in an unsigned integer wraparound. This is used for
969 * optimal code generation of 32-bit pointer arithmetic.
970 *
971 * For example, a 32-bit immediate offset that causes a 32-bit unsigned
972 * integer wraparound can't be an imm offset in s_load_dword, because
973 * the instruction performs "addr + offset" in 64 bits.
974 *
975 * Expected usage for bindless textures by chaining GEPs:
976 * // possible unsigned wraparound, don't use InBounds:
977 * ptr1 = LLVMBuildGEP(base_ptr, index);
978 * image = load(ptr1); // becomes "s_load ptr1, 0"
979 *
980 * ptr2 = LLVMBuildInBoundsGEP(ptr1, 32 / elemsize);
981 * sampler = load(ptr2); // becomes "s_load ptr1, 32" thanks to InBounds
982 */
983 static LLVMValueRef
984 ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
985 LLVMValueRef index, bool uniform, bool invariant,
986 bool no_unsigned_wraparound)
987 {
988 LLVMValueRef pointer, result;
989 LLVMValueRef indices[2] = {ctx->i32_0, index};
990
991 if (no_unsigned_wraparound &&
992 LLVMGetPointerAddressSpace(LLVMTypeOf(base_ptr)) == AC_ADDR_SPACE_CONST_32BIT)
993 pointer = LLVMBuildInBoundsGEP(ctx->builder, base_ptr, indices, 2, "");
994 else
995 pointer = LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
996
997 if (uniform)
998 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
999 result = LLVMBuildLoad(ctx->builder, pointer, "");
1000 if (invariant)
1001 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
1002 return result;
1003 }
1004
1005 LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1006 LLVMValueRef index)
1007 {
1008 return ac_build_load_custom(ctx, base_ptr, index, false, false, false);
1009 }
1010
1011 LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx,
1012 LLVMValueRef base_ptr, LLVMValueRef index)
1013 {
1014 return ac_build_load_custom(ctx, base_ptr, index, false, true, false);
1015 }
1016
1017 /* This assumes that there is no unsigned integer wraparound during the address
1018 * computation, excluding all GEPs within base_ptr. */
1019 LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx,
1020 LLVMValueRef base_ptr, LLVMValueRef index)
1021 {
1022 return ac_build_load_custom(ctx, base_ptr, index, true, true, true);
1023 }
1024
1025 /* See ac_build_load_custom() documentation. */
1026 LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx,
1027 LLVMValueRef base_ptr, LLVMValueRef index)
1028 {
1029 return ac_build_load_custom(ctx, base_ptr, index, true, true, false);
1030 }
1031
1032 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
1033 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
1034 * or v4i32 (num_channels=3,4).
1035 */
1036 void
1037 ac_build_buffer_store_dword(struct ac_llvm_context *ctx,
1038 LLVMValueRef rsrc,
1039 LLVMValueRef vdata,
1040 unsigned num_channels,
1041 LLVMValueRef voffset,
1042 LLVMValueRef soffset,
1043 unsigned inst_offset,
1044 bool glc,
1045 bool slc,
1046 bool writeonly_memory,
1047 bool swizzle_enable_hint)
1048 {
1049 /* Split 3-channel stores, because LLVM doesn't support 3-channel
1050 * intrinsics. */
1051 if (num_channels == 3) {
1052 LLVMValueRef v[3], v01;
1053
1054 for (int i = 0; i < 3; i++) {
1055 v[i] = LLVMBuildExtractElement(ctx->builder, vdata,
1056 LLVMConstInt(ctx->i32, i, 0), "");
1057 }
1058 v01 = ac_build_gather_values(ctx, v, 2);
1059
1060 ac_build_buffer_store_dword(ctx, rsrc, v01, 2, voffset,
1061 soffset, inst_offset, glc, slc,
1062 writeonly_memory, swizzle_enable_hint);
1063 ac_build_buffer_store_dword(ctx, rsrc, v[2], 1, voffset,
1064 soffset, inst_offset + 8,
1065 glc, slc,
1066 writeonly_memory, swizzle_enable_hint);
1067 return;
1068 }
1069
1070 /* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
1071 * (voffset is swizzled, but soffset isn't swizzled).
1072 * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
1073 */
1074 if (!swizzle_enable_hint) {
1075 LLVMValueRef offset = soffset;
1076
1077 static const char *types[] = {"f32", "v2f32", "v4f32"};
1078
1079 if (inst_offset)
1080 offset = LLVMBuildAdd(ctx->builder, offset,
1081 LLVMConstInt(ctx->i32, inst_offset, 0), "");
1082 if (voffset)
1083 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1084
1085 LLVMValueRef args[] = {
1086 ac_to_float(ctx, vdata),
1087 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1088 ctx->i32_0,
1089 offset,
1090 LLVMConstInt(ctx->i1, glc, 0),
1091 LLVMConstInt(ctx->i1, slc, 0),
1092 };
1093
1094 char name[256];
1095 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s",
1096 types[CLAMP(num_channels, 1, 3) - 1]);
1097
1098 ac_build_intrinsic(ctx, name, ctx->voidt,
1099 args, ARRAY_SIZE(args),
1100 writeonly_memory ?
1101 AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
1102 AC_FUNC_ATTR_WRITEONLY);
1103 return;
1104 }
1105
1106 static const unsigned dfmt[] = {
1107 V_008F0C_BUF_DATA_FORMAT_32,
1108 V_008F0C_BUF_DATA_FORMAT_32_32,
1109 V_008F0C_BUF_DATA_FORMAT_32_32_32,
1110 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
1111 };
1112 static const char *types[] = {"i32", "v2i32", "v4i32"};
1113 LLVMValueRef args[] = {
1114 vdata,
1115 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1116 ctx->i32_0,
1117 voffset ? voffset : ctx->i32_0,
1118 soffset,
1119 LLVMConstInt(ctx->i32, inst_offset, 0),
1120 LLVMConstInt(ctx->i32, dfmt[num_channels - 1], 0),
1121 LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, 0),
1122 LLVMConstInt(ctx->i1, glc, 0),
1123 LLVMConstInt(ctx->i1, slc, 0),
1124 };
1125 char name[256];
1126 snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.store.%s",
1127 types[CLAMP(num_channels, 1, 3) - 1]);
1128
1129 ac_build_intrinsic(ctx, name, ctx->voidt,
1130 args, ARRAY_SIZE(args),
1131 writeonly_memory ?
1132 AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
1133 AC_FUNC_ATTR_WRITEONLY);
1134 }
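/* Minimal usage sketch (argument values are illustrative): store a single
 * dword at byte offset 16 of an unswizzled buffer:
 *
 *    ac_build_buffer_store_dword(ctx, rsrc, data, 1, NULL, ctx->i32_0,
 *                                16, false, false, false, false);
 */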
1135
1136 static LLVMValueRef
1137 ac_build_buffer_load_common(struct ac_llvm_context *ctx,
1138 LLVMValueRef rsrc,
1139 LLVMValueRef vindex,
1140 LLVMValueRef voffset,
1141 unsigned num_channels,
1142 bool glc,
1143 bool slc,
1144 bool can_speculate,
1145 bool use_format)
1146 {
1147 LLVMValueRef args[] = {
1148 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1149 vindex ? vindex : ctx->i32_0,
1150 voffset,
1151 LLVMConstInt(ctx->i1, glc, 0),
1152 LLVMConstInt(ctx->i1, slc, 0)
1153 };
1154 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1155
1156 LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32};
1157 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1158 char name[256];
1159
1160 if (use_format) {
1161 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.format.%s",
1162 type_names[func]);
1163 } else {
1164 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
1165 type_names[func]);
1166 }
1167
1168 return ac_build_intrinsic(ctx, name, types[func], args,
1169 ARRAY_SIZE(args),
1170 ac_get_load_intr_attribs(can_speculate));
1171 }
1172
1173 static LLVMValueRef
1174 ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx,
1175 LLVMValueRef rsrc,
1176 LLVMValueRef vindex,
1177 LLVMValueRef voffset,
1178 LLVMValueRef soffset,
1179 unsigned num_channels,
1180 bool glc,
1181 bool slc,
1182 bool can_speculate,
1183 bool use_format,
1184 bool structurized)
1185 {
1186 LLVMValueRef args[5];
1187 int idx = 0;
1188 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1189 if (structurized)
1190 args[idx++] = vindex ? vindex : ctx->i32_0;
1191 args[idx++] = voffset ? voffset : ctx->i32_0;
1192 args[idx++] = soffset ? soffset : ctx->i32_0;
1193 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
1194 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1195
1196 LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32};
1197 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1198 const char *indexing_kind = structurized ? "struct" : "raw";
1199 char name[256];
1200
1201 if (use_format) {
1202 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.format.%s",
1203 indexing_kind, type_names[func]);
1204 } else {
1205 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.%s",
1206 indexing_kind, type_names[func]);
1207 }
1208
1209 return ac_build_intrinsic(ctx, name, types[func], args,
1210 idx,
1211 ac_get_load_intr_attribs(can_speculate));
1212 }
1213
1214 LLVMValueRef
1215 ac_build_buffer_load(struct ac_llvm_context *ctx,
1216 LLVMValueRef rsrc,
1217 int num_channels,
1218 LLVMValueRef vindex,
1219 LLVMValueRef voffset,
1220 LLVMValueRef soffset,
1221 unsigned inst_offset,
1222 unsigned glc,
1223 unsigned slc,
1224 bool can_speculate,
1225 bool allow_smem)
1226 {
1227 LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
1228 if (voffset)
1229 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1230 if (soffset)
1231 offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
1232
1233 if (allow_smem && !slc &&
1234 (!glc || (HAVE_LLVM >= 0x0800 && ctx->chip_class >= VI))) {
1235 assert(vindex == NULL);
1236
1237 LLVMValueRef result[8];
1238
1239 for (int i = 0; i < num_channels; i++) {
1240 if (i) {
1241 offset = LLVMBuildAdd(ctx->builder, offset,
1242 LLVMConstInt(ctx->i32, 4, 0), "");
1243 }
1244 const char *intrname =
1245 HAVE_LLVM >= 0x0800 ? "llvm.amdgcn.s.buffer.load.f32"
1246 : "llvm.SI.load.const.v4i32";
1247 unsigned num_args = HAVE_LLVM >= 0x0800 ? 3 : 2;
1248 LLVMValueRef args[3] = {
1249 rsrc,
1250 offset,
1251 glc ? ctx->i32_1 : ctx->i32_0,
1252 };
1253 result[i] = ac_build_intrinsic(ctx, intrname,
1254 ctx->f32, args, num_args,
1255 AC_FUNC_ATTR_READNONE |
1256 (HAVE_LLVM < 0x0800 ? AC_FUNC_ATTR_LEGACY : 0));
1257 }
1258 if (num_channels == 1)
1259 return result[0];
1260
1261 if (num_channels == 3)
1262 result[num_channels++] = LLVMGetUndef(ctx->f32);
1263 return ac_build_gather_values(ctx, result, num_channels);
1264 }
1265
1266 return ac_build_buffer_load_common(ctx, rsrc, vindex, offset,
1267 num_channels, glc, slc,
1268 can_speculate, false);
1269 }
1270
1271 LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
1272 LLVMValueRef rsrc,
1273 LLVMValueRef vindex,
1274 LLVMValueRef voffset,
1275 unsigned num_channels,
1276 bool glc,
1277 bool can_speculate)
1278 {
1279 if (HAVE_LLVM >= 0x0800) {
1280 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1281 num_channels, glc, false,
1282 can_speculate, true, true);
1283 }
1284 return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset,
1285 num_channels, glc, false,
1286 can_speculate, true);
1287 }
1288
1289 LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
1290 LLVMValueRef rsrc,
1291 LLVMValueRef vindex,
1292 LLVMValueRef voffset,
1293 unsigned num_channels,
1294 bool glc,
1295 bool can_speculate)
1296 {
1297 if (HAVE_LLVM >= 0x0800) {
1298 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1299 num_channels, glc, false,
1300 can_speculate, true, true);
1301 }
1302
1303 LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 2, 0), "");
1304 LLVMValueRef stride = LLVMBuildExtractElement(ctx->builder, rsrc, ctx->i32_1, "");
1305 stride = LLVMBuildLShr(ctx->builder, stride, LLVMConstInt(ctx->i32, 16, 0), "");
1306
1307 LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->builder,
1308 LLVMBuildICmp(ctx->builder, LLVMIntUGT, elem_count, stride, ""),
1309 elem_count, stride, "");
1310
1311 LLVMValueRef new_rsrc = LLVMBuildInsertElement(ctx->builder, rsrc, new_elem_count,
1312 LLVMConstInt(ctx->i32, 2, 0), "");
1313
1314 return ac_build_buffer_load_common(ctx, new_rsrc, vindex, voffset,
1315 num_channels, glc, false,
1316 can_speculate, true);
1317 }
1318
1319 LLVMValueRef
1320 ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
1321 LLVMValueRef rsrc,
1322 LLVMValueRef vindex,
1323 LLVMValueRef voffset,
1324 LLVMValueRef soffset,
1325 LLVMValueRef immoffset,
1326 LLVMValueRef glc)
1327 {
1328 const char *name = "llvm.amdgcn.tbuffer.load.i32";
1329 LLVMTypeRef type = ctx->i32;
1330 LLVMValueRef params[] = {
1331 rsrc,
1332 vindex,
1333 voffset,
1334 soffset,
1335 immoffset,
1336 LLVMConstInt(ctx->i32, V_008F0C_BUF_DATA_FORMAT_16, false),
1337 LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, false),
1338 glc,
1339 ctx->i1false,
1340 };
1341 LLVMValueRef res = ac_build_intrinsic(ctx, name, type, params, 9, 0);
1342 return LLVMBuildTrunc(ctx->builder, res, ctx->i16, "");
1343 }
1344
1345 /**
1346 * Set range metadata on an instruction. This can only be used on load and
1347 * call instructions. If you know an instruction can only produce the values
1348 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1349 * \p lo is the minimum value inclusive.
1350 * \p hi is the maximum value exclusive.
1351 */
1352 static void set_range_metadata(struct ac_llvm_context *ctx,
1353 LLVMValueRef value, unsigned lo, unsigned hi)
1354 {
1355 LLVMValueRef range_md, md_args[2];
1356 LLVMTypeRef type = LLVMTypeOf(value);
1357 LLVMContextRef context = LLVMGetTypeContext(type);
1358
1359 md_args[0] = LLVMConstInt(type, lo, false);
1360 md_args[1] = LLVMConstInt(type, hi, false);
1361 range_md = LLVMMDNodeInContext(context, md_args, 2);
1362 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1363 }
1364
1365 LLVMValueRef
1366 ac_get_thread_id(struct ac_llvm_context *ctx)
1367 {
1368 LLVMValueRef tid;
1369
1370 LLVMValueRef tid_args[2];
1371 tid_args[0] = LLVMConstInt(ctx->i32, 0xffffffff, false);
1372 tid_args[1] = ctx->i32_0;
1373 tid_args[1] = ac_build_intrinsic(ctx,
1374 "llvm.amdgcn.mbcnt.lo", ctx->i32,
1375 tid_args, 2, AC_FUNC_ATTR_READNONE);
1376
1377 tid = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi",
1378 ctx->i32, tid_args,
1379 2, AC_FUNC_ATTR_READNONE);
1380 set_range_metadata(ctx, tid, 0, 64);
1381 return tid;
1382 }
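/* mbcnt.lo/hi count the set bits of the (all-ones) mask below the current
 * lane, so tid is the lane index within the wavefront, 0..63 -- hence the
 * [0, 64) range metadata set above.
 */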
1383
1384 /*
1385 * SI implements derivatives using the local data store (LDS).
1386 * All writes to the LDS happen in all executing threads at
1387 * the same time. TID is the Thread ID for the current
1388 * thread and is a value between 0 and 63, representing
1389 * the thread's position in the wavefront.
1390 *
1391 * For the pixel shader threads are grouped into quads of four pixels.
1392 * The TIDs of the pixels of a quad are:
1393 *
1394 * +------+------+
1395 * |4n + 0|4n + 1|
1396 * +------+------+
1397 * |4n + 2|4n + 3|
1398 * +------+------+
1399 *
1400 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
1401 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
1402 * the current pixel's column, and masking with 0xfffffffe yields the TID
1403 * of the left pixel of the current pixel's row.
1404 *
1405 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
1406 * adding 2 yields the TID of the pixel below the top pixel.
1407 */
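/* For example (matching the masks used by the NIR/TGSI paths, assuming the
 * AC_TID_MASK_* defines from ac_llvm_build.h):
 *
 *    ddx: mask = 0xfffffffe (AC_TID_MASK_LEFT), idx = 1 (pixel to the right)
 *    ddy: mask = 0xfffffffd (AC_TID_MASK_TOP),  idx = 2 (pixel below)
 *    coarse variants: mask = 0xfffffffc (AC_TID_MASK_TOP_LEFT)
 */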
1408 LLVMValueRef
1409 ac_build_ddxy(struct ac_llvm_context *ctx,
1410 uint32_t mask,
1411 int idx,
1412 LLVMValueRef val)
1413 {
1414 unsigned tl_lanes[4], trbl_lanes[4];
1415 LLVMValueRef tl, trbl;
1416 LLVMValueRef result;
1417
1418 for (unsigned i = 0; i < 4; ++i) {
1419 tl_lanes[i] = i & mask;
1420 trbl_lanes[i] = (i & mask) + idx;
1421 }
1422
1423 tl = ac_build_quad_swizzle(ctx, val,
1424 tl_lanes[0], tl_lanes[1],
1425 tl_lanes[2], tl_lanes[3]);
1426 trbl = ac_build_quad_swizzle(ctx, val,
1427 trbl_lanes[0], trbl_lanes[1],
1428 trbl_lanes[2], trbl_lanes[3]);
1429
1430 tl = LLVMBuildBitCast(ctx->builder, tl, ctx->f32, "");
1431 trbl = LLVMBuildBitCast(ctx->builder, trbl, ctx->f32, "");
1432 result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
1433
1434 result = ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.f32", ctx->f32,
1435 &result, 1, 0);
1436
1437 return result;
1438 }
1439
1440 void
1441 ac_build_sendmsg(struct ac_llvm_context *ctx,
1442 uint32_t msg,
1443 LLVMValueRef wave_id)
1444 {
1445 LLVMValueRef args[2];
1446 args[0] = LLVMConstInt(ctx->i32, msg, false);
1447 args[1] = wave_id;
1448 ac_build_intrinsic(ctx, "llvm.amdgcn.s.sendmsg", ctx->voidt, args, 2, 0);
1449 }
1450
1451 LLVMValueRef
1452 ac_build_imsb(struct ac_llvm_context *ctx,
1453 LLVMValueRef arg,
1454 LLVMTypeRef dst_type)
1455 {
1456 LLVMValueRef msb = ac_build_intrinsic(ctx, "llvm.amdgcn.sffbh.i32",
1457 dst_type, &arg, 1,
1458 AC_FUNC_ATTR_READNONE);
1459
1460 /* The HW returns the last bit index from MSB, but NIR/TGSI wants
1461 * the index from LSB. Invert it by doing "31 - msb". */
1462 msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
1463 msb, "");
1464
1465 LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
1466 LLVMValueRef cond = LLVMBuildOr(ctx->builder,
1467 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
1468 arg, ctx->i32_0, ""),
1469 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
1470 arg, all_ones, ""), "");
1471
1472 return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
1473 }
1474
1475 LLVMValueRef
1476 ac_build_umsb(struct ac_llvm_context *ctx,
1477 LLVMValueRef arg,
1478 LLVMTypeRef dst_type)
1479 {
1480 const char *intrin_name;
1481 LLVMTypeRef type;
1482 LLVMValueRef highest_bit;
1483 LLVMValueRef zero;
1484 unsigned bitsize;
1485
1486 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(arg));
1487 switch (bitsize) {
1488 case 64:
1489 intrin_name = "llvm.ctlz.i64";
1490 type = ctx->i64;
1491 highest_bit = LLVMConstInt(ctx->i64, 63, false);
1492 zero = ctx->i64_0;
1493 break;
1494 case 32:
1495 intrin_name = "llvm.ctlz.i32";
1496 type = ctx->i32;
1497 highest_bit = LLVMConstInt(ctx->i32, 31, false);
1498 zero = ctx->i32_0;
1499 break;
1500 case 16:
1501 intrin_name = "llvm.ctlz.i16";
1502 type = ctx->i16;
1503 highest_bit = LLVMConstInt(ctx->i16, 15, false);
1504 zero = ctx->i16_0;
1505 break;
1506 default:
1507 unreachable("invalid bitsize");
1508 break;
1509 }
1510
1511 LLVMValueRef params[2] = {
1512 arg,
1513 ctx->i1true,
1514 };
1515
1516 LLVMValueRef msb = ac_build_intrinsic(ctx, intrin_name, type,
1517 params, 2,
1518 AC_FUNC_ATTR_READNONE);
1519
1520 /* The HW returns the last bit index from MSB, but TGSI/NIR wants
1521 * the index from LSB. Invert it by doing "31 - msb". */
1522 msb = LLVMBuildSub(ctx->builder, highest_bit, msb, "");
1523 msb = LLVMBuildTruncOrBitCast(ctx->builder, msb, ctx->i32, "");
1524
1525 /* check for zero */
1526 return LLVMBuildSelect(ctx->builder,
1527 LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, zero, ""),
1528 LLVMConstInt(ctx->i32, -1, true), msb, "");
1529 }
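/* Worked example: for arg = 4 (0b100) as i32, llvm.ctlz.i32 returns 29, so
 * msb = 31 - 29 = 2; arg = 0 instead selects the -1 result via the final
 * select.
 */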
1530
1531 LLVMValueRef ac_build_fmin(struct ac_llvm_context *ctx, LLVMValueRef a,
1532 LLVMValueRef b)
1533 {
1534 LLVMValueRef args[2] = {a, b};
1535 return ac_build_intrinsic(ctx, "llvm.minnum.f32", ctx->f32, args, 2,
1536 AC_FUNC_ATTR_READNONE);
1537 }
1538
1539 LLVMValueRef ac_build_fmax(struct ac_llvm_context *ctx, LLVMValueRef a,
1540 LLVMValueRef b)
1541 {
1542 LLVMValueRef args[2] = {a, b};
1543 return ac_build_intrinsic(ctx, "llvm.maxnum.f32", ctx->f32, args, 2,
1544 AC_FUNC_ATTR_READNONE);
1545 }
1546
1547 LLVMValueRef ac_build_imin(struct ac_llvm_context *ctx, LLVMValueRef a,
1548 LLVMValueRef b)
1549 {
1550 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSLE, a, b, "");
1551 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1552 }
1553
1554 LLVMValueRef ac_build_imax(struct ac_llvm_context *ctx, LLVMValueRef a,
1555 LLVMValueRef b)
1556 {
1557 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, a, b, "");
1558 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1559 }
1560
1561 LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a,
1562 LLVMValueRef b)
1563 {
1564 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
1565 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1566 }
1567
1568 LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
1569 {
1570 return ac_build_fmin(ctx, ac_build_fmax(ctx, value, ctx->f32_0),
1571 ctx->f32_1);
1572 }
1573
1574 void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
1575 {
1576 LLVMValueRef args[9];
1577
1578 args[0] = LLVMConstInt(ctx->i32, a->target, 0);
1579 args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
1580
1581 if (a->compr) {
1582 LLVMTypeRef i16 = LLVMInt16TypeInContext(ctx->context);
1583 LLVMTypeRef v2i16 = LLVMVectorType(i16, 2);
1584
1585 args[2] = LLVMBuildBitCast(ctx->builder, a->out[0],
1586 v2i16, "");
1587 args[3] = LLVMBuildBitCast(ctx->builder, a->out[1],
1588 v2i16, "");
1589 args[4] = LLVMConstInt(ctx->i1, a->done, 0);
1590 args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1591
1592 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16",
1593 ctx->voidt, args, 6, 0);
1594 } else {
1595 args[2] = a->out[0];
1596 args[3] = a->out[1];
1597 args[4] = a->out[2];
1598 args[5] = a->out[3];
1599 args[6] = LLVMConstInt(ctx->i1, a->done, 0);
1600 args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1601
1602 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32",
1603 ctx->voidt, args, 8, 0);
1604 }
1605 }
1606
1607 void ac_build_export_null(struct ac_llvm_context *ctx)
1608 {
1609 struct ac_export_args args;
1610
1611 args.enabled_channels = 0x0; /* enabled channels */
1612 args.valid_mask = 1; /* whether the EXEC mask is valid */
1613 args.done = 1; /* DONE bit */
1614 args.target = V_008DFC_SQ_EXP_NULL;
1615 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
1616 args.out[0] = LLVMGetUndef(ctx->f32); /* R */
1617 args.out[1] = LLVMGetUndef(ctx->f32); /* G */
1618 args.out[2] = LLVMGetUndef(ctx->f32); /* B */
1619 args.out[3] = LLVMGetUndef(ctx->f32); /* A */
1620
1621 ac_build_export(ctx, &args);
1622 }
1623
1624 static unsigned ac_num_coords(enum ac_image_dim dim)
1625 {
1626 switch (dim) {
1627 case ac_image_1d:
1628 return 1;
1629 case ac_image_2d:
1630 case ac_image_1darray:
1631 return 2;
1632 case ac_image_3d:
1633 case ac_image_cube:
1634 case ac_image_2darray:
1635 case ac_image_2dmsaa:
1636 return 3;
1637 case ac_image_2darraymsaa:
1638 return 4;
1639 default:
1640 unreachable("ac_num_coords: bad dim");
1641 }
1642 }
1643
1644 static unsigned ac_num_derivs(enum ac_image_dim dim)
1645 {
1646 switch (dim) {
1647 case ac_image_1d:
1648 case ac_image_1darray:
1649 return 2;
1650 case ac_image_2d:
1651 case ac_image_2darray:
1652 case ac_image_cube:
1653 return 4;
1654 case ac_image_3d:
1655 return 6;
1656 case ac_image_2dmsaa:
1657 case ac_image_2darraymsaa:
1658 default:
1659 unreachable("derivatives not supported");
1660 }
1661 }
1662
1663 static const char *get_atomic_name(enum ac_atomic_op op)
1664 {
1665 switch (op) {
1666 case ac_atomic_swap: return "swap";
1667 case ac_atomic_add: return "add";
1668 case ac_atomic_sub: return "sub";
1669 case ac_atomic_smin: return "smin";
1670 case ac_atomic_umin: return "umin";
1671 case ac_atomic_smax: return "smax";
1672 case ac_atomic_umax: return "umax";
1673 case ac_atomic_and: return "and";
1674 case ac_atomic_or: return "or";
1675 case ac_atomic_xor: return "xor";
1676 }
1677 unreachable("bad atomic op");
1678 }
1679
1680 LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx,
1681 struct ac_image_args *a)
1682 {
1683 const char *overload[3] = { "", "", "" };
1684 unsigned num_overloads = 0;
1685 LLVMValueRef args[18];
1686 unsigned num_args = 0;
1687 enum ac_image_dim dim = a->dim;
1688
1689 assert(!a->lod || a->lod == ctx->i32_0 || a->lod == ctx->f32_0 ||
1690 !a->level_zero);
1691 assert((a->opcode != ac_image_get_resinfo && a->opcode != ac_image_load_mip &&
1692 a->opcode != ac_image_store_mip) ||
1693 a->lod);
1694 assert(a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
1695 (!a->compare && !a->offset));
1696 assert((a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
1697 a->opcode == ac_image_get_lod) ||
1698 !a->bias);
1699 assert((a->bias ? 1 : 0) +
1700 (a->lod ? 1 : 0) +
1701 (a->level_zero ? 1 : 0) +
1702 (a->derivs[0] ? 1 : 0) <= 1);
1703
1704 if (a->opcode == ac_image_get_lod) {
1705 switch (dim) {
1706 case ac_image_1darray:
1707 dim = ac_image_1d;
1708 break;
1709 case ac_image_2darray:
1710 case ac_image_cube:
1711 dim = ac_image_2d;
1712 break;
1713 default:
1714 break;
1715 }
1716 }
1717
1718 bool sample = a->opcode == ac_image_sample ||
1719 a->opcode == ac_image_gather4 ||
1720 a->opcode == ac_image_get_lod;
1721 bool atomic = a->opcode == ac_image_atomic ||
1722 a->opcode == ac_image_atomic_cmpswap;
1723 LLVMTypeRef coord_type = sample ? ctx->f32 : ctx->i32;
1724
1725 if (atomic || a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
1726 args[num_args++] = a->data[0];
1727 if (a->opcode == ac_image_atomic_cmpswap)
1728 args[num_args++] = a->data[1];
1729 }
1730
1731 if (!atomic)
1732 args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, false);
1733
1734 if (a->offset)
1735 args[num_args++] = ac_to_integer(ctx, a->offset);
1736 if (a->bias) {
1737 args[num_args++] = ac_to_float(ctx, a->bias);
1738 overload[num_overloads++] = ".f32";
1739 }
1740 if (a->compare)
1741 args[num_args++] = ac_to_float(ctx, a->compare);
1742 if (a->derivs[0]) {
1743 unsigned count = ac_num_derivs(dim);
1744 for (unsigned i = 0; i < count; ++i)
1745 args[num_args++] = ac_to_float(ctx, a->derivs[i]);
1746 overload[num_overloads++] = ".f32";
1747 }
1748 unsigned num_coords =
1749 a->opcode != ac_image_get_resinfo ? ac_num_coords(dim) : 0;
1750 for (unsigned i = 0; i < num_coords; ++i)
1751 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->coords[i], coord_type, "");
1752 if (a->lod)
1753 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->lod, coord_type, "");
1754 overload[num_overloads++] = sample ? ".f32" : ".i32";
1755
1756 args[num_args++] = a->resource;
1757 if (sample) {
1758 args[num_args++] = a->sampler;
1759 args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, false);
1760 }
1761
1762 args[num_args++] = ctx->i32_0; /* texfailctrl */
1763 args[num_args++] = LLVMConstInt(ctx->i32, a->cache_policy, false);
1764
1765 const char *name;
1766 const char *atomic_subop = "";
1767 switch (a->opcode) {
1768 case ac_image_sample: name = "sample"; break;
1769 case ac_image_gather4: name = "gather4"; break;
1770 case ac_image_load: name = "load"; break;
1771 case ac_image_load_mip: name = "load.mip"; break;
1772 case ac_image_store: name = "store"; break;
1773 case ac_image_store_mip: name = "store.mip"; break;
1774 case ac_image_atomic:
1775 name = "atomic.";
1776 atomic_subop = get_atomic_name(a->atomic);
1777 break;
1778 case ac_image_atomic_cmpswap:
1779 name = "atomic.";
1780 atomic_subop = "cmpswap";
1781 break;
1782 case ac_image_get_lod: name = "getlod"; break;
1783 case ac_image_get_resinfo: name = "getresinfo"; break;
1784 default: unreachable("invalid image opcode");
1785 }
1786
1787 const char *dimname;
1788 switch (dim) {
1789 case ac_image_1d: dimname = "1d"; break;
1790 case ac_image_2d: dimname = "2d"; break;
1791 case ac_image_3d: dimname = "3d"; break;
1792 case ac_image_cube: dimname = "cube"; break;
1793 case ac_image_1darray: dimname = "1darray"; break;
1794 case ac_image_2darray: dimname = "2darray"; break;
1795 case ac_image_2dmsaa: dimname = "2dmsaa"; break;
1796 case ac_image_2darraymsaa: dimname = "2darraymsaa"; break;
1797 default: unreachable("invalid dim");
1798 }
1799
1800 bool lod_suffix =
1801 a->lod && (a->opcode == ac_image_sample || a->opcode == ac_image_gather4);
1802 char intr_name[96];
1803 snprintf(intr_name, sizeof(intr_name),
1804 "llvm.amdgcn.image.%s%s" /* base name */
1805 "%s%s%s" /* sample/gather modifiers */
1806 ".%s.%s%s%s%s", /* dimension and type overloads */
1807 name, atomic_subop,
1808 a->compare ? ".c" : "",
1809 a->bias ? ".b" :
1810 lod_suffix ? ".l" :
1811 a->derivs[0] ? ".d" :
1812 a->level_zero ? ".lz" : "",
1813 a->offset ? ".o" : "",
1814 dimname,
1815 atomic ? "i32" : "v4f32",
1816 overload[0], overload[1], overload[2]);
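	/* E.g. a 2D sample with an explicit LOD produces
	 * "llvm.amdgcn.image.sample.l.2d.v4f32.f32". */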
1817
1818 LLVMTypeRef retty;
1819 if (atomic)
1820 retty = ctx->i32;
1821 else if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip)
1822 retty = ctx->voidt;
1823 else
1824 retty = ctx->v4f32;
1825
1826 LLVMValueRef result =
1827 ac_build_intrinsic(ctx, intr_name, retty, args, num_args,
1828 a->attributes);
1829 if (!sample && retty == ctx->v4f32) {
1830 result = LLVMBuildBitCast(ctx->builder, result,
1831 ctx->v4i32, "");
1832 }
1833 return result;
1834 }
1835
1836 LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx,
1837 LLVMValueRef args[2])
1838 {
1839 LLVMTypeRef v2f16 =
1840 LLVMVectorType(LLVMHalfTypeInContext(ctx->context), 2);
1841
1842 return ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz", v2f16,
1843 args, 2, AC_FUNC_ATTR_READNONE);
1844 }
1845
1846 LLVMValueRef ac_build_cvt_pknorm_i16(struct ac_llvm_context *ctx,
1847 LLVMValueRef args[2])
1848 {
1849 LLVMValueRef res =
1850 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.i16",
1851 ctx->v2i16, args, 2,
1852 AC_FUNC_ATTR_READNONE);
1853 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1854 }
1855
1856 LLVMValueRef ac_build_cvt_pknorm_u16(struct ac_llvm_context *ctx,
1857 LLVMValueRef args[2])
1858 {
1859 LLVMValueRef res =
1860 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.u16",
1861 ctx->v2i16, args, 2,
1862 AC_FUNC_ATTR_READNONE);
1863 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1864 }
1865
1866 /* The 8-bit and 10-bit clamping is for HW workarounds. */
1867 LLVMValueRef ac_build_cvt_pk_i16(struct ac_llvm_context *ctx,
1868 LLVMValueRef args[2], unsigned bits, bool hi)
1869 {
1870 assert(bits == 8 || bits == 10 || bits == 16);
1871
1872 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1873 bits == 8 ? 127 : bits == 10 ? 511 : 32767, 0);
1874 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1875 bits == 8 ? -128 : bits == 10 ? -512 : -32768, 0);
1876 LLVMValueRef max_alpha =
1877 bits != 10 ? max_rgb : ctx->i32_1;
1878 LLVMValueRef min_alpha =
1879 bits != 10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1880
1881 /* Clamp. */
1882 if (bits != 16) {
1883 for (int i = 0; i < 2; i++) {
1884 bool alpha = hi && i == 1;
1885 args[i] = ac_build_imin(ctx, args[i],
1886 alpha ? max_alpha : max_rgb);
1887 args[i] = ac_build_imax(ctx, args[i],
1888 alpha ? min_alpha : min_rgb);
1889 }
1890 }
1891
1892 LLVMValueRef res =
1893 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.i16",
1894 ctx->v2i16, args, 2,
1895 AC_FUNC_ATTR_READNONE);
1896 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1897 }
1898
1899 /* The 8-bit and 10-bit clamping is for HW workarounds. */
1900 LLVMValueRef ac_build_cvt_pk_u16(struct ac_llvm_context *ctx,
1901 LLVMValueRef args[2], unsigned bits, bool hi)
1902 {
1903 assert(bits == 8 || bits == 10 || bits == 16);
1904
1905 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1906 bits == 8 ? 255 : bits == 10 ? 1023 : 65535, 0);
1907 LLVMValueRef max_alpha =
1908 bits != 10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1909
1910 /* Clamp. */
1911 if (bits != 16) {
1912 for (int i = 0; i < 2; i++) {
1913 bool alpha = hi && i == 1;
1914 args[i] = ac_build_umin(ctx, args[i],
1915 alpha ? max_alpha : max_rgb);
1916 }
1917 }
1918
1919 LLVMValueRef res =
1920 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.u16",
1921 ctx->v2i16, args, 2,
1922 AC_FUNC_ATTR_READNONE);
1923 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1924 }
1925
1926 LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
1927 {
1928 return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1,
1929 &i1, 1, AC_FUNC_ATTR_READNONE);
1930 }
1931
1932 void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
1933 {
1934 ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt,
1935 &i1, 1, 0);
1936 }
1937
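/* Bitfield extract: return the \p width bits of \p input starting at bit
 * \p offset, zero- or sign-extended according to \p is_signed
 * (v_bfe_u32 / v_bfe_i32).
 */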
1938 LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input,
1939 LLVMValueRef offset, LLVMValueRef width,
1940 bool is_signed)
1941 {
1942 LLVMValueRef args[] = {
1943 input,
1944 offset,
1945 width,
1946 };
1947
1948 return ac_build_intrinsic(ctx,
1949 is_signed ? "llvm.amdgcn.sbfe.i32" :
1950 "llvm.amdgcn.ubfe.i32",
1951 ctx->i32, args, 3,
1952 AC_FUNC_ATTR_READNONE);
1953 }
1954
1955 LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0,
1956 LLVMValueRef s1, LLVMValueRef s2)
1957 {
1958 return LLVMBuildAdd(ctx->builder,
1959 LLVMBuildMul(ctx->builder, s0, s1, ""), s2, "");
1960 }
1961
1962 LLVMValueRef ac_build_fmad(struct ac_llvm_context *ctx, LLVMValueRef s0,
1963 LLVMValueRef s1, LLVMValueRef s2)
1964 {
1965 return LLVMBuildFAdd(ctx->builder,
1966 LLVMBuildFMul(ctx->builder, s0, s1, ""), s2, "");
1967 }
1968
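/* Emit s_waitcnt; \p simm16 packs the vmcnt/expcnt/lgkmcnt wait counters. */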
1969 void ac_build_waitcnt(struct ac_llvm_context *ctx, unsigned simm16)
1970 {
1971 LLVMValueRef args[1] = {
1972 LLVMConstInt(ctx->i32, simm16, false),
1973 };
1974 ac_build_intrinsic(ctx, "llvm.amdgcn.s.waitcnt",
1975 ctx->voidt, args, 1, 0);
1976 }
1977
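/* Return the fractional part of x: fract(x) = x - floor(x). */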
1978 LLVMValueRef ac_build_fract(struct ac_llvm_context *ctx, LLVMValueRef src0,
1979 unsigned bitsize)
1980 {
1981 LLVMTypeRef type;
1982 	const char *intr;
1983
1984 if (bitsize == 32) {
1985 intr = "llvm.floor.f32";
1986 type = ctx->f32;
1987 } else {
1988 intr = "llvm.floor.f64";
1989 type = ctx->f64;
1990 }
1991
1992 LLVMValueRef params[] = {
1993 src0,
1994 };
1995 LLVMValueRef floor = ac_build_intrinsic(ctx, intr, type, params, 1,
1996 AC_FUNC_ATTR_READNONE);
1997 return LLVMBuildFSub(ctx->builder, src0, floor, "");
1998 }
1999
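/* Integer sign: isign(x) = x > 0 ? 1 : x == 0 ? 0 : -1. */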
2000 LLVMValueRef ac_build_isign(struct ac_llvm_context *ctx, LLVMValueRef src0,
2001 unsigned bitsize)
2002 {
2003 LLVMValueRef cmp, val, zero, one;
2004 LLVMTypeRef type;
2005
2006 switch (bitsize) {
2007 case 64:
2008 type = ctx->i64;
2009 zero = ctx->i64_0;
2010 one = ctx->i64_1;
2011 break;
2012 case 32:
2013 type = ctx->i32;
2014 zero = ctx->i32_0;
2015 one = ctx->i32_1;
2016 break;
2017 case 16:
2018 type = ctx->i16;
2019 zero = ctx->i16_0;
2020 one = ctx->i16_1;
2021 break;
2022 default:
2023 		unreachable("invalid bitsize");
2024 break;
2025 }
2026
2027 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, src0, zero, "");
2028 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2029 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGE, val, zero, "");
2030 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstInt(type, -1, true), "");
2031 return val;
2032 }
2033
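/* Float sign: x > 0 -> 1.0, x < 0 (or NaN) -> -1.0, +/-0.0 -> itself. */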
2034 LLVMValueRef ac_build_fsign(struct ac_llvm_context *ctx, LLVMValueRef src0,
2035 unsigned bitsize)
2036 {
2037 LLVMValueRef cmp, val, zero, one;
2038 LLVMTypeRef type;
2039
2040 if (bitsize == 32) {
2041 type = ctx->f32;
2042 zero = ctx->f32_0;
2043 one = ctx->f32_1;
2044 } else {
2045 type = ctx->f64;
2046 zero = ctx->f64_0;
2047 one = ctx->f64_1;
2048 }
2049
2050 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGT, src0, zero, "");
2051 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2052 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGE, val, zero, "");
2053 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstReal(type, -1.0), "");
2054 return val;
2055 }
2056
2057 LLVMValueRef ac_build_bit_count(struct ac_llvm_context *ctx, LLVMValueRef src0)
2058 {
2059 LLVMValueRef result;
2060 unsigned bitsize;
2061
2062 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2063
2064 switch (bitsize) {
2065 case 64:
2066 result = ac_build_intrinsic(ctx, "llvm.ctpop.i64", ctx->i64,
2067 (LLVMValueRef []) { src0 }, 1,
2068 AC_FUNC_ATTR_READNONE);
2069
2070 result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2071 break;
2072 case 32:
2073 result = ac_build_intrinsic(ctx, "llvm.ctpop.i32", ctx->i32,
2074 (LLVMValueRef []) { src0 }, 1,
2075 AC_FUNC_ATTR_READNONE);
2076 break;
2077 case 16:
2078 result = ac_build_intrinsic(ctx, "llvm.ctpop.i16", ctx->i16,
2079 (LLVMValueRef []) { src0 }, 1,
2080 AC_FUNC_ATTR_READNONE);
2081 break;
2082 default:
2083 		unreachable("invalid bitsize");
2084 break;
2085 }
2086
2087 return result;
2088 }
2089
2090 LLVMValueRef ac_build_bitfield_reverse(struct ac_llvm_context *ctx,
2091 LLVMValueRef src0)
2092 {
2093 LLVMValueRef result;
2094 unsigned bitsize;
2095
2096 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2097
2098 switch (bitsize) {
2099 case 32:
2100 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i32", ctx->i32,
2101 (LLVMValueRef []) { src0 }, 1,
2102 AC_FUNC_ATTR_READNONE);
2103 break;
2104 case 16:
2105 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i16", ctx->i16,
2106 (LLVMValueRef []) { src0 }, 1,
2107 AC_FUNC_ATTR_READNONE);
2108 break;
2109 default:
2110 		unreachable("invalid bitsize");
2111 break;
2112 }
2113
2114 return result;
2115 }
2116
2117 #define AC_EXP_TARGET 0
2118 #define AC_EXP_ENABLED_CHANNELS 1
2119 #define AC_EXP_OUT0 2
2120
2121 enum ac_ir_type {
2122 AC_IR_UNDEF,
2123 AC_IR_CONST,
2124 AC_IR_VALUE,
2125 };
2126
2127 struct ac_vs_exp_chan
2128 {
2129 LLVMValueRef value;
2130 float const_float;
2131 enum ac_ir_type type;
2132 };
2133
2134 struct ac_vs_exp_inst {
2135 unsigned offset;
2136 LLVMValueRef inst;
2137 struct ac_vs_exp_chan chan[4];
2138 };
2139
2140 struct ac_vs_exports {
2141 unsigned num;
2142 struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
2143 };
2144
2145 /* Return true if the PARAM export has been eliminated. */
2146 static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset,
2147 uint32_t num_outputs,
2148 struct ac_vs_exp_inst *exp)
2149 {
2150 unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
2151 bool is_zero[4] = {}, is_one[4] = {};
2152
2153 for (i = 0; i < 4; i++) {
2154 /* It's a constant expression. Undef outputs are eliminated too. */
2155 if (exp->chan[i].type == AC_IR_UNDEF) {
2156 is_zero[i] = true;
2157 is_one[i] = true;
2158 } else if (exp->chan[i].type == AC_IR_CONST) {
2159 if (exp->chan[i].const_float == 0)
2160 is_zero[i] = true;
2161 else if (exp->chan[i].const_float == 1)
2162 is_one[i] = true;
2163 else
2164 return false; /* other constant */
2165 } else
2166 return false;
2167 }
2168
2169 /* Only certain combinations of 0 and 1 can be eliminated. */
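	/* DEFAULT_VAL encodes one of (0,0,0,0), (0,0,0,1), (1,1,1,0) or
	 * (1,1,1,1), in that order. */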
2170 if (is_zero[0] && is_zero[1] && is_zero[2])
2171 default_val = is_zero[3] ? 0 : 1;
2172 else if (is_one[0] && is_one[1] && is_one[2])
2173 default_val = is_zero[3] ? 2 : 3;
2174 else
2175 return false;
2176
2177 /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
2178 LLVMInstructionEraseFromParent(exp->inst);
2179
2180 /* Change OFFSET to DEFAULT_VAL. */
2181 for (i = 0; i < num_outputs; i++) {
2182 if (vs_output_param_offset[i] == exp->offset) {
2183 vs_output_param_offset[i] =
2184 AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
2185 break;
2186 }
2187 }
2188 return true;
2189 }
2190
2191 static bool ac_eliminate_duplicated_output(struct ac_llvm_context *ctx,
2192 uint8_t *vs_output_param_offset,
2193 uint32_t num_outputs,
2194 struct ac_vs_exports *processed,
2195 struct ac_vs_exp_inst *exp)
2196 {
2197 unsigned p, copy_back_channels = 0;
2198
2199 /* See if the output is already in the list of processed outputs.
2200 * The LLVMValueRef comparison relies on SSA.
2201 */
2202 for (p = 0; p < processed->num; p++) {
2203 bool different = false;
2204
2205 for (unsigned j = 0; j < 4; j++) {
2206 struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
2207 struct ac_vs_exp_chan *c2 = &exp->chan[j];
2208
2209 /* Treat undef as a match. */
2210 if (c2->type == AC_IR_UNDEF)
2211 continue;
2212
2213 /* If c1 is undef but c2 isn't, we can copy c2 to c1
2214 * and consider the instruction duplicated.
2215 */
2216 if (c1->type == AC_IR_UNDEF) {
2217 copy_back_channels |= 1 << j;
2218 continue;
2219 }
2220
2221 /* Test whether the channels are not equal. */
2222 if (c1->type != c2->type ||
2223 (c1->type == AC_IR_CONST &&
2224 c1->const_float != c2->const_float) ||
2225 (c1->type == AC_IR_VALUE &&
2226 c1->value != c2->value)) {
2227 different = true;
2228 break;
2229 }
2230 }
2231 if (!different)
2232 break;
2233
2234 copy_back_channels = 0;
2235 }
2236 if (p == processed->num)
2237 return false;
2238
2239 /* If a match was found, but the matching export has undef where the new
2240 * one has a normal value, copy the normal value to the undef channel.
2241 */
2242 struct ac_vs_exp_inst *match = &processed->exp[p];
2243
2244 /* Get current enabled channels mask. */
2245 LLVMValueRef arg = LLVMGetOperand(match->inst, AC_EXP_ENABLED_CHANNELS);
2246 unsigned enabled_channels = LLVMConstIntGetZExtValue(arg);
2247
2248 while (copy_back_channels) {
2249 unsigned chan = u_bit_scan(&copy_back_channels);
2250
2251 assert(match->chan[chan].type == AC_IR_UNDEF);
2252 LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan,
2253 exp->chan[chan].value);
2254 match->chan[chan] = exp->chan[chan];
2255
2256 /* Update number of enabled channels because the original mask
2257 * is not always 0xf.
2258 */
2259 enabled_channels |= (1 << chan);
2260 LLVMSetOperand(match->inst, AC_EXP_ENABLED_CHANNELS,
2261 LLVMConstInt(ctx->i32, enabled_channels, 0));
2262 }
2263
2264 /* The PARAM export is duplicated. Kill it. */
2265 LLVMInstructionEraseFromParent(exp->inst);
2266
2267 /* Change OFFSET to the matching export. */
2268 for (unsigned i = 0; i < num_outputs; i++) {
2269 if (vs_output_param_offset[i] == exp->offset) {
2270 vs_output_param_offset[i] = match->offset;
2271 break;
2272 }
2273 }
2274 return true;
2275 }
2276
2277 void ac_optimize_vs_outputs(struct ac_llvm_context *ctx,
2278 LLVMValueRef main_fn,
2279 uint8_t *vs_output_param_offset,
2280 uint32_t num_outputs,
2281 uint8_t *num_param_exports)
2282 {
2283 LLVMBasicBlockRef bb;
2284 bool removed_any = false;
2285 struct ac_vs_exports exports;
2286
2287 exports.num = 0;
2288
2289 /* Process all LLVM instructions. */
2290 bb = LLVMGetFirstBasicBlock(main_fn);
2291 while (bb) {
2292 LLVMValueRef inst = LLVMGetFirstInstruction(bb);
2293
2294 while (inst) {
2295 LLVMValueRef cur = inst;
2296 inst = LLVMGetNextInstruction(inst);
2297 struct ac_vs_exp_inst exp;
2298
2299 if (LLVMGetInstructionOpcode(cur) != LLVMCall)
2300 continue;
2301
2302 LLVMValueRef callee = ac_llvm_get_called_value(cur);
2303
2304 if (!ac_llvm_is_function(callee))
2305 continue;
2306
2307 const char *name = LLVMGetValueName(callee);
2308 unsigned num_args = LLVMCountParams(callee);
2309
2310 /* Check if this is an export instruction. */
2311 if ((num_args != 9 && num_args != 8) ||
2312 (strcmp(name, "llvm.SI.export") &&
2313 strcmp(name, "llvm.amdgcn.exp.f32")))
2314 continue;
2315
2316 LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
2317 unsigned target = LLVMConstIntGetZExtValue(arg);
2318
2319 if (target < V_008DFC_SQ_EXP_PARAM)
2320 continue;
2321
2322 target -= V_008DFC_SQ_EXP_PARAM;
2323
2324 /* Parse the instruction. */
2325 memset(&exp, 0, sizeof(exp));
2326 exp.offset = target;
2327 exp.inst = cur;
2328
2329 for (unsigned i = 0; i < 4; i++) {
2330 LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);
2331
2332 exp.chan[i].value = v;
2333
2334 if (LLVMIsUndef(v)) {
2335 exp.chan[i].type = AC_IR_UNDEF;
2336 } else if (LLVMIsAConstantFP(v)) {
2337 LLVMBool loses_info;
2338 exp.chan[i].type = AC_IR_CONST;
2339 exp.chan[i].const_float =
2340 LLVMConstRealGetDouble(v, &loses_info);
2341 } else {
2342 exp.chan[i].type = AC_IR_VALUE;
2343 }
2344 }
2345
2346 /* Eliminate constant and duplicated PARAM exports. */
2347 if (ac_eliminate_const_output(vs_output_param_offset,
2348 num_outputs, &exp) ||
2349 ac_eliminate_duplicated_output(ctx,
2350 vs_output_param_offset,
2351 num_outputs, &exports,
2352 &exp)) {
2353 removed_any = true;
2354 } else {
2355 exports.exp[exports.num++] = exp;
2356 }
2357 }
2358 bb = LLVMGetNextBasicBlock(bb);
2359 }
2360
2361 /* Remove holes in export memory due to removed PARAM exports.
2362 * This is done by renumbering all PARAM exports.
2363 */
2364 if (removed_any) {
2365 uint8_t old_offset[VARYING_SLOT_MAX];
2366 unsigned out, i;
2367
2368 /* Make a copy of the offsets. We need the old version while
2369 * we are modifying some of them. */
2370 memcpy(old_offset, vs_output_param_offset,
2371 sizeof(old_offset));
2372
2373 for (i = 0; i < exports.num; i++) {
2374 unsigned offset = exports.exp[i].offset;
2375
2376 /* Update vs_output_param_offset. Multiple outputs can
2377 * have the same offset.
2378 */
2379 for (out = 0; out < num_outputs; out++) {
2380 if (old_offset[out] == offset)
2381 vs_output_param_offset[out] = i;
2382 }
2383
2384 /* Change the PARAM offset in the instruction. */
2385 LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
2386 LLVMConstInt(ctx->i32,
2387 V_008DFC_SQ_EXP_PARAM + i, 0));
2388 }
2389 *num_param_exports = exports.num;
2390 }
2391 }
2392
2393 void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
2394 {
2395 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
2396 ac_build_intrinsic(ctx,
2397 "llvm.amdgcn.init.exec", ctx->voidt,
2398 &full_mask, 1, AC_FUNC_ATTR_CONVERGENT);
2399 }
2400
2401 void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
2402 {
2403 unsigned lds_size = ctx->chip_class >= CIK ? 65536 : 32768;
2404 ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32_0,
2405 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_ADDR_SPACE_LDS),
2406 "lds");
2407 }
2408
2409 LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx,
2410 LLVMValueRef dw_addr)
2411 {
2412 return ac_build_load(ctx, ctx->lds, dw_addr);
2413 }
2414
2415 void ac_lds_store(struct ac_llvm_context *ctx,
2416 LLVMValueRef dw_addr,
2417 LLVMValueRef value)
2418 {
2419 value = ac_to_integer(ctx, value);
2420 ac_build_indexed_store(ctx, ctx->lds,
2421 dw_addr, value);
2422 }
2423
2424 LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx,
2425 LLVMTypeRef dst_type,
2426 LLVMValueRef src0)
2427 {
2428 unsigned src0_bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2429 const char *intrin_name;
2430 LLVMTypeRef type;
2431 LLVMValueRef zero;
2432
2433 switch (src0_bitsize) {
2434 case 64:
2435 intrin_name = "llvm.cttz.i64";
2436 type = ctx->i64;
2437 zero = ctx->i64_0;
2438 break;
2439 case 32:
2440 intrin_name = "llvm.cttz.i32";
2441 type = ctx->i32;
2442 zero = ctx->i32_0;
2443 break;
2444 case 16:
2445 intrin_name = "llvm.cttz.i16";
2446 type = ctx->i16;
2447 zero = ctx->i16_0;
2448 break;
2449 default:
2450 		unreachable("invalid bitsize");
2451 }
2452
2453 LLVMValueRef params[2] = {
2454 src0,
2455
2456 /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
2457 * add special code to check for x=0. The reason is that
2458 * the LLVM behavior for x=0 is different from what we
2459 * need here. However, LLVM also assumes that ffs(x) is
2460 * in [0, 31], but GLSL expects that ffs(0) = -1, so
2461 * a conditional assignment to handle 0 is still required.
2462 *
2463 * The hardware already implements the correct behavior.
2464 */
2465 ctx->i1true,
2466 };
2467
2468 LLVMValueRef lsb = ac_build_intrinsic(ctx, intrin_name, type,
2469 params, 2,
2470 AC_FUNC_ATTR_READNONE);
2471
2472 if (src0_bitsize == 64) {
2473 lsb = LLVMBuildTrunc(ctx->builder, lsb, ctx->i32, "");
2474 }
2475
2476 /* TODO: We need an intrinsic to skip this conditional. */
2477 /* Check for zero: */
2478 return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
2479 LLVMIntEQ, src0,
2480 zero, ""),
2481 LLVMConstInt(ctx->i32, -1, 0), lsb, "");
2482 }
2483
2484 LLVMTypeRef ac_array_in_const_addr_space(LLVMTypeRef elem_type)
2485 {
2486 return LLVMPointerType(LLVMArrayType(elem_type, 0),
2487 AC_ADDR_SPACE_CONST);
2488 }
2489
2490 LLVMTypeRef ac_array_in_const32_addr_space(LLVMTypeRef elem_type)
2491 {
2492 return LLVMPointerType(LLVMArrayType(elem_type, 0),
2493 AC_ADDR_SPACE_CONST_32BIT);
2494 }
2495
2496 static struct ac_llvm_flow *
2497 get_current_flow(struct ac_llvm_context *ctx)
2498 {
2499 if (ctx->flow_depth > 0)
2500 return &ctx->flow[ctx->flow_depth - 1];
2501 return NULL;
2502 }
2503
2504 static struct ac_llvm_flow *
2505 get_innermost_loop(struct ac_llvm_context *ctx)
2506 {
2507 for (unsigned i = ctx->flow_depth; i > 0; --i) {
2508 if (ctx->flow[i - 1].loop_entry_block)
2509 return &ctx->flow[i - 1];
2510 }
2511 return NULL;
2512 }
2513
2514 static struct ac_llvm_flow *
2515 push_flow(struct ac_llvm_context *ctx)
2516 {
2517 struct ac_llvm_flow *flow;
2518
2519 if (ctx->flow_depth >= ctx->flow_depth_max) {
2520 unsigned new_max = MAX2(ctx->flow_depth << 1,
2521 AC_LLVM_INITIAL_CF_DEPTH);
2522
2523 ctx->flow = realloc(ctx->flow, new_max * sizeof(*ctx->flow));
2524 ctx->flow_depth_max = new_max;
2525 }
2526
2527 flow = &ctx->flow[ctx->flow_depth];
2528 ctx->flow_depth++;
2529
2530 flow->next_block = NULL;
2531 flow->loop_entry_block = NULL;
2532 return flow;
2533 }
2534
2535 static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base,
2536 int label_id)
2537 {
2538 char buf[32];
2539 snprintf(buf, sizeof(buf), "%s%d", base, label_id);
2540 LLVMSetValueName(LLVMBasicBlockAsValue(bb), buf);
2541 }
2542
2543 /* Append a basic block at the level of the parent flow.
2544 */
2545 static LLVMBasicBlockRef append_basic_block(struct ac_llvm_context *ctx,
2546 const char *name)
2547 {
2548 assert(ctx->flow_depth >= 1);
2549
2550 if (ctx->flow_depth >= 2) {
2551 struct ac_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
2552
2553 return LLVMInsertBasicBlockInContext(ctx->context,
2554 flow->next_block, name);
2555 }
2556
2557 LLVMValueRef main_fn =
2558 LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->builder));
2559 return LLVMAppendBasicBlockInContext(ctx->context, main_fn, name);
2560 }
2561
2562 /* Emit a branch to the given default target for the current block if
2563 * applicable -- that is, if the current block does not already contain a
2564 * branch from a break or continue.
2565 */
2566 static void emit_default_branch(LLVMBuilderRef builder,
2567 LLVMBasicBlockRef target)
2568 {
2569 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
2570 LLVMBuildBr(builder, target);
2571 }
2572
2573 void ac_build_bgnloop(struct ac_llvm_context *ctx, int label_id)
2574 {
2575 struct ac_llvm_flow *flow = push_flow(ctx);
2576 flow->loop_entry_block = append_basic_block(ctx, "LOOP");
2577 flow->next_block = append_basic_block(ctx, "ENDLOOP");
2578 set_basicblock_name(flow->loop_entry_block, "loop", label_id);
2579 LLVMBuildBr(ctx->builder, flow->loop_entry_block);
2580 LLVMPositionBuilderAtEnd(ctx->builder, flow->loop_entry_block);
2581 }
2582
2583 void ac_build_break(struct ac_llvm_context *ctx)
2584 {
2585 struct ac_llvm_flow *flow = get_innermost_loop(ctx);
2586 LLVMBuildBr(ctx->builder, flow->next_block);
2587 }
2588
2589 void ac_build_continue(struct ac_llvm_context *ctx)
2590 {
2591 struct ac_llvm_flow *flow = get_innermost_loop(ctx);
2592 LLVMBuildBr(ctx->builder, flow->loop_entry_block);
2593 }
2594
2595 void ac_build_else(struct ac_llvm_context *ctx, int label_id)
2596 {
2597 struct ac_llvm_flow *current_branch = get_current_flow(ctx);
2598 LLVMBasicBlockRef endif_block;
2599
2600 assert(!current_branch->loop_entry_block);
2601
2602 endif_block = append_basic_block(ctx, "ENDIF");
2603 emit_default_branch(ctx->builder, endif_block);
2604
2605 LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
2606 set_basicblock_name(current_branch->next_block, "else", label_id);
2607
2608 current_branch->next_block = endif_block;
2609 }
2610
2611 void ac_build_endif(struct ac_llvm_context *ctx, int label_id)
2612 {
2613 struct ac_llvm_flow *current_branch = get_current_flow(ctx);
2614
2615 assert(!current_branch->loop_entry_block);
2616
2617 emit_default_branch(ctx->builder, current_branch->next_block);
2618 LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
2619 set_basicblock_name(current_branch->next_block, "endif", label_id);
2620
2621 ctx->flow_depth--;
2622 }
2623
2624 void ac_build_endloop(struct ac_llvm_context *ctx, int label_id)
2625 {
2626 struct ac_llvm_flow *current_loop = get_current_flow(ctx);
2627
2628 assert(current_loop->loop_entry_block);
2629
2630 emit_default_branch(ctx->builder, current_loop->loop_entry_block);
2631
2632 LLVMPositionBuilderAtEnd(ctx->builder, current_loop->next_block);
2633 set_basicblock_name(current_loop->next_block, "endloop", label_id);
2634 ctx->flow_depth--;
2635 }
2636
2637 void ac_build_ifcc(struct ac_llvm_context *ctx, LLVMValueRef cond, int label_id)
2638 {
2639 struct ac_llvm_flow *flow = push_flow(ctx);
2640 LLVMBasicBlockRef if_block;
2641
2642 if_block = append_basic_block(ctx, "IF");
2643 flow->next_block = append_basic_block(ctx, "ELSE");
2644 set_basicblock_name(if_block, "if", label_id);
2645 LLVMBuildCondBr(ctx->builder, cond, if_block, flow->next_block);
2646 LLVMPositionBuilderAtEnd(ctx->builder, if_block);
2647 }
2648
2649 void ac_build_if(struct ac_llvm_context *ctx, LLVMValueRef value,
2650 int label_id)
2651 {
2652 LLVMValueRef cond = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
2653 value, ctx->f32_0, "");
2654 ac_build_ifcc(ctx, cond, label_id);
2655 }
2656
2657 void ac_build_uif(struct ac_llvm_context *ctx, LLVMValueRef value,
2658 int label_id)
2659 {
2660 LLVMValueRef cond = LLVMBuildICmp(ctx->builder, LLVMIntNE,
2661 ac_to_integer(ctx, value),
2662 ctx->i32_0, "");
2663 ac_build_ifcc(ctx, cond, label_id);
2664 }
2665
2666 LLVMValueRef ac_build_alloca_undef(struct ac_llvm_context *ac, LLVMTypeRef type,
2667 const char *name)
2668 {
2669 LLVMBuilderRef builder = ac->builder;
2670 LLVMBasicBlockRef current_block = LLVMGetInsertBlock(builder);
2671 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
2672 LLVMBasicBlockRef first_block = LLVMGetEntryBasicBlock(function);
2673 LLVMValueRef first_instr = LLVMGetFirstInstruction(first_block);
2674 LLVMBuilderRef first_builder = LLVMCreateBuilderInContext(ac->context);
2675 LLVMValueRef res;
2676
2677 if (first_instr) {
2678 LLVMPositionBuilderBefore(first_builder, first_instr);
2679 } else {
2680 LLVMPositionBuilderAtEnd(first_builder, first_block);
2681 }
2682
2683 res = LLVMBuildAlloca(first_builder, type, name);
2684 LLVMDisposeBuilder(first_builder);
2685 return res;
2686 }
2687
2688 LLVMValueRef ac_build_alloca(struct ac_llvm_context *ac,
2689 LLVMTypeRef type, const char *name)
2690 {
2691 LLVMValueRef ptr = ac_build_alloca_undef(ac, type, name);
2692 LLVMBuildStore(ac->builder, LLVMConstNull(type), ptr);
2693 return ptr;
2694 }
2695
2696 LLVMValueRef ac_cast_ptr(struct ac_llvm_context *ctx, LLVMValueRef ptr,
2697 LLVMTypeRef type)
2698 {
2699 int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
2700 return LLVMBuildBitCast(ctx->builder, ptr,
2701 LLVMPointerType(type, addr_space), "");
2702 }
2703
2704 LLVMValueRef ac_trim_vector(struct ac_llvm_context *ctx, LLVMValueRef value,
2705 unsigned count)
2706 {
2707 unsigned num_components = ac_get_llvm_num_components(value);
2708 if (count == num_components)
2709 return value;
2710
2711 LLVMValueRef masks[MAX2(count, 2)];
2712 masks[0] = ctx->i32_0;
2713 masks[1] = ctx->i32_1;
2714 for (unsigned i = 2; i < count; i++)
2715 masks[i] = LLVMConstInt(ctx->i32, i, false);
2716
2717 if (count == 1)
2718 return LLVMBuildExtractElement(ctx->builder, value, masks[0],
2719 "");
2720
2721 LLVMValueRef swizzle = LLVMConstVector(masks, count);
2722 return LLVMBuildShuffleVector(ctx->builder, value, value, swizzle, "");
2723 }
2724
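/* Extract an unsigned bitfield from a 32-bit parameter:
 * (param >> rshift) & ((1 << bitwidth) - 1),
 * e.g. ac_unpack_param(ctx, param, 8, 4) returns bits [11:8].
 */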
2725 LLVMValueRef ac_unpack_param(struct ac_llvm_context *ctx, LLVMValueRef param,
2726 unsigned rshift, unsigned bitwidth)
2727 {
2728 LLVMValueRef value = param;
2729 if (rshift)
2730 value = LLVMBuildLShr(ctx->builder, value,
2731 LLVMConstInt(ctx->i32, rshift, false), "");
2732
2733 if (rshift + bitwidth < 32) {
2734 unsigned mask = (1 << bitwidth) - 1;
2735 value = LLVMBuildAnd(ctx->builder, value,
2736 LLVMConstInt(ctx->i32, mask, false), "");
2737 }
2738 return value;
2739 }
2740
2741 /* Adjust the sample index according to FMASK.
2742 *
2743 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
2744 * which is the identity mapping. Each nibble says which physical sample
2745 * should be fetched to get that sample.
2746 *
2747 * For example, 0x11111100 means there are only 2 samples stored and
2748 * the second sample covers 3/4 of the pixel. When reading samples 0
2749 * and 1, return physical sample 0 (determined by the first two 0s
2750 * in FMASK), otherwise return physical sample 1.
2751 *
2752 * The sample index should be adjusted as follows:
2753 * addr[sample_index] = (fmask >> (addr[sample_index] * 4)) & 0xF;
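 *
 * E.g. with the FMASK 0x11111100 above, reading sample 1 yields
 * (0x11111100 >> 4) & 0xF = 0 and reading sample 2 yields
 * (0x11111100 >> 8) & 0xF = 1.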
2754 */
2755 void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask,
2756 LLVMValueRef *addr, bool is_array_tex)
2757 {
2758 struct ac_image_args fmask_load = {};
2759 fmask_load.opcode = ac_image_load;
2760 fmask_load.resource = fmask;
2761 fmask_load.dmask = 0xf;
2762 fmask_load.dim = is_array_tex ? ac_image_2darray : ac_image_2d;
2763
2764 fmask_load.coords[0] = addr[0];
2765 fmask_load.coords[1] = addr[1];
2766 if (is_array_tex)
2767 fmask_load.coords[2] = addr[2];
2768
2769 LLVMValueRef fmask_value = ac_build_image_opcode(ac, &fmask_load);
2770 fmask_value = LLVMBuildExtractElement(ac->builder, fmask_value,
2771 ac->i32_0, "");
2772
2773 /* Apply the formula. */
2774 unsigned sample_chan = is_array_tex ? 3 : 2;
2775 LLVMValueRef final_sample;
2776 final_sample = LLVMBuildMul(ac->builder, addr[sample_chan],
2777 LLVMConstInt(ac->i32, 4, 0), "");
2778 final_sample = LLVMBuildLShr(ac->builder, fmask_value, final_sample, "");
2779 /* Mask the sample index by 0x7, because 0x8 means an unknown value
2780 * with EQAA, so those will map to 0. */
2781 final_sample = LLVMBuildAnd(ac->builder, final_sample,
2782 LLVMConstInt(ac->i32, 0x7, 0), "");
2783
2784 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
2785 * resource descriptor is 0 (invalid).
2786 */
2787 LLVMValueRef tmp;
2788 tmp = LLVMBuildBitCast(ac->builder, fmask, ac->v8i32, "");
2789 tmp = LLVMBuildExtractElement(ac->builder, tmp, ac->i32_1, "");
2790 tmp = LLVMBuildICmp(ac->builder, LLVMIntNE, tmp, ac->i32_0, "");
2791
2792 /* Replace the MSAA sample index. */
2793 addr[sample_chan] = LLVMBuildSelect(ac->builder, tmp, final_sample,
2794 addr[sample_chan], "");
2795 }
2796
2797 static LLVMValueRef
2798 _ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
2799 {
2800 ac_build_optimization_barrier(ctx, &src);
2801 return ac_build_intrinsic(ctx,
2802 lane == NULL ? "llvm.amdgcn.readfirstlane" : "llvm.amdgcn.readlane",
2803 LLVMTypeOf(src), (LLVMValueRef []) {
2804 src, lane },
2805 lane == NULL ? 1 : 2,
2806 AC_FUNC_ATTR_READNONE |
2807 AC_FUNC_ATTR_CONVERGENT);
2808 }
2809
2810 /**
2811 * Builds the "llvm.amdgcn.readlane" or "llvm.amdgcn.readfirstlane" intrinsic.
2812 * @param ctx
2813 * @param src
2814 * @param lane - id of the lane or NULL for the first active lane
2815 * @return value of the lane
2816 */
2817 LLVMValueRef
2818 ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
2819 {
2820 LLVMTypeRef src_type = LLVMTypeOf(src);
2821 src = ac_to_integer(ctx, src);
2822 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2823 LLVMValueRef ret;
2824
2825 if (bits == 32) {
2826 ret = _ac_build_readlane(ctx, src, lane);
2827 } else {
2828 assert(bits % 32 == 0);
2829 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2830 LLVMValueRef src_vector =
2831 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2832 ret = LLVMGetUndef(vec_type);
2833 for (unsigned i = 0; i < bits / 32; i++) {
2834 src = LLVMBuildExtractElement(ctx->builder, src_vector,
2835 LLVMConstInt(ctx->i32, i, 0), "");
2836 LLVMValueRef ret_comp = _ac_build_readlane(ctx, src, lane);
2837 ret = LLVMBuildInsertElement(ctx->builder, ret, ret_comp,
2838 LLVMConstInt(ctx->i32, i, 0), "");
2839 }
2840 }
2841 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
2842 }
2843
2844 LLVMValueRef
2845 ac_build_writelane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef value, LLVMValueRef lane)
2846 {
2847 /* TODO: Use the actual instruction when LLVM adds an intrinsic for it.
2848 */
2849 LLVMValueRef pred = LLVMBuildICmp(ctx->builder, LLVMIntEQ, lane,
2850 ac_get_thread_id(ctx), "");
2851 return LLVMBuildSelect(ctx->builder, pred, value, src, "");
2852 }
2853
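/* Count the bits set in the 64-bit lane mask at positions below the
 * current lane, e.g. ac_build_mbcnt(ctx, exec_mask) yields the rank of
 * the current lane among the active lanes.
 */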
2854 LLVMValueRef
2855 ac_build_mbcnt(struct ac_llvm_context *ctx, LLVMValueRef mask)
2856 {
2857 LLVMValueRef mask_vec = LLVMBuildBitCast(ctx->builder, mask,
2858 LLVMVectorType(ctx->i32, 2),
2859 "");
2860 LLVMValueRef mask_lo = LLVMBuildExtractElement(ctx->builder, mask_vec,
2861 ctx->i32_0, "");
2862 LLVMValueRef mask_hi = LLVMBuildExtractElement(ctx->builder, mask_vec,
2863 ctx->i32_1, "");
2864 LLVMValueRef val =
2865 ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.lo", ctx->i32,
2866 (LLVMValueRef []) { mask_lo, ctx->i32_0 },
2867 2, AC_FUNC_ATTR_READNONE);
2868 val = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi", ctx->i32,
2869 (LLVMValueRef []) { mask_hi, val },
2870 2, AC_FUNC_ATTR_READNONE);
2871 return val;
2872 }
2873
2874 enum dpp_ctrl {
2875 _dpp_quad_perm = 0x000,
2876 _dpp_row_sl = 0x100,
2877 _dpp_row_sr = 0x110,
2878 _dpp_row_rr = 0x120,
2879 dpp_wf_sl1 = 0x130,
2880 dpp_wf_rl1 = 0x134,
2881 dpp_wf_sr1 = 0x138,
2882 dpp_wf_rr1 = 0x13C,
2883 dpp_row_mirror = 0x140,
2884 dpp_row_half_mirror = 0x141,
2885 dpp_row_bcast15 = 0x142,
2886 dpp_row_bcast31 = 0x143
2887 };
2888
2889 static inline enum dpp_ctrl
2890 dpp_quad_perm(unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
2891 {
2892 assert(lane0 < 4 && lane1 < 4 && lane2 < 4 && lane3 < 4);
2893 return _dpp_quad_perm | lane0 | (lane1 << 2) | (lane2 << 4) | (lane3 << 6);
2894 }
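/* E.g. dpp_quad_perm(1, 0, 3, 2) swaps adjacent lane pairs within each
 * quad (used by ac_build_quad_swizzle and ac_build_reduce below).
 */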
2895
2896 static inline enum dpp_ctrl
2897 dpp_row_sl(unsigned amount)
2898 {
2899 assert(amount > 0 && amount < 16);
2900 return _dpp_row_sl | amount;
2901 }
2902
2903 static inline enum dpp_ctrl
2904 dpp_row_sr(unsigned amount)
2905 {
2906 assert(amount > 0 && amount < 16);
2907 return _dpp_row_sr | amount;
2908 }
2909
2910 static LLVMValueRef
2911 _ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
2912 enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
2913 bool bound_ctrl)
2914 {
2915 return ac_build_intrinsic(ctx, "llvm.amdgcn.update.dpp.i32",
2916 LLVMTypeOf(old),
2917 (LLVMValueRef[]) {
2918 old, src,
2919 LLVMConstInt(ctx->i32, dpp_ctrl, 0),
2920 LLVMConstInt(ctx->i32, row_mask, 0),
2921 LLVMConstInt(ctx->i32, bank_mask, 0),
2922 LLVMConstInt(ctx->i1, bound_ctrl, 0) },
2923 6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
2924 }
2925
2926 static LLVMValueRef
2927 ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
2928 enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
2929 bool bound_ctrl)
2930 {
2931 LLVMTypeRef src_type = LLVMTypeOf(src);
2932 src = ac_to_integer(ctx, src);
2933 old = ac_to_integer(ctx, old);
2934 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2935 LLVMValueRef ret;
2936 if (bits == 32) {
2937 ret = _ac_build_dpp(ctx, old, src, dpp_ctrl, row_mask,
2938 bank_mask, bound_ctrl);
2939 } else {
2940 assert(bits % 32 == 0);
2941 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2942 LLVMValueRef src_vector =
2943 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2944 LLVMValueRef old_vector =
2945 LLVMBuildBitCast(ctx->builder, old, vec_type, "");
2946 ret = LLVMGetUndef(vec_type);
2947 for (unsigned i = 0; i < bits / 32; i++) {
2948 src = LLVMBuildExtractElement(ctx->builder, src_vector,
2949 LLVMConstInt(ctx->i32, i,
2950 0), "");
2951 old = LLVMBuildExtractElement(ctx->builder, old_vector,
2952 LLVMConstInt(ctx->i32, i,
2953 0), "");
2954 LLVMValueRef ret_comp = _ac_build_dpp(ctx, old, src,
2955 dpp_ctrl,
2956 row_mask,
2957 bank_mask,
2958 bound_ctrl);
2959 ret = LLVMBuildInsertElement(ctx->builder, ret,
2960 ret_comp,
2961 LLVMConstInt(ctx->i32, i,
2962 0), "");
2963 }
2964 }
2965 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
2966 }
2967
2968 static inline unsigned
2969 ds_pattern_bitmode(unsigned and_mask, unsigned or_mask, unsigned xor_mask)
2970 {
2971 assert(and_mask < 32 && or_mask < 32 && xor_mask < 32);
2972 return and_mask | (or_mask << 5) | (xor_mask << 10);
2973 }
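/* In bitmode (offset bit 15 = 0), ds_swizzle reads from lane
 * (((lane & and_mask) | or_mask) ^ xor_mask) within each group of 32
 * lanes; bit 15 set selects the quad-perm encoding instead (see
 * ac_build_quad_swizzle).
 */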
2974
2975 static LLVMValueRef
2976 _ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
2977 {
2978 return ac_build_intrinsic(ctx, "llvm.amdgcn.ds.swizzle",
2979 LLVMTypeOf(src), (LLVMValueRef []) {
2980 src, LLVMConstInt(ctx->i32, mask, 0) },
2981 2, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
2982 }
2983
2984 LLVMValueRef
2985 ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
2986 {
2987 LLVMTypeRef src_type = LLVMTypeOf(src);
2988 src = ac_to_integer(ctx, src);
2989 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2990 LLVMValueRef ret;
2991 if (bits == 32) {
2992 ret = _ac_build_ds_swizzle(ctx, src, mask);
2993 } else {
2994 assert(bits % 32 == 0);
2995 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2996 LLVMValueRef src_vector =
2997 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2998 ret = LLVMGetUndef(vec_type);
2999 for (unsigned i = 0; i < bits / 32; i++) {
3000 src = LLVMBuildExtractElement(ctx->builder, src_vector,
3001 LLVMConstInt(ctx->i32, i,
3002 0), "");
3003 LLVMValueRef ret_comp = _ac_build_ds_swizzle(ctx, src,
3004 mask);
3005 ret = LLVMBuildInsertElement(ctx->builder, ret,
3006 ret_comp,
3007 LLVMConstInt(ctx->i32, i,
3008 0), "");
3009 }
3010 }
3011 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3012 }
3013
3014 static LLVMValueRef
3015 ac_build_wwm(struct ac_llvm_context *ctx, LLVMValueRef src)
3016 {
3017 char name[32], type[8];
3018 ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3019 snprintf(name, sizeof(name), "llvm.amdgcn.wwm.%s", type);
3020 return ac_build_intrinsic(ctx, name, LLVMTypeOf(src),
3021 (LLVMValueRef []) { src }, 1,
3022 AC_FUNC_ATTR_READNONE);
3023 }
3024
3025 static LLVMValueRef
3026 ac_build_set_inactive(struct ac_llvm_context *ctx, LLVMValueRef src,
3027 LLVMValueRef inactive)
3028 {
3029 char name[33], type[8];
3030 LLVMTypeRef src_type = LLVMTypeOf(src);
3031 src = ac_to_integer(ctx, src);
3032 inactive = ac_to_integer(ctx, inactive);
3033 ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3034 snprintf(name, sizeof(name), "llvm.amdgcn.set.inactive.%s", type);
3035 LLVMValueRef ret =
3036 ac_build_intrinsic(ctx, name,
3037 LLVMTypeOf(src), (LLVMValueRef []) {
3038 src, inactive }, 2,
3039 AC_FUNC_ATTR_READNONE |
3040 AC_FUNC_ATTR_CONVERGENT);
3041 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3042 }
3043
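/* Return the identity element for \p op, i.e. the value e with
 * op(e, x) == x for all x: 0 for iadd, 1 for imul, +INF for fmin, etc.
 */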
3044 static LLVMValueRef
3045 get_reduction_identity(struct ac_llvm_context *ctx, nir_op op, unsigned type_size)
3046 {
3047 if (type_size == 4) {
3048 switch (op) {
3049 case nir_op_iadd: return ctx->i32_0;
3050 case nir_op_fadd: return ctx->f32_0;
3051 case nir_op_imul: return ctx->i32_1;
3052 case nir_op_fmul: return ctx->f32_1;
3053 case nir_op_imin: return LLVMConstInt(ctx->i32, INT32_MAX, 0);
3054 case nir_op_umin: return LLVMConstInt(ctx->i32, UINT32_MAX, 0);
3055 case nir_op_fmin: return LLVMConstReal(ctx->f32, INFINITY);
3056 case nir_op_imax: return LLVMConstInt(ctx->i32, INT32_MIN, 0);
3057 case nir_op_umax: return ctx->i32_0;
3058 case nir_op_fmax: return LLVMConstReal(ctx->f32, -INFINITY);
3059 case nir_op_iand: return LLVMConstInt(ctx->i32, -1, 0);
3060 case nir_op_ior: return ctx->i32_0;
3061 case nir_op_ixor: return ctx->i32_0;
3062 default:
3063 unreachable("bad reduction intrinsic");
3064 }
3065 } else { /* type_size == 64bit */
3066 switch (op) {
3067 case nir_op_iadd: return ctx->i64_0;
3068 case nir_op_fadd: return ctx->f64_0;
3069 case nir_op_imul: return ctx->i64_1;
3070 case nir_op_fmul: return ctx->f64_1;
3071 case nir_op_imin: return LLVMConstInt(ctx->i64, INT64_MAX, 0);
3072 case nir_op_umin: return LLVMConstInt(ctx->i64, UINT64_MAX, 0);
3073 case nir_op_fmin: return LLVMConstReal(ctx->f64, INFINITY);
3074 case nir_op_imax: return LLVMConstInt(ctx->i64, INT64_MIN, 0);
3075 case nir_op_umax: return ctx->i64_0;
3076 case nir_op_fmax: return LLVMConstReal(ctx->f64, -INFINITY);
3077 case nir_op_iand: return LLVMConstInt(ctx->i64, -1, 0);
3078 case nir_op_ior: return ctx->i64_0;
3079 case nir_op_ixor: return ctx->i64_0;
3080 default:
3081 unreachable("bad reduction intrinsic");
3082 }
3083 }
3084 }
3085
3086 static LLVMValueRef
3087 ac_build_alu_op(struct ac_llvm_context *ctx, LLVMValueRef lhs, LLVMValueRef rhs, nir_op op)
3088 {
3089 bool _64bit = ac_get_type_size(LLVMTypeOf(lhs)) == 8;
3090 switch (op) {
3091 case nir_op_iadd: return LLVMBuildAdd(ctx->builder, lhs, rhs, "");
3092 case nir_op_fadd: return LLVMBuildFAdd(ctx->builder, lhs, rhs, "");
3093 case nir_op_imul: return LLVMBuildMul(ctx->builder, lhs, rhs, "");
3094 case nir_op_fmul: return LLVMBuildFMul(ctx->builder, lhs, rhs, "");
3095 case nir_op_imin: return LLVMBuildSelect(ctx->builder,
3096 LLVMBuildICmp(ctx->builder, LLVMIntSLT, lhs, rhs, ""),
3097 lhs, rhs, "");
3098 case nir_op_umin: return LLVMBuildSelect(ctx->builder,
3099 LLVMBuildICmp(ctx->builder, LLVMIntULT, lhs, rhs, ""),
3100 lhs, rhs, "");
3101 case nir_op_fmin: return ac_build_intrinsic(ctx,
3102 _64bit ? "llvm.minnum.f64" : "llvm.minnum.f32",
3103 _64bit ? ctx->f64 : ctx->f32,
3104 (LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
3105 case nir_op_imax: return LLVMBuildSelect(ctx->builder,
3106 LLVMBuildICmp(ctx->builder, LLVMIntSGT, lhs, rhs, ""),
3107 lhs, rhs, "");
3108 case nir_op_umax: return LLVMBuildSelect(ctx->builder,
3109 LLVMBuildICmp(ctx->builder, LLVMIntUGT, lhs, rhs, ""),
3110 lhs, rhs, "");
3111 case nir_op_fmax: return ac_build_intrinsic(ctx,
3112 _64bit ? "llvm.maxnum.f64" : "llvm.maxnum.f32",
3113 _64bit ? ctx->f64 : ctx->f32,
3114 (LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
3115 case nir_op_iand: return LLVMBuildAnd(ctx->builder, lhs, rhs, "");
3116 case nir_op_ior: return LLVMBuildOr(ctx->builder, lhs, rhs, "");
3117 case nir_op_ixor: return LLVMBuildXor(ctx->builder, lhs, rhs, "");
3118 default:
3119 unreachable("bad reduction intrinsic");
3120 }
3121 }
3122
3123 /**
3124 * \param maxprefix specifies that the result only needs to be correct for a
3125 * prefix of this many threads
3126 *
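 * The implementation is a Hillis-Steele-style inclusive scan: the source
 * shifted right by 1, 2 and 3 lanes is folded in first, then the partial
 * result shifted by 4 and 8 lanes, and finally row_bcast15/row_bcast31
 * carry the row totals across row and half-wave boundaries.
 *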
3127  * TODO: add inclusive and exclusive scan functions for SI chip class.
3128 */
3129 static LLVMValueRef
3130 ac_build_scan(struct ac_llvm_context *ctx, nir_op op, LLVMValueRef src, LLVMValueRef identity,
3131 unsigned maxprefix)
3132 {
3133 LLVMValueRef result, tmp;
3134 result = src;
3135 if (maxprefix <= 1)
3136 return result;
3137 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(1), 0xf, 0xf, false);
3138 result = ac_build_alu_op(ctx, result, tmp, op);
3139 if (maxprefix <= 2)
3140 return result;
3141 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(2), 0xf, 0xf, false);
3142 result = ac_build_alu_op(ctx, result, tmp, op);
3143 if (maxprefix <= 3)
3144 return result;
3145 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(3), 0xf, 0xf, false);
3146 result = ac_build_alu_op(ctx, result, tmp, op);
3147 if (maxprefix <= 4)
3148 return result;
3149 tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(4), 0xf, 0xe, false);
3150 result = ac_build_alu_op(ctx, result, tmp, op);
3151 if (maxprefix <= 8)
3152 return result;
3153 tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(8), 0xf, 0xc, false);
3154 result = ac_build_alu_op(ctx, result, tmp, op);
3155 if (maxprefix <= 16)
3156 return result;
3157 tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
3158 result = ac_build_alu_op(ctx, result, tmp, op);
3159 if (maxprefix <= 32)
3160 return result;
3161 tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
3162 result = ac_build_alu_op(ctx, result, tmp, op);
3163 return result;
3164 }
3165
3166 LLVMValueRef
3167 ac_build_inclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
3168 {
3169 LLVMValueRef result;
3170
3171 if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
3172 LLVMBuilderRef builder = ctx->builder;
3173 src = LLVMBuildZExt(builder, src, ctx->i32, "");
3174 result = ac_build_ballot(ctx, src);
3175 result = ac_build_mbcnt(ctx, result);
3176 result = LLVMBuildAdd(builder, result, src, "");
3177 return result;
3178 }
3179
3180 ac_build_optimization_barrier(ctx, &src);
3181
3182 LLVMValueRef identity =
3183 get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
3184 result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
3185 LLVMTypeOf(identity), "");
3186 result = ac_build_scan(ctx, op, result, identity, 64);
3187
3188 return ac_build_wwm(ctx, result);
3189 }
3190
3191 LLVMValueRef
3192 ac_build_exclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
3193 {
3194 LLVMValueRef result;
3195
3196 if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
3197 LLVMBuilderRef builder = ctx->builder;
3198 src = LLVMBuildZExt(builder, src, ctx->i32, "");
3199 result = ac_build_ballot(ctx, src);
3200 result = ac_build_mbcnt(ctx, result);
3201 return result;
3202 }
3203
3204 ac_build_optimization_barrier(ctx, &src);
3205
3206 LLVMValueRef identity =
3207 get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
3208 result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
3209 LLVMTypeOf(identity), "");
3210 result = ac_build_dpp(ctx, identity, result, dpp_wf_sr1, 0xf, 0xf, false);
3211 result = ac_build_scan(ctx, op, result, identity, 64);
3212
3213 return ac_build_wwm(ctx, result);
3214 }
3215
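/* Reduce \p src across clusters of \p cluster_size lanes (a power of two
 * up to the wave size) with a butterfly: quad swizzles handle clusters of
 * 2 and 4, then row half-mirror, row mirror and the row broadcasts (or
 * ds_swizzle patterns on pre-VI chips) handle the larger steps; the final
 * cross-half-wave step uses readlane.
 */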
3216 LLVMValueRef
3217 ac_build_reduce(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op, unsigned cluster_size)
3218 {
3219 if (cluster_size == 1) return src;
3220 ac_build_optimization_barrier(ctx, &src);
3221 LLVMValueRef result, swap;
3222 LLVMValueRef identity = get_reduction_identity(ctx, op,
3223 ac_get_type_size(LLVMTypeOf(src)));
3224 result = LLVMBuildBitCast(ctx->builder,
3225 ac_build_set_inactive(ctx, src, identity),
3226 LLVMTypeOf(identity), "");
3227 swap = ac_build_quad_swizzle(ctx, result, 1, 0, 3, 2);
3228 result = ac_build_alu_op(ctx, result, swap, op);
3229 if (cluster_size == 2) return ac_build_wwm(ctx, result);
3230
3231 swap = ac_build_quad_swizzle(ctx, result, 2, 3, 0, 1);
3232 result = ac_build_alu_op(ctx, result, swap, op);
3233 if (cluster_size == 4) return ac_build_wwm(ctx, result);
3234
3235 if (ctx->chip_class >= VI)
3236 swap = ac_build_dpp(ctx, identity, result, dpp_row_half_mirror, 0xf, 0xf, false);
3237 else
3238 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x04));
3239 result = ac_build_alu_op(ctx, result, swap, op);
3240 if (cluster_size == 8) return ac_build_wwm(ctx, result);
3241
3242 if (ctx->chip_class >= VI)
3243 swap = ac_build_dpp(ctx, identity, result, dpp_row_mirror, 0xf, 0xf, false);
3244 else
3245 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x08));
3246 result = ac_build_alu_op(ctx, result, swap, op);
3247 if (cluster_size == 16) return ac_build_wwm(ctx, result);
3248
3249 if (ctx->chip_class >= VI && cluster_size != 32)
3250 swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
3251 else
3252 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x10));
3253 result = ac_build_alu_op(ctx, result, swap, op);
3254 if (cluster_size == 32) return ac_build_wwm(ctx, result);
3255
3256 if (ctx->chip_class >= VI) {
3257 swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
3258 result = ac_build_alu_op(ctx, result, swap, op);
3259 result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 63, 0));
3260 return ac_build_wwm(ctx, result);
3261 } else {
3262 swap = ac_build_readlane(ctx, result, ctx->i32_0);
3263 result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 32, 0));
3264 result = ac_build_alu_op(ctx, result, swap, op);
3265 return ac_build_wwm(ctx, result);
3266 }
3267 }
3268
3269 /**
3270 * "Top half" of a scan that reduces per-wave values across an entire
3271 * workgroup.
3272 *
3273 * The source value must be present in the highest lane of the wave, and the
3274 * highest lane must be live.
3275 */
3276 void
3277 ac_build_wg_wavescan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3278 {
3279 if (ws->maxwaves <= 1)
3280 return;
3281
3282 const LLVMValueRef i32_63 = LLVMConstInt(ctx->i32, 63, false);
3283 LLVMBuilderRef builder = ctx->builder;
3284 LLVMValueRef tid = ac_get_thread_id(ctx);
3285 LLVMValueRef tmp;
3286
3287 tmp = LLVMBuildICmp(builder, LLVMIntEQ, tid, i32_63, "");
3288 ac_build_ifcc(ctx, tmp, 1000);
3289 LLVMBuildStore(builder, ws->src, LLVMBuildGEP(builder, ws->scratch, &ws->waveidx, 1, ""));
3290 ac_build_endif(ctx, 1000);
3291 }
3292
3293 /**
3294 * "Bottom half" of a scan that reduces per-wave values across an entire
3295 * workgroup.
3296 *
3297 * The caller must place a barrier between the top and bottom halves.
3298 */
3299 void
3300 ac_build_wg_wavescan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3301 {
3302 const LLVMTypeRef type = LLVMTypeOf(ws->src);
3303 const LLVMValueRef identity =
3304 get_reduction_identity(ctx, ws->op, ac_get_type_size(type));
3305
3306 if (ws->maxwaves <= 1) {
3307 ws->result_reduce = ws->src;
3308 ws->result_inclusive = ws->src;
3309 ws->result_exclusive = identity;
3310 return;
3311 }
3312 assert(ws->maxwaves <= 32);
3313
3314 LLVMBuilderRef builder = ctx->builder;
3315 LLVMValueRef tid = ac_get_thread_id(ctx);
3316 LLVMBasicBlockRef bbs[2];
3317 LLVMValueRef phivalues_scan[2];
3318 LLVMValueRef tmp, tmp2;
3319
3320 bbs[0] = LLVMGetInsertBlock(builder);
3321 phivalues_scan[0] = LLVMGetUndef(type);
3322
3323 if (ws->enable_reduce)
3324 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->numwaves, "");
3325 else if (ws->enable_inclusive)
3326 tmp = LLVMBuildICmp(builder, LLVMIntULE, tid, ws->waveidx, "");
3327 else
3328 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->waveidx, "");
3329 ac_build_ifcc(ctx, tmp, 1001);
3330 {
3331 tmp = LLVMBuildLoad(builder, LLVMBuildGEP(builder, ws->scratch, &tid, 1, ""), "");
3332
3333 ac_build_optimization_barrier(ctx, &tmp);
3334
3335 bbs[1] = LLVMGetInsertBlock(builder);
3336 phivalues_scan[1] = ac_build_scan(ctx, ws->op, tmp, identity, ws->maxwaves);
3337 }
3338 ac_build_endif(ctx, 1001);
3339
3340 const LLVMValueRef scan = ac_build_phi(ctx, type, 2, phivalues_scan, bbs);
3341
3342 if (ws->enable_reduce) {
3343 tmp = LLVMBuildSub(builder, ws->numwaves, ctx->i32_1, "");
3344 ws->result_reduce = ac_build_readlane(ctx, scan, tmp);
3345 }
3346 if (ws->enable_inclusive)
3347 ws->result_inclusive = ac_build_readlane(ctx, scan, ws->waveidx);
3348 if (ws->enable_exclusive) {
3349 tmp = LLVMBuildSub(builder, ws->waveidx, ctx->i32_1, "");
3350 tmp = ac_build_readlane(ctx, scan, tmp);
3351 tmp2 = LLVMBuildICmp(builder, LLVMIntEQ, ws->waveidx, ctx->i32_0, "");
3352 ws->result_exclusive = LLVMBuildSelect(builder, tmp2, identity, tmp, "");
3353 }
3354 }
3355
3356 /**
3357 * Inclusive scan of a per-wave value across an entire workgroup.
3358 *
3359 * This implies an s_barrier instruction.
3360 *
3361 * Unlike ac_build_inclusive_scan, the caller \em must ensure that all threads
3362 * of the workgroup are live. (This requirement cannot easily be relaxed in a
3363 * useful manner because of the barrier in the algorithm.)
3364 */
3365 void
3366 ac_build_wg_wavescan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3367 {
3368 ac_build_wg_wavescan_top(ctx, ws);
3369 ac_build_s_barrier(ctx);
3370 ac_build_wg_wavescan_bottom(ctx, ws);
3371 }
3372
3373 /**
3374 * "Top half" of a scan that reduces per-thread values across an entire
3375 * workgroup.
3376 *
3377 * All lanes must be active when this code runs.
3378 */
3379 void
3380 ac_build_wg_scan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3381 {
3382 if (ws->enable_exclusive) {
3383 ws->extra = ac_build_exclusive_scan(ctx, ws->src, ws->op);
3384 if (LLVMTypeOf(ws->src) == ctx->i1 && ws->op == nir_op_iadd)
3385 ws->src = LLVMBuildZExt(ctx->builder, ws->src, ctx->i32, "");
3386 ws->src = ac_build_alu_op(ctx, ws->extra, ws->src, ws->op);
3387 } else {
3388 ws->src = ac_build_inclusive_scan(ctx, ws->src, ws->op);
3389 }
3390
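	/* Only the exclusive scan over the per-wave totals is required:
	 * the bottom half derives both per-thread results from
	 * ws->result_exclusive. */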
3391 bool enable_inclusive = ws->enable_inclusive;
3392 bool enable_exclusive = ws->enable_exclusive;
3393 ws->enable_inclusive = false;
3394 ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
3395 ac_build_wg_wavescan_top(ctx, ws);
3396 ws->enable_inclusive = enable_inclusive;
3397 ws->enable_exclusive = enable_exclusive;
3398 }
3399
3400 /**
3401 * "Bottom half" of a scan that reduces per-thread values across an entire
3402 * workgroup.
3403 *
3404 * The caller must place a barrier between the top and bottom halves.
3405 */
3406 void
3407 ac_build_wg_scan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3408 {
3409 bool enable_inclusive = ws->enable_inclusive;
3410 bool enable_exclusive = ws->enable_exclusive;
3411 ws->enable_inclusive = false;
3412 ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
3413 ac_build_wg_wavescan_bottom(ctx, ws);
3414 ws->enable_inclusive = enable_inclusive;
3415 ws->enable_exclusive = enable_exclusive;
3416
3417 /* ws->result_reduce is already the correct value */
3418 if (ws->enable_inclusive)
3419 ws->result_inclusive = ac_build_alu_op(ctx, ws->result_exclusive, ws->src, ws->op);
3420 if (ws->enable_exclusive)
3421 ws->result_exclusive = ac_build_alu_op(ctx, ws->result_exclusive, ws->extra, ws->op);
3422 }
3423
3424 /**
3425 * A scan that reduces per-thread values across an entire workgroup.
3426 *
3427 * The caller must ensure that all lanes are active when this code runs
3428 * (WWM is insufficient!), because there is an implied barrier.
3429 */
3430 void
3431 ac_build_wg_scan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
3432 {
3433 ac_build_wg_scan_top(ctx, ws);
3434 ac_build_s_barrier(ctx);
3435 ac_build_wg_scan_bottom(ctx, ws);
3436 }
3437
3438 LLVMValueRef
3439 ac_build_quad_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src,
3440 unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
3441 {
3442 unsigned mask = dpp_quad_perm(lane0, lane1, lane2, lane3);
3443 if (ctx->chip_class >= VI) {
3444 return ac_build_dpp(ctx, src, src, mask, 0xf, 0xf, false);
3445 } else {
3446 return ac_build_ds_swizzle(ctx, src, (1 << 15) | mask);
3447 }
3448 }
3449
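/* Return the value of \p src in the lane selected by \p index.
 * ds_bpermute addresses lanes in bytes, hence the index * 4.
 */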
3450 LLVMValueRef
3451 ac_build_shuffle(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef index)
3452 {
3453 index = LLVMBuildMul(ctx->builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
3454 return ac_build_intrinsic(ctx,
3455 "llvm.amdgcn.ds.bpermute", ctx->i32,
3456 (LLVMValueRef []) {index, src}, 2,
3457 AC_FUNC_ATTR_READNONE |
3458 AC_FUNC_ATTR_CONVERGENT);
3459 }