amd/common: whitespace fixes
[mesa.git] src/amd/common/ac_llvm_build.c
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25 /* based on pieces from si_pipe.c and radeon_llvm_emit.c */
26 #include "ac_llvm_build.h"
27
28 #include <llvm-c/Core.h>
29
30 #include "c11/threads.h"
31
32 #include <assert.h>
33 #include <stdio.h>
34
35 #include "ac_llvm_util.h"
36 #include "ac_exp_param.h"
37 #include "util/bitscan.h"
38 #include "util/macros.h"
39 #include "util/u_atomic.h"
40 #include "util/u_math.h"
41 #include "sid.h"
42
43 #include "shader_enums.h"
44
45 #define AC_LLVM_INITIAL_CF_DEPTH 4
46
47 /* Data for if/else/endif and bgnloop/endloop control flow structures.
48 */
49 struct ac_llvm_flow {
50 /* Loop exit or next part of if/else/endif. */
51 LLVMBasicBlockRef next_block;
52 LLVMBasicBlockRef loop_entry_block;
53 };
54
55 /* Initialize module-independent parts of the context.
56 *
57 * The caller is responsible for initializing ctx::module and ctx::builder.
58 */
59 void
60 ac_llvm_context_init(struct ac_llvm_context *ctx,
61 enum chip_class chip_class, enum radeon_family family)
62 {
63 LLVMValueRef args[1];
64
65 ctx->context = LLVMContextCreate();
66
67 ctx->chip_class = chip_class;
68 ctx->family = family;
69 ctx->module = NULL;
70 ctx->builder = NULL;
71
72 ctx->voidt = LLVMVoidTypeInContext(ctx->context);
73 ctx->i1 = LLVMInt1TypeInContext(ctx->context);
74 ctx->i8 = LLVMInt8TypeInContext(ctx->context);
75 ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
76 ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
77 ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
78 ctx->intptr = ctx->i32;
79 ctx->f16 = LLVMHalfTypeInContext(ctx->context);
80 ctx->f32 = LLVMFloatTypeInContext(ctx->context);
81 ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
82 ctx->v2i16 = LLVMVectorType(ctx->i16, 2);
83 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
84 ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
85 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
86 ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
87 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
88 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
89
90 ctx->i16_0 = LLVMConstInt(ctx->i16, 0, false);
91 ctx->i16_1 = LLVMConstInt(ctx->i16, 1, false);
92 ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
93 ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
94 ctx->i64_0 = LLVMConstInt(ctx->i64, 0, false);
95 ctx->i64_1 = LLVMConstInt(ctx->i64, 1, false);
96 ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
97 ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);
98 ctx->f64_0 = LLVMConstReal(ctx->f64, 0.0);
99 ctx->f64_1 = LLVMConstReal(ctx->f64, 1.0);
100
101 ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
102 ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
103
104 ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context,
105 "range", 5);
106
107 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
108 "invariant.load", 14);
109
110 ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);
111
112 args[0] = LLVMConstReal(ctx->f32, 2.5);
113 ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);
114
115 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
116 "amdgpu.uniform", 14);
117
118 ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
119 }
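/* A minimal usage sketch (illustrative only; variable names are hypothetical):
 * after ac_llvm_context_init(), the caller supplies the module and builder in
 * the same LLVMContext before any ac_build_* helpers are called:
 *
 *    struct ac_llvm_context ac;
 *    ac_llvm_context_init(&ac, chip_class, family);
 *    ac.module = LLVMModuleCreateWithNameInContext("shader", ac.context);
 *    ac.builder = LLVMCreateBuilderInContext(ac.context);
 */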
120
121 void
122 ac_llvm_context_dispose(struct ac_llvm_context *ctx)
123 {
124 free(ctx->flow);
125 ctx->flow = NULL;
126 ctx->flow_depth_max = 0;
127 }
128
129 int
130 ac_get_llvm_num_components(LLVMValueRef value)
131 {
132 LLVMTypeRef type = LLVMTypeOf(value);
133 unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind
134 ? LLVMGetVectorSize(type)
135 : 1;
136 return num_components;
137 }
138
139 LLVMValueRef
140 ac_llvm_extract_elem(struct ac_llvm_context *ac,
141 LLVMValueRef value,
142 int index)
143 {
144 if (LLVMGetTypeKind(LLVMTypeOf(value)) != LLVMVectorTypeKind) {
145 assert(index == 0);
146 return value;
147 }
148
149 return LLVMBuildExtractElement(ac->builder, value,
150 LLVMConstInt(ac->i32, index, false), "");
151 }
152
153 int
154 ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
155 {
156 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
157 type = LLVMGetElementType(type);
158
159 if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind)
160 return LLVMGetIntTypeWidth(type);
161
162 if (type == ctx->f16)
163 return 16;
164 if (type == ctx->f32)
165 return 32;
166 if (type == ctx->f64)
167 return 64;
168
169 unreachable("Unhandled type kind in get_elem_bits");
170 }
171
172 unsigned
173 ac_get_type_size(LLVMTypeRef type)
174 {
175 LLVMTypeKind kind = LLVMGetTypeKind(type);
176
177 switch (kind) {
178 case LLVMIntegerTypeKind:
179 return LLVMGetIntTypeWidth(type) / 8;
180 case LLVMHalfTypeKind:
181 return 2;
182 case LLVMFloatTypeKind:
183 return 4;
184 case LLVMDoubleTypeKind:
185 return 8;
186 case LLVMPointerTypeKind:
187 if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_CONST_32BIT)
188 return 4;
189 return 8;
190 case LLVMVectorTypeKind:
191 return LLVMGetVectorSize(type) *
192 ac_get_type_size(LLVMGetElementType(type));
193 case LLVMArrayTypeKind:
194 return LLVMGetArrayLength(type) *
195 ac_get_type_size(LLVMGetElementType(type));
196 default:
197 assert(0);
198 return 0;
199 }
200 }
201
202 static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
203 {
204 if (t == ctx->f16 || t == ctx->i16)
205 return ctx->i16;
206 else if (t == ctx->f32 || t == ctx->i32)
207 return ctx->i32;
208 else if (t == ctx->f64 || t == ctx->i64)
209 return ctx->i64;
210 else
211 unreachable("Unhandled integer size");
212 }
213
214 LLVMTypeRef
215 ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
216 {
217 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
218 LLVMTypeRef elem_type = LLVMGetElementType(t);
219 return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
220 LLVMGetVectorSize(t));
221 }
222 return to_integer_type_scalar(ctx, t);
223 }
224
225 LLVMValueRef
226 ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
227 {
228 LLVMTypeRef type = LLVMTypeOf(v);
229 return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
230 }
231
232 LLVMValueRef
233 ac_to_integer_or_pointer(struct ac_llvm_context *ctx, LLVMValueRef v)
234 {
235 LLVMTypeRef type = LLVMTypeOf(v);
236 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
237 return v;
238 return ac_to_integer(ctx, v);
239 }
240
241 static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
242 {
243 if (t == ctx->i16 || t == ctx->f16)
244 return ctx->f16;
245 else if (t == ctx->i32 || t == ctx->f32)
246 return ctx->f32;
247 else if (t == ctx->i64 || t == ctx->f64)
248 return ctx->f64;
249 else
250 unreachable("Unhandled float size");
251 }
252
253 LLVMTypeRef
254 ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
255 {
256 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
257 LLVMTypeRef elem_type = LLVMGetElementType(t);
258 return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
259 LLVMGetVectorSize(t));
260 }
261 return to_float_type_scalar(ctx, t);
262 }
263
264 LLVMValueRef
265 ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
266 {
267 LLVMTypeRef type = LLVMTypeOf(v);
268 return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
269 }
270
271
272 LLVMValueRef
273 ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
274 LLVMTypeRef return_type, LLVMValueRef *params,
275 unsigned param_count, unsigned attrib_mask)
276 {
277 LLVMValueRef function, call;
278 bool set_callsite_attrs = !(attrib_mask & AC_FUNC_ATTR_LEGACY);
279
280 function = LLVMGetNamedFunction(ctx->module, name);
281 if (!function) {
282 LLVMTypeRef param_types[32], function_type;
283 unsigned i;
284
285 assert(param_count <= 32);
286
287 for (i = 0; i < param_count; ++i) {
288 assert(params[i]);
289 param_types[i] = LLVMTypeOf(params[i]);
290 }
291 function_type =
292 LLVMFunctionType(return_type, param_types, param_count, 0);
293 function = LLVMAddFunction(ctx->module, name, function_type);
294
295 LLVMSetFunctionCallConv(function, LLVMCCallConv);
296 LLVMSetLinkage(function, LLVMExternalLinkage);
297
298 if (!set_callsite_attrs)
299 ac_add_func_attributes(ctx->context, function, attrib_mask);
300 }
301
302 call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
303 if (set_callsite_attrs)
304 ac_add_func_attributes(ctx->context, call, attrib_mask);
305 return call;
306 }
307
308 /**
309 * Given a scalar or vector \p type, generate its textual name (e.g. for use with
310 * intrinsic names).
311 */
312 void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
313 {
314 LLVMTypeRef elem_type = type;
315
316 assert(bufsize >= 8);
317
318 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
319 int ret = snprintf(buf, bufsize, "v%u",
320 LLVMGetVectorSize(type));
321 if (ret < 0) {
322 char *type_name = LLVMPrintTypeToString(type);
323 fprintf(stderr, "Error building type name for: %s\n",
324 type_name);
325 return;
326 }
327 elem_type = LLVMGetElementType(type);
328 buf += ret;
329 bufsize -= ret;
330 }
331 switch (LLVMGetTypeKind(elem_type)) {
332 default: break;
333 case LLVMIntegerTypeKind:
334 snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
335 break;
336 case LLVMHalfTypeKind:
337 snprintf(buf, bufsize, "f16");
338 break;
339 case LLVMFloatTypeKind:
340 snprintf(buf, bufsize, "f32");
341 break;
342 case LLVMDoubleTypeKind:
343 snprintf(buf, bufsize, "f64");
344 break;
345 }
346 }
347
348 /**
349 * Helper function that builds an LLVM IR PHI node and immediately adds
350 * incoming edges.
351 */
352 LLVMValueRef
353 ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type,
354 unsigned count_incoming, LLVMValueRef *values,
355 LLVMBasicBlockRef *blocks)
356 {
357 LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
358 LLVMAddIncoming(phi, values, blocks, count_incoming);
359 return phi;
360 }
361
362 void ac_build_s_barrier(struct ac_llvm_context *ctx)
363 {
364 ac_build_intrinsic(ctx, "llvm.amdgcn.s.barrier", ctx->voidt, NULL,
365 0, AC_FUNC_ATTR_CONVERGENT);
366 }
367
368 /* Prevent optimizations (at least of memory accesses) across the current
369 * point in the program by emitting empty inline assembly that is marked as
370 * having side effects.
371 *
372 * Optionally, a value can be passed through the inline assembly to prevent
373 * LLVM from hoisting calls to ReadNone functions.
374 */
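/* Implementation note (an inference, not from the original file): the "=v,0"
 * constraint used below declares a VGPR output tied to input operand 0, so
 * the value is routed through the empty asm block and all of its uses stay
 * ordered relative to the barrier.
 */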
375 void
376 ac_build_optimization_barrier(struct ac_llvm_context *ctx,
377 LLVMValueRef *pvgpr)
378 {
379 static int counter = 0;
380
381 LLVMBuilderRef builder = ctx->builder;
382 char code[16];
383
384 snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));
385
386 if (!pvgpr) {
387 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
388 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
389 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
390 } else {
391 LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
392 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
393 LLVMValueRef vgpr = *pvgpr;
394 LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
395 unsigned vgpr_size = ac_get_type_size(vgpr_type);
396 LLVMValueRef vgpr0;
397
398 assert(vgpr_size % 4 == 0);
399
400 vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
401 vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
402 vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
403 vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
404 vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
405
406 *pvgpr = vgpr;
407 }
408 }
409
410 LLVMValueRef
411 ac_build_shader_clock(struct ac_llvm_context *ctx)
412 {
413 LLVMValueRef tmp = ac_build_intrinsic(ctx, "llvm.readcyclecounter",
414 ctx->i64, NULL, 0, 0);
415 return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
416 }
417
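/* Return a 64-bit mask with one bit per lane, set for each active lane in
 * which \p value is non-zero (via llvm.amdgcn.icmp with an NE-0 predicate).
 */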
418 LLVMValueRef
419 ac_build_ballot(struct ac_llvm_context *ctx,
420 LLVMValueRef value)
421 {
422 LLVMValueRef args[3] = {
423 value,
424 ctx->i32_0,
425 LLVMConstInt(ctx->i32, LLVMIntNE, 0)
426 };
427
428 /* We currently have no other way to prevent LLVM from lifting the icmp
429 * calls to a dominating basic block.
430 */
431 ac_build_optimization_barrier(ctx, &args[0]);
432
433 args[0] = ac_to_integer(ctx, args[0]);
434
435 return ac_build_intrinsic(ctx,
436 "llvm.amdgcn.icmp.i32",
437 ctx->i64, args, 3,
438 AC_FUNC_ATTR_NOUNWIND |
439 AC_FUNC_ATTR_READNONE |
440 AC_FUNC_ATTR_CONVERGENT);
441 }
442
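/* Return i1 true iff \p value is true in every active lane: the ballot of
 * \p value must equal the ballot of an always-true predicate, i.e. the set
 * of currently active lanes.
 */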
443 LLVMValueRef
444 ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
445 {
446 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
447 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
448 return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
449 }
450
451 LLVMValueRef
452 ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
453 {
454 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
455 return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set,
456 LLVMConstInt(ctx->i64, 0, 0), "");
457 }
458
459 LLVMValueRef
460 ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
461 {
462 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
463 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
464
465 LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
466 vote_set, active_set, "");
467 LLVMValueRef none = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
468 vote_set,
469 LLVMConstInt(ctx->i64, 0, 0), "");
470 return LLVMBuildOr(ctx->builder, all, none, "");
471 }
472
473 LLVMValueRef
474 ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
475 unsigned value_count, unsigned component)
476 {
477 LLVMValueRef vec = NULL;
478
479 if (value_count == 1) {
480 return values[component];
481 } else if (!value_count)
482 unreachable("value_count is 0");
483
484 for (unsigned i = component; i < value_count + component; i++) {
485 LLVMValueRef value = values[i];
486
487 if (i == component)
488 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
489 LLVMValueRef index = LLVMConstInt(ctx->i32, i - component, false);
490 vec = LLVMBuildInsertElement(ctx->builder, vec, value, index, "");
491 }
492 return vec;
493 }
494
495 LLVMValueRef
496 ac_build_gather_values_extended(struct ac_llvm_context *ctx,
497 LLVMValueRef *values,
498 unsigned value_count,
499 unsigned value_stride,
500 bool load,
501 bool always_vector)
502 {
503 LLVMBuilderRef builder = ctx->builder;
504 LLVMValueRef vec = NULL;
505 unsigned i;
506
507 if (value_count == 1 && !always_vector) {
508 if (load)
509 return LLVMBuildLoad(builder, values[0], "");
510 return values[0];
511 } else if (!value_count)
512 unreachable("value_count is 0");
513
514 for (i = 0; i < value_count; i++) {
515 LLVMValueRef value = values[i * value_stride];
516 if (load)
517 value = LLVMBuildLoad(builder, value, "");
518
519 if (!i)
520 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
521 LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
522 vec = LLVMBuildInsertElement(builder, vec, value, index, "");
523 }
524 return vec;
525 }
526
527 LLVMValueRef
528 ac_build_gather_values(struct ac_llvm_context *ctx,
529 LLVMValueRef *values,
530 unsigned value_count)
531 {
532 return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
533 }
534
535 /* Expand a scalar or vector to <dst_channels x type> by filling the remaining
536 * channels with undef. Extract at most src_channels components from the input.
537 */
538 LLVMValueRef ac_build_expand(struct ac_llvm_context *ctx,
539 LLVMValueRef value,
540 unsigned src_channels,
541 unsigned dst_channels)
542 {
543 LLVMTypeRef elemtype;
544 LLVMValueRef chan[dst_channels];
545
546 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
547 unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
548
549 if (src_channels == dst_channels && vec_size == dst_channels)
550 return value;
551
552 src_channels = MIN2(src_channels, vec_size);
553
554 for (unsigned i = 0; i < src_channels; i++)
555 chan[i] = ac_llvm_extract_elem(ctx, value, i);
556
557 elemtype = LLVMGetElementType(LLVMTypeOf(value));
558 } else {
559 if (src_channels) {
560 assert(src_channels == 1);
561 chan[0] = value;
562 }
563 elemtype = LLVMTypeOf(value);
564 }
565
566 for (unsigned i = src_channels; i < dst_channels; i++)
567 chan[i] = LLVMGetUndef(elemtype);
568
569 return ac_build_gather_values(ctx, chan, dst_channels);
570 }
571
572 /* Expand a scalar or vector to <4 x type> by filling the remaining channels
573 * with undef. Extract at most num_channels components from the input.
574 */
575 LLVMValueRef ac_build_expand_to_vec4(struct ac_llvm_context *ctx,
576 LLVMValueRef value,
577 unsigned num_channels)
578 {
579 return ac_build_expand(ctx, value, num_channels, 4);
580 }
581
582 LLVMValueRef ac_build_round(struct ac_llvm_context *ctx, LLVMValueRef value)
583 {
584 unsigned type_size = ac_get_type_size(LLVMTypeOf(value));
585 const char *name;
586
587 if (type_size == 2)
588 name = "llvm.rint.f16";
589 else if (type_size == 4)
590 name = "llvm.rint.f32";
591 else
592 name = "llvm.rint.f64";
593
594 return ac_build_intrinsic(ctx, name, LLVMTypeOf(value), &value, 1,
595 AC_FUNC_ATTR_READNONE);
596 }
597
598 LLVMValueRef
599 ac_build_fdiv(struct ac_llvm_context *ctx,
600 LLVMValueRef num,
601 LLVMValueRef den)
602 {
603 /* If we do (num / den), LLVM >= 7.0 does:
604 * return num * v_rcp_f32(den * (fabs(den) > 0x1.0p+96f ? 0x1.0p-32f : 1.0f));
605 *
606 * If we do (num * (1 / den)), LLVM does:
607 * return num * v_rcp_f32(den);
608 */
609 LLVMValueRef one = LLVMTypeOf(num) == ctx->f64 ? ctx->f64_1 : ctx->f32_1;
610 LLVMValueRef rcp = LLVMBuildFDiv(ctx->builder, one, den, "");
611 LLVMValueRef ret = LLVMBuildFMul(ctx->builder, num, rcp, "");
612
613 /* Use v_rcp_f32 instead of precise division. */
614 if (!LLVMIsConstant(ret))
615 LLVMSetMetadata(ret, ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
616 return ret;
617 }
618
619 /* See fast_idiv_by_const.h. */
620 /* Set: increment = util_fast_udiv_info::increment ? multiplier : 0; */
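/* The sequence below evaluates, entirely in 64-bit arithmetic:
 *
 *    quotient = (((num >> pre_shift) * multiplier + increment) >> 32) >> post_shift
 *
 * i.e. the usual replacement of an unsigned division by a multiply-shift
 * with precomputed constants when the divisor is known in advance.
 */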
621 LLVMValueRef ac_build_fast_udiv(struct ac_llvm_context *ctx,
622 LLVMValueRef num,
623 LLVMValueRef multiplier,
624 LLVMValueRef pre_shift,
625 LLVMValueRef post_shift,
626 LLVMValueRef increment)
627 {
628 LLVMBuilderRef builder = ctx->builder;
629
630 num = LLVMBuildLShr(builder, num, pre_shift, "");
631 num = LLVMBuildMul(builder,
632 LLVMBuildZExt(builder, num, ctx->i64, ""),
633 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
634 num = LLVMBuildAdd(builder, num,
635 LLVMBuildZExt(builder, increment, ctx->i64, ""), "");
636 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
637 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
638 return LLVMBuildLShr(builder, num, post_shift, "");
639 }
640
641 /* See fast_idiv_by_const.h. */
642 /* If num != UINT_MAX, this more efficient version can be used. */
643 /* Set: increment = util_fast_udiv_info::increment; */
644 LLVMValueRef ac_build_fast_udiv_nuw(struct ac_llvm_context *ctx,
645 LLVMValueRef num,
646 LLVMValueRef multiplier,
647 LLVMValueRef pre_shift,
648 LLVMValueRef post_shift,
649 LLVMValueRef increment)
650 {
651 LLVMBuilderRef builder = ctx->builder;
652
653 num = LLVMBuildLShr(builder, num, pre_shift, "");
654 num = LLVMBuildNUWAdd(builder, num, increment, "");
655 num = LLVMBuildMul(builder,
656 LLVMBuildZExt(builder, num, ctx->i64, ""),
657 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
658 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
659 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
660 return LLVMBuildLShr(builder, num, post_shift, "");
661 }
662
663 /* See fast_idiv_by_const.h. */
664 /* Both operands must fit in 31 bits and the divisor must not be 1. */
665 LLVMValueRef ac_build_fast_udiv_u31_d_not_one(struct ac_llvm_context *ctx,
666 LLVMValueRef num,
667 LLVMValueRef multiplier,
668 LLVMValueRef post_shift)
669 {
670 LLVMBuilderRef builder = ctx->builder;
671
672 num = LLVMBuildMul(builder,
673 LLVMBuildZExt(builder, num, ctx->i64, ""),
674 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
675 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
676 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
677 return LLVMBuildLShr(builder, num, post_shift, "");
678 }
679
680 /* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
681 * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
682 * already multiplied by two. id is the cube face number.
683 */
684 struct cube_selection_coords {
685 LLVMValueRef stc[2];
686 LLVMValueRef ma;
687 LLVMValueRef id;
688 };
689
690 static void
691 build_cube_intrinsic(struct ac_llvm_context *ctx,
692 LLVMValueRef in[3],
693 struct cube_selection_coords *out)
694 {
695 LLVMTypeRef f32 = ctx->f32;
696
697 out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc",
698 f32, in, 3, AC_FUNC_ATTR_READNONE);
699 out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc",
700 f32, in, 3, AC_FUNC_ATTR_READNONE);
701 out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema",
702 f32, in, 3, AC_FUNC_ATTR_READNONE);
703 out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid",
704 f32, in, 3, AC_FUNC_ATTR_READNONE);
705 }
706
707 /**
708 * Build a manual selection sequence for cube face sc/tc coordinates and
709 * major axis vector (multiplied by 2 for consistency) for the given
710 * vec3 \p coords, for the face implied by \p selcoords.
711 *
712 * For the major axis, we always adjust the sign to be in the direction of
713 * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
714 * the selcoords major axis.
715 */
716 static void build_cube_select(struct ac_llvm_context *ctx,
717 const struct cube_selection_coords *selcoords,
718 const LLVMValueRef *coords,
719 LLVMValueRef *out_st,
720 LLVMValueRef *out_ma)
721 {
722 LLVMBuilderRef builder = ctx->builder;
723 LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
724 LLVMValueRef is_ma_positive;
725 LLVMValueRef sgn_ma;
726 LLVMValueRef is_ma_z, is_not_ma_z;
727 LLVMValueRef is_ma_y;
728 LLVMValueRef is_ma_x;
729 LLVMValueRef sgn;
730 LLVMValueRef tmp;
731
732 is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE,
733 selcoords->ma, LLVMConstReal(f32, 0.0), "");
734 sgn_ma = LLVMBuildSelect(builder, is_ma_positive,
735 LLVMConstReal(f32, 1.0), LLVMConstReal(f32, -1.0), "");
736
737 is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
738 is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
739 is_ma_y = LLVMBuildAnd(builder, is_not_ma_z,
740 LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
741 is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");
742
743 /* Select sc */
744 tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
745 sgn = LLVMBuildSelect(builder, is_ma_y, LLVMConstReal(f32, 1.0),
746 LLVMBuildSelect(builder, is_ma_z, sgn_ma,
747 LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
748 out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");
749
750 /* Select tc */
751 tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
752 sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma,
753 LLVMConstReal(f32, -1.0), "");
754 out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");
755
756 /* Select ma */
757 tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
758 LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
759 tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32",
760 ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
761 *out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
762 }
763
764 void
765 ac_prepare_cube_coords(struct ac_llvm_context *ctx,
766 bool is_deriv, bool is_array, bool is_lod,
767 LLVMValueRef *coords_arg,
768 LLVMValueRef *derivs_arg)
769 {
770
771 LLVMBuilderRef builder = ctx->builder;
772 struct cube_selection_coords selcoords;
773 LLVMValueRef coords[3];
774 LLVMValueRef invma;
775
776 if (is_array && !is_lod) {
777 LLVMValueRef tmp = ac_build_round(ctx, coords_arg[3]);
778
779 /* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
780 *
781 * "For Array forms, the array layer used will be
782 *
783 * max(0, min(d−1, floor(layer+0.5)))
784 *
785 * where d is the depth of the texture array and layer
786 * comes from the component indicated in the tables below."
787 * Workaround for an issue where the layer is taken from a
788 * helper invocation which happens to fall on a different
789 * layer due to extrapolation.
790 *
791 * VI and earlier attempt to implement this in hardware by
792 * clamping the value of coords[2] = (8 * layer) + face.
793 * Unfortunately, this means that we end up with the wrong
794 * face when clamping occurs.
795 *
796 * Clamp the layer earlier to work around the issue.
797 */
798 if (ctx->chip_class <= VI) {
799 LLVMValueRef ge0;
800 ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
801 tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
802 }
803
804 coords_arg[3] = tmp;
805 }
806
807 build_cube_intrinsic(ctx, coords_arg, &selcoords);
808
809 invma = ac_build_intrinsic(ctx, "llvm.fabs.f32",
810 ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
811 invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);
812
813 for (int i = 0; i < 2; ++i)
814 coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");
815
816 coords[2] = selcoords.id;
817
818 if (is_deriv && derivs_arg) {
819 LLVMValueRef derivs[4];
820 int axis;
821
822 /* Convert cube derivatives to 2D derivatives. */
823 for (axis = 0; axis < 2; axis++) {
824 LLVMValueRef deriv_st[2];
825 LLVMValueRef deriv_ma;
826
827 /* Transform the derivative alongside the texture
828 * coordinate. Mathematically, the correct formula is
829 * as follows. Assume we're projecting onto the +Z face
830 * and denote by dx/dh the derivative of the (original)
831 * X texture coordinate with respect to horizontal
832 * window coordinates. The projection onto the +Z face
833 * plane is:
834 *
835 * f(x,z) = x/z
836 *
837 * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
838 * = 1/z * dx/dh - x/z * 1/z * dz/dh.
839 *
840 * This motivates the implementation below.
841 *
842 * Whether this actually gives the expected results for
843 * apps that might feed in derivatives obtained via
844 * finite differences is anyone's guess. The OpenGL spec
845 * seems awfully quiet about how textureGrad for cube
846 * maps should be handled.
847 */
848 build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3],
849 deriv_st, &deriv_ma);
850
851 deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");
852
853 for (int i = 0; i < 2; ++i)
854 derivs[axis * 2 + i] =
855 LLVMBuildFSub(builder,
856 LLVMBuildFMul(builder, deriv_st[i], invma, ""),
857 LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
858 }
859
860 memcpy(derivs_arg, derivs, sizeof(derivs));
861 }
862
863 /* Shift the texture coordinate. This must be applied after the
864 * derivative calculation.
865 */
866 for (int i = 0; i < 2; ++i)
867 coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");
868
869 if (is_array) {
870 /* for cube arrays coord.z = coord.w(array_index) * 8 + face */
871 /* coords_arg.w component - array_index for cube arrays */
872 coords[2] = ac_build_fmad(ctx, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), coords[2]);
873 }
874
875 memcpy(coords_arg, coords, sizeof(coords));
876 }
877
878
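/* Barycentric interpolation in two hardware stages (a descriptive note, not
 * from the original file): interp.p1 computes p0 + i * dp/di, and interp.p2
 * then adds j * dp/dj, yielding the attribute value at (i, j).
 */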
879 LLVMValueRef
880 ac_build_fs_interp(struct ac_llvm_context *ctx,
881 LLVMValueRef llvm_chan,
882 LLVMValueRef attr_number,
883 LLVMValueRef params,
884 LLVMValueRef i,
885 LLVMValueRef j)
886 {
887 LLVMValueRef args[5];
888 LLVMValueRef p1;
889
890 args[0] = i;
891 args[1] = llvm_chan;
892 args[2] = attr_number;
893 args[3] = params;
894
895 p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
896 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
897
898 args[0] = p1;
899 args[1] = j;
900 args[2] = llvm_chan;
901 args[3] = attr_number;
902 args[4] = params;
903
904 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
905 ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
906 }
907
908 LLVMValueRef
909 ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
910 LLVMValueRef parameter,
911 LLVMValueRef llvm_chan,
912 LLVMValueRef attr_number,
913 LLVMValueRef params)
914 {
915 LLVMValueRef args[4];
916
917 args[0] = parameter;
918 args[1] = llvm_chan;
919 args[2] = attr_number;
920 args[3] = params;
921
922 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov",
923 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
924 }
925
926 LLVMValueRef
927 ac_build_gep0(struct ac_llvm_context *ctx,
928 LLVMValueRef base_ptr,
929 LLVMValueRef index)
930 {
931 LLVMValueRef indices[2] = {
932 ctx->i32_0,
933 index,
934 };
935 return LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
936 }
937
938 LLVMValueRef ac_build_pointer_add(struct ac_llvm_context *ctx, LLVMValueRef ptr,
939 LLVMValueRef index)
940 {
941 return LLVMBuildPointerCast(ctx->builder,
942 ac_build_gep0(ctx, ptr, index),
943 LLVMTypeOf(ptr), "");
944 }
945
946 void
947 ac_build_indexed_store(struct ac_llvm_context *ctx,
948 LLVMValueRef base_ptr, LLVMValueRef index,
949 LLVMValueRef value)
950 {
951 LLVMBuildStore(ctx->builder, value,
952 ac_build_gep0(ctx, base_ptr, index));
953 }
954
955 /**
956 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
957 * It's equivalent to doing a load from &base_ptr[index].
958 *
959 * \param base_ptr Where the array starts.
960 * \param index The element index into the array.
961 * \param uniform Whether the base_ptr and index can be assumed to be
962 * dynamically uniform (i.e. load to an SGPR)
963 * \param invariant Whether the load is invariant (no other opcodes affect it)
964 * \param no_unsigned_wraparound
965 * For all possible re-associations and re-distributions of an expression
966 * "base_ptr + index * elemsize" into "addr + offset" (excluding GEPs
967 * without inbounds in base_ptr), this parameter is true if "addr + offset"
968 * does not result in an unsigned integer wraparound. This is used for
969 * optimal code generation of 32-bit pointer arithmetic.
970 *
971 * For example, a 32-bit immediate offset that causes a 32-bit unsigned
972 * integer wraparound can't be an imm offset in s_load_dword, because
973 * the instruction performs "addr + offset" in 64 bits.
974 *
975 * Expected usage for bindless textures by chaining GEPs:
976 * // possible unsigned wraparound, don't use InBounds:
977 * ptr1 = LLVMBuildGEP(base_ptr, index);
978 * image = load(ptr1); // becomes "s_load ptr1, 0"
979 *
980 * ptr2 = LLVMBuildInBoundsGEP(ptr1, 32 / elemsize);
981 * sampler = load(ptr2); // becomes "s_load ptr1, 32" thanks to InBounds
982 */
983 static LLVMValueRef
984 ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
985 LLVMValueRef index, bool uniform, bool invariant,
986 bool no_unsigned_wraparound)
987 {
988 LLVMValueRef pointer, result;
989 LLVMValueRef indices[2] = {ctx->i32_0, index};
990
991 if (no_unsigned_wraparound &&
992 LLVMGetPointerAddressSpace(LLVMTypeOf(base_ptr)) == AC_ADDR_SPACE_CONST_32BIT)
993 pointer = LLVMBuildInBoundsGEP(ctx->builder, base_ptr, indices, 2, "");
994 else
995 pointer = LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
996
997 if (uniform)
998 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
999 result = LLVMBuildLoad(ctx->builder, pointer, "");
1000 if (invariant)
1001 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
1002 return result;
1003 }
1004
1005 LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1006 LLVMValueRef index)
1007 {
1008 return ac_build_load_custom(ctx, base_ptr, index, false, false, false);
1009 }
1010
1011 LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx,
1012 LLVMValueRef base_ptr, LLVMValueRef index)
1013 {
1014 return ac_build_load_custom(ctx, base_ptr, index, false, true, false);
1015 }
1016
1017 /* This assumes that there is no unsigned integer wraparound during the address
1018 * computation, excluding all GEPs within base_ptr. */
1019 LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx,
1020 LLVMValueRef base_ptr, LLVMValueRef index)
1021 {
1022 return ac_build_load_custom(ctx, base_ptr, index, true, true, true);
1023 }
1024
1025 /* See ac_build_load_custom() documentation. */
1026 LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx,
1027 LLVMValueRef base_ptr, LLVMValueRef index)
1028 {
1029 return ac_build_load_custom(ctx, base_ptr, index, true, true, false);
1030 }
1031
1032 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
1033 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
1034 * or v4i32 (num_channels=3,4).
1035 */
1036 void
1037 ac_build_buffer_store_dword(struct ac_llvm_context *ctx,
1038 LLVMValueRef rsrc,
1039 LLVMValueRef vdata,
1040 unsigned num_channels,
1041 LLVMValueRef voffset,
1042 LLVMValueRef soffset,
1043 unsigned inst_offset,
1044 bool glc,
1045 bool slc,
1046 bool writeonly_memory,
1047 bool swizzle_enable_hint)
1048 {
1049 /* Split 3-channel stores, because LLVM doesn't support 3-channel
1050 * intrinsics. */
1051 if (num_channels == 3) {
1052 LLVMValueRef v[3], v01;
1053
1054 for (int i = 0; i < 3; i++) {
1055 v[i] = LLVMBuildExtractElement(ctx->builder, vdata,
1056 LLVMConstInt(ctx->i32, i, 0), "");
1057 }
1058 v01 = ac_build_gather_values(ctx, v, 2);
1059
1060 ac_build_buffer_store_dword(ctx, rsrc, v01, 2, voffset,
1061 soffset, inst_offset, glc, slc,
1062 writeonly_memory, swizzle_enable_hint);
1063 ac_build_buffer_store_dword(ctx, rsrc, v[2], 1, voffset,
1064 soffset, inst_offset + 8,
1065 glc, slc,
1066 writeonly_memory, swizzle_enable_hint);
1067 return;
1068 }
1069
1070 /* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
1071 * (voffset is swizzled, but soffset isn't swizzled).
1072 * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
1073 */
1074 if (!swizzle_enable_hint) {
1075 LLVMValueRef offset = soffset;
1076
1077 static const char *types[] = {"f32", "v2f32", "v4f32"};
1078
1079 if (inst_offset)
1080 offset = LLVMBuildAdd(ctx->builder, offset,
1081 LLVMConstInt(ctx->i32, inst_offset, 0), "");
1082 if (voffset)
1083 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1084
1085 LLVMValueRef args[] = {
1086 ac_to_float(ctx, vdata),
1087 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1088 ctx->i32_0,
1089 offset,
1090 LLVMConstInt(ctx->i1, glc, 0),
1091 LLVMConstInt(ctx->i1, slc, 0),
1092 };
1093
1094 char name[256];
1095 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s",
1096 types[CLAMP(num_channels, 1, 3) - 1]);
1097
1098 ac_build_intrinsic(ctx, name, ctx->voidt,
1099 args, ARRAY_SIZE(args),
1100 writeonly_memory ?
1101 AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
1102 AC_FUNC_ATTR_WRITEONLY);
1103 return;
1104 }
1105
1106 static const unsigned dfmt[] = {
1107 V_008F0C_BUF_DATA_FORMAT_32,
1108 V_008F0C_BUF_DATA_FORMAT_32_32,
1109 V_008F0C_BUF_DATA_FORMAT_32_32_32,
1110 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
1111 };
1112 static const char *types[] = {"i32", "v2i32", "v4i32"};
1113 LLVMValueRef args[] = {
1114 vdata,
1115 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1116 ctx->i32_0,
1117 voffset ? voffset : ctx->i32_0,
1118 soffset,
1119 LLVMConstInt(ctx->i32, inst_offset, 0),
1120 LLVMConstInt(ctx->i32, dfmt[num_channels - 1], 0),
1121 LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, 0),
1122 LLVMConstInt(ctx->i1, glc, 0),
1123 LLVMConstInt(ctx->i1, slc, 0),
1124 };
1125 char name[256];
1126 snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.store.%s",
1127 types[CLAMP(num_channels, 1, 3) - 1]);
1128
1129 ac_build_intrinsic(ctx, name, ctx->voidt,
1130 args, ARRAY_SIZE(args),
1131 writeonly_memory ?
1132 AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
1133 AC_FUNC_ATTR_WRITEONLY);
1134 }
1135
1136 static LLVMValueRef
1137 ac_build_buffer_load_common(struct ac_llvm_context *ctx,
1138 LLVMValueRef rsrc,
1139 LLVMValueRef vindex,
1140 LLVMValueRef voffset,
1141 unsigned num_channels,
1142 bool glc,
1143 bool slc,
1144 bool can_speculate,
1145 bool use_format)
1146 {
1147 LLVMValueRef args[] = {
1148 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1149 vindex ? vindex : ctx->i32_0,
1150 voffset,
1151 LLVMConstInt(ctx->i1, glc, 0),
1152 LLVMConstInt(ctx->i1, slc, 0)
1153 };
1154 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1155
1156 LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32};
1157 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1158 char name[256];
1159
1160 if (use_format) {
1161 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.format.%s",
1162 type_names[func]);
1163 } else {
1164 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
1165 type_names[func]);
1166 }
1167
1168 return ac_build_intrinsic(ctx, name, types[func], args,
1169 ARRAY_SIZE(args),
1170 ac_get_load_intr_attribs(can_speculate));
1171 }
1172
1173 static LLVMValueRef
1174 ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx,
1175 LLVMValueRef rsrc,
1176 LLVMValueRef vindex,
1177 LLVMValueRef voffset,
1178 LLVMValueRef soffset,
1179 unsigned num_channels,
1180 bool glc,
1181 bool slc,
1182 bool can_speculate,
1183 bool use_format,
1184 bool structurized)
1185 {
1186 LLVMValueRef args[5];
1187 int idx = 0;
1188 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1189 if (structurized)
1190 args[idx++] = vindex ? vindex : ctx->i32_0;
1191 args[idx++] = voffset ? voffset : ctx->i32_0;
1192 args[idx++] = soffset ? soffset : ctx->i32_0;
1193 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
1194 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1195
1196 LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32};
1197 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1198 const char *indexing_kind = structurized ? "struct" : "raw";
1199 char name[256];
1200
1201 if (use_format) {
1202 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.format.%s",
1203 indexing_kind, type_names[func]);
1204 } else {
1205 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.%s",
1206 indexing_kind, type_names[func]);
1207 }
1208
1209 return ac_build_intrinsic(ctx, name, types[func], args,
1210 idx,
1211 ac_get_load_intr_attribs(can_speculate));
1212 }
1213
1214 LLVMValueRef
1215 ac_build_buffer_load(struct ac_llvm_context *ctx,
1216 LLVMValueRef rsrc,
1217 int num_channels,
1218 LLVMValueRef vindex,
1219 LLVMValueRef voffset,
1220 LLVMValueRef soffset,
1221 unsigned inst_offset,
1222 unsigned glc,
1223 unsigned slc,
1224 bool can_speculate,
1225 bool allow_smem)
1226 {
1227 LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
1228 if (voffset)
1229 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1230 if (soffset)
1231 offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
1232
1233 /* TODO: VI and later generations can use SMEM with GLC=1. */
1234 if (allow_smem && !glc && !slc) {
1235 assert(vindex == NULL);
1236
1237 LLVMValueRef result[8];
1238
1239 for (int i = 0; i < num_channels; i++) {
1240 if (i) {
1241 offset = LLVMBuildAdd(ctx->builder, offset,
1242 LLVMConstInt(ctx->i32, 4, 0), "");
1243 }
1244 LLVMValueRef args[2] = {rsrc, offset};
1245 result[i] = ac_build_intrinsic(ctx, "llvm.SI.load.const.v4i32",
1246 ctx->f32, args, 2,
1247 AC_FUNC_ATTR_READNONE |
1248 AC_FUNC_ATTR_LEGACY);
1249 }
1250 if (num_channels == 1)
1251 return result[0];
1252
1253 if (num_channels == 3)
1254 result[num_channels++] = LLVMGetUndef(ctx->f32);
1255 return ac_build_gather_values(ctx, result, num_channels);
1256 }
1257
1258 return ac_build_buffer_load_common(ctx, rsrc, vindex, offset,
1259 num_channels, glc, slc,
1260 can_speculate, false);
1261 }
1262
1263 LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
1264 LLVMValueRef rsrc,
1265 LLVMValueRef vindex,
1266 LLVMValueRef voffset,
1267 unsigned num_channels,
1268 bool glc,
1269 bool can_speculate)
1270 {
1271 if (HAVE_LLVM >= 0x800) {
1272 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1273 num_channels, glc, false,
1274 can_speculate, true, true);
1275 }
1276 return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset,
1277 num_channels, glc, false,
1278 can_speculate, true);
1279 }
1280
1281 LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
1282 LLVMValueRef rsrc,
1283 LLVMValueRef vindex,
1284 LLVMValueRef voffset,
1285 unsigned num_channels,
1286 bool glc,
1287 bool can_speculate)
1288 {
1289 if (HAVE_LLVM >= 0x800) {
1290 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1291 num_channels, glc, false,
1292 can_speculate, true, true);
1293 }
1294
1295 LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 2, 0), "");
1296 LLVMValueRef stride = LLVMBuildExtractElement(ctx->builder, rsrc, ctx->i32_1, "");
1297 stride = LLVMBuildLShr(ctx->builder, stride, LLVMConstInt(ctx->i32, 16, 0), "");
1298
1299 LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->builder,
1300 LLVMBuildICmp(ctx->builder, LLVMIntUGT, elem_count, stride, ""),
1301 elem_count, stride, "");
1302
1303 LLVMValueRef new_rsrc = LLVMBuildInsertElement(ctx->builder, rsrc, new_elem_count,
1304 LLVMConstInt(ctx->i32, 2, 0), "");
1305
1306 return ac_build_buffer_load_common(ctx, new_rsrc, vindex, voffset,
1307 num_channels, glc, false,
1308 can_speculate, true);
1309 }
1310
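/* Load a single 16-bit value: issue the 32-bit tbuffer load with the
 * BUF_DATA_FORMAT_16 dword format, then truncate the i32 result to i16.
 */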
1311 LLVMValueRef
1312 ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
1313 LLVMValueRef rsrc,
1314 LLVMValueRef vindex,
1315 LLVMValueRef voffset,
1316 LLVMValueRef soffset,
1317 LLVMValueRef immoffset,
1318 LLVMValueRef glc)
1319 {
1320 const char *name = "llvm.amdgcn.tbuffer.load.i32";
1321 LLVMTypeRef type = ctx->i32;
1322 LLVMValueRef params[] = {
1323 rsrc,
1324 vindex,
1325 voffset,
1326 soffset,
1327 immoffset,
1328 LLVMConstInt(ctx->i32, V_008F0C_BUF_DATA_FORMAT_16, false),
1329 LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, false),
1330 glc,
1331 ctx->i1false,
1332 };
1333 LLVMValueRef res = ac_build_intrinsic(ctx, name, type, params, 9, 0);
1334 return LLVMBuildTrunc(ctx->builder, res, ctx->i16, "");
1335 }
1336
1337 /**
1338 * Set range metadata on an instruction. This can only be used on load and
1339 * call instructions. If you know an instruction can only produce the values
1340 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1341 * \p lo is the minimum value inclusive.
1342 * \p hi is the maximum value exclusive.
1343 */
1344 static void set_range_metadata(struct ac_llvm_context *ctx,
1345 LLVMValueRef value, unsigned lo, unsigned hi)
1346 {
1347 LLVMValueRef range_md, md_args[2];
1348 LLVMTypeRef type = LLVMTypeOf(value);
1349 LLVMContextRef context = LLVMGetTypeContext(type);
1350
1351 md_args[0] = LLVMConstInt(type, lo, false);
1352 md_args[1] = LLVMConstInt(type, hi, false);
1353 range_md = LLVMMDNodeInContext(context, md_args, 2);
1354 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1355 }
1356
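/* Compute the wave-relative lane id (0..63): mbcnt.lo/mbcnt.hi count how
 * many bits of the all-ones mask are set below the current lane, and the
 * range metadata tells LLVM the result is always less than 64.
 */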
1357 LLVMValueRef
1358 ac_get_thread_id(struct ac_llvm_context *ctx)
1359 {
1360 LLVMValueRef tid;
1361
1362 LLVMValueRef tid_args[2];
1363 tid_args[0] = LLVMConstInt(ctx->i32, 0xffffffff, false);
1364 tid_args[1] = ctx->i32_0;
1365 tid_args[1] = ac_build_intrinsic(ctx,
1366 "llvm.amdgcn.mbcnt.lo", ctx->i32,
1367 tid_args, 2, AC_FUNC_ATTR_READNONE);
1368
1369 tid = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi",
1370 ctx->i32, tid_args,
1371 2, AC_FUNC_ATTR_READNONE);
1372 set_range_metadata(ctx, tid, 0, 64);
1373 return tid;
1374 }
1375
1376 /*
1377 * SI implements derivatives using the local data store (LDS).
1378 * All writes to the LDS happen in all executing threads at
1379 * the same time. TID is the Thread ID for the current
1380 * thread and is a value between 0 and 63, representing
1381 * the thread's position in the wavefront.
1382 *
1383 * For the pixel shader, threads are grouped into quads of four pixels.
1384 * The TIDs of the pixels of a quad are:
1385 *
1386 * +------+------+
1387 * |4n + 0|4n + 1|
1388 * +------+------+
1389 * |4n + 2|4n + 3|
1390 * +------+------+
1391 *
1392 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
1393 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
1394 * the current pixel's column, and masking with 0xfffffffe yields the TID
1395 * of the left pixel of the current pixel's row.
1396 *
1397 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
1398 * adding 2 yields the TID of the pixel below the top pixel.
1399 */
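/* For example (using the masks described above): mask = 0xfffffffe with
 * idx = 1 reads tl from the left pixel and trbl from the right pixel of the
 * current row, so the subtraction below yields ddx; mask = 0xfffffffd with
 * idx = 2 yields ddy analogously.
 */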
1400 LLVMValueRef
1401 ac_build_ddxy(struct ac_llvm_context *ctx,
1402 uint32_t mask,
1403 int idx,
1404 LLVMValueRef val)
1405 {
1406 unsigned tl_lanes[4], trbl_lanes[4];
1407 LLVMValueRef tl, trbl, args[2];
1408 LLVMValueRef result;
1409
1410 for (unsigned i = 0; i < 4; ++i) {
1411 tl_lanes[i] = i & mask;
1412 trbl_lanes[i] = (i & mask) + idx;
1413 }
1414
1415 tl = ac_build_quad_swizzle(ctx, val,
1416 tl_lanes[0], tl_lanes[1],
1417 tl_lanes[2], tl_lanes[3]);
1418 trbl = ac_build_quad_swizzle(ctx, val,
1419 trbl_lanes[0], trbl_lanes[1],
1420 trbl_lanes[2], trbl_lanes[3]);
1421
1422 tl = LLVMBuildBitCast(ctx->builder, tl, ctx->f32, "");
1423 trbl = LLVMBuildBitCast(ctx->builder, trbl, ctx->f32, "");
1424 result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
1425
1426 result = ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.f32", ctx->f32,
1427 &result, 1, 0);
1428
1429 return result;
1430 }
1431
1432 void
1433 ac_build_sendmsg(struct ac_llvm_context *ctx,
1434 uint32_t msg,
1435 LLVMValueRef wave_id)
1436 {
1437 LLVMValueRef args[2];
1438 args[0] = LLVMConstInt(ctx->i32, msg, false);
1439 args[1] = wave_id;
1440 ac_build_intrinsic(ctx, "llvm.amdgcn.s.sendmsg", ctx->voidt, args, 2, 0);
1441 }
1442
1443 LLVMValueRef
1444 ac_build_imsb(struct ac_llvm_context *ctx,
1445 LLVMValueRef arg,
1446 LLVMTypeRef dst_type)
1447 {
1448 LLVMValueRef msb = ac_build_intrinsic(ctx, "llvm.amdgcn.sffbh.i32",
1449 dst_type, &arg, 1,
1450 AC_FUNC_ATTR_READNONE);
1451
1452 /* The HW returns the last bit index from MSB, but NIR/TGSI wants
1453 * the index from LSB. Invert it by doing "31 - msb". */
1454 msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
1455 msb, "");
1456
1457 LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
1458 LLVMValueRef cond = LLVMBuildOr(ctx->builder,
1459 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
1460 arg, ctx->i32_0, ""),
1461 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
1462 arg, all_ones, ""), "");
1463
1464 return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
1465 }
1466
1467 LLVMValueRef
1468 ac_build_umsb(struct ac_llvm_context *ctx,
1469 LLVMValueRef arg,
1470 LLVMTypeRef dst_type)
1471 {
1472 const char *intrin_name;
1473 LLVMTypeRef type;
1474 LLVMValueRef highest_bit;
1475 LLVMValueRef zero;
1476 unsigned bitsize;
1477
1478 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(arg));
1479 switch (bitsize) {
1480 case 64:
1481 intrin_name = "llvm.ctlz.i64";
1482 type = ctx->i64;
1483 highest_bit = LLVMConstInt(ctx->i64, 63, false);
1484 zero = ctx->i64_0;
1485 break;
1486 case 32:
1487 intrin_name = "llvm.ctlz.i32";
1488 type = ctx->i32;
1489 highest_bit = LLVMConstInt(ctx->i32, 31, false);
1490 zero = ctx->i32_0;
1491 break;
1492 case 16:
1493 intrin_name = "llvm.ctlz.i16";
1494 type = ctx->i16;
1495 highest_bit = LLVMConstInt(ctx->i16, 15, false);
1496 zero = ctx->i16_0;
1497 break;
1498 default:
1499 unreachable(!"invalid bitsize");
1500 break;
1501 }
1502
1503 LLVMValueRef params[2] = {
1504 arg,
1505 ctx->i1true,
1506 };
1507
1508 LLVMValueRef msb = ac_build_intrinsic(ctx, intrin_name, type,
1509 params, 2,
1510 AC_FUNC_ATTR_READNONE);
1511
1512 /* The HW returns the last bit index from MSB, but TGSI/NIR wants
1513 * the index from LSB. Invert it by doing "31 - msb". */
1514 msb = LLVMBuildSub(ctx->builder, highest_bit, msb, "");
1515 msb = LLVMBuildTruncOrBitCast(ctx->builder, msb, ctx->i32, "");
1516
1517 /* check for zero */
1518 return LLVMBuildSelect(ctx->builder,
1519 LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, zero, ""),
1520 LLVMConstInt(ctx->i32, -1, true), msb, "");
1521 }
1522
1523 LLVMValueRef ac_build_fmin(struct ac_llvm_context *ctx, LLVMValueRef a,
1524 LLVMValueRef b)
1525 {
1526 LLVMValueRef args[2] = {a, b};
1527 return ac_build_intrinsic(ctx, "llvm.minnum.f32", ctx->f32, args, 2,
1528 AC_FUNC_ATTR_READNONE);
1529 }
1530
1531 LLVMValueRef ac_build_fmax(struct ac_llvm_context *ctx, LLVMValueRef a,
1532 LLVMValueRef b)
1533 {
1534 LLVMValueRef args[2] = {a, b};
1535 return ac_build_intrinsic(ctx, "llvm.maxnum.f32", ctx->f32, args, 2,
1536 AC_FUNC_ATTR_READNONE);
1537 }
1538
1539 LLVMValueRef ac_build_imin(struct ac_llvm_context *ctx, LLVMValueRef a,
1540 LLVMValueRef b)
1541 {
1542 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSLE, a, b, "");
1543 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1544 }
1545
1546 LLVMValueRef ac_build_imax(struct ac_llvm_context *ctx, LLVMValueRef a,
1547 LLVMValueRef b)
1548 {
1549 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, a, b, "");
1550 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1551 }
1552
1553 LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a,
1554 LLVMValueRef b)
1555 {
1556 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
1557 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1558 }
1559
1560 LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
1561 {
1562 return ac_build_fmin(ctx, ac_build_fmax(ctx, value, ctx->f32_0),
1563 ctx->f32_1);
1564 }
1565
1566 void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
1567 {
1568 LLVMValueRef args[9];
1569
1570 args[0] = LLVMConstInt(ctx->i32, a->target, 0);
1571 args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
1572
1573 if (a->compr) {
1574 LLVMTypeRef i16 = LLVMInt16TypeInContext(ctx->context);
1575 LLVMTypeRef v2i16 = LLVMVectorType(i16, 2);
1576
1577 args[2] = LLVMBuildBitCast(ctx->builder, a->out[0],
1578 v2i16, "");
1579 args[3] = LLVMBuildBitCast(ctx->builder, a->out[1],
1580 v2i16, "");
1581 args[4] = LLVMConstInt(ctx->i1, a->done, 0);
1582 args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1583
1584 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16",
1585 ctx->voidt, args, 6, 0);
1586 } else {
1587 args[2] = a->out[0];
1588 args[3] = a->out[1];
1589 args[4] = a->out[2];
1590 args[5] = a->out[3];
1591 args[6] = LLVMConstInt(ctx->i1, a->done, 0);
1592 args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1593
1594 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32",
1595 ctx->voidt, args, 8, 0);
1596 }
1597 }
1598
1599 void ac_build_export_null(struct ac_llvm_context *ctx)
1600 {
1601 struct ac_export_args args;
1602
1603 args.enabled_channels = 0x0; /* enabled channels */
1604 args.valid_mask = 1; /* whether the EXEC mask is valid */
1605 args.done = 1; /* DONE bit */
1606 args.target = V_008DFC_SQ_EXP_NULL;
1607 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
1608 args.out[0] = LLVMGetUndef(ctx->f32); /* R */
1609 args.out[1] = LLVMGetUndef(ctx->f32); /* G */
1610 args.out[2] = LLVMGetUndef(ctx->f32); /* B */
1611 args.out[3] = LLVMGetUndef(ctx->f32); /* A */
1612
1613 ac_build_export(ctx, &args);
1614 }
1615
1616 static unsigned ac_num_coords(enum ac_image_dim dim)
1617 {
1618 switch (dim) {
1619 case ac_image_1d:
1620 return 1;
1621 case ac_image_2d:
1622 case ac_image_1darray:
1623 return 2;
1624 case ac_image_3d:
1625 case ac_image_cube:
1626 case ac_image_2darray:
1627 case ac_image_2dmsaa:
1628 return 3;
1629 case ac_image_2darraymsaa:
1630 return 4;
1631 default:
1632 unreachable("ac_num_coords: bad dim");
1633 }
1634 }
1635
1636 static unsigned ac_num_derivs(enum ac_image_dim dim)
1637 {
1638 switch (dim) {
1639 case ac_image_1d:
1640 case ac_image_1darray:
1641 return 2;
1642 case ac_image_2d:
1643 case ac_image_2darray:
1644 case ac_image_cube:
1645 return 4;
1646 case ac_image_3d:
1647 return 6;
1648 case ac_image_2dmsaa:
1649 case ac_image_2darraymsaa:
1650 default:
1651 unreachable("derivatives not supported");
1652 }
1653 }
1654
1655 static const char *get_atomic_name(enum ac_atomic_op op)
1656 {
1657 switch (op) {
1658 case ac_atomic_swap: return "swap";
1659 case ac_atomic_add: return "add";
1660 case ac_atomic_sub: return "sub";
1661 case ac_atomic_smin: return "smin";
1662 case ac_atomic_umin: return "umin";
1663 case ac_atomic_smax: return "smax";
1664 case ac_atomic_umax: return "umax";
1665 case ac_atomic_and: return "and";
1666 case ac_atomic_or: return "or";
1667 case ac_atomic_xor: return "xor";
1668 }
1669 unreachable("bad atomic op");
1670 }
1671
1672 LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx,
1673 struct ac_image_args *a)
1674 {
1675 const char *overload[3] = { "", "", "" };
1676 unsigned num_overloads = 0;
1677 LLVMValueRef args[18];
1678 unsigned num_args = 0;
1679 enum ac_image_dim dim = a->dim;
1680
1681 assert(!a->lod || a->lod == ctx->i32_0 || a->lod == ctx->f32_0 ||
1682 !a->level_zero);
1683 assert((a->opcode != ac_image_get_resinfo && a->opcode != ac_image_load_mip &&
1684 a->opcode != ac_image_store_mip) ||
1685 a->lod);
1686 assert(a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
1687 (!a->compare && !a->offset));
1688 assert((a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
1689 a->opcode == ac_image_get_lod) ||
1690 !a->bias);
1691 assert((a->bias ? 1 : 0) +
1692 (a->lod ? 1 : 0) +
1693 (a->level_zero ? 1 : 0) +
1694 (a->derivs[0] ? 1 : 0) <= 1);
1695
1696 if (a->opcode == ac_image_get_lod) {
1697 switch (dim) {
1698 case ac_image_1darray:
1699 dim = ac_image_1d;
1700 break;
1701 case ac_image_2darray:
1702 case ac_image_cube:
1703 dim = ac_image_2d;
1704 break;
1705 default:
1706 break;
1707 }
1708 }
1709
1710 bool sample = a->opcode == ac_image_sample ||
1711 a->opcode == ac_image_gather4 ||
1712 a->opcode == ac_image_get_lod;
1713 bool atomic = a->opcode == ac_image_atomic ||
1714 a->opcode == ac_image_atomic_cmpswap;
1715 LLVMTypeRef coord_type = sample ? ctx->f32 : ctx->i32;
1716
1717 if (atomic || a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
1718 args[num_args++] = a->data[0];
1719 if (a->opcode == ac_image_atomic_cmpswap)
1720 args[num_args++] = a->data[1];
1721 }
1722
1723 if (!atomic)
1724 args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, false);
1725
1726 if (a->offset)
1727 args[num_args++] = ac_to_integer(ctx, a->offset);
1728 if (a->bias) {
1729 args[num_args++] = ac_to_float(ctx, a->bias);
1730 overload[num_overloads++] = ".f32";
1731 }
1732 if (a->compare)
1733 args[num_args++] = ac_to_float(ctx, a->compare);
1734 if (a->derivs[0]) {
1735 unsigned count = ac_num_derivs(dim);
1736 for (unsigned i = 0; i < count; ++i)
1737 args[num_args++] = ac_to_float(ctx, a->derivs[i]);
1738 overload[num_overloads++] = ".f32";
1739 }
1740 unsigned num_coords =
1741 a->opcode != ac_image_get_resinfo ? ac_num_coords(dim) : 0;
1742 for (unsigned i = 0; i < num_coords; ++i)
1743 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->coords[i], coord_type, "");
1744 if (a->lod)
1745 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->lod, coord_type, "");
1746 overload[num_overloads++] = sample ? ".f32" : ".i32";
1747
1748 args[num_args++] = a->resource;
1749 if (sample) {
1750 args[num_args++] = a->sampler;
1751 args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, false);
1752 }
1753
1754 args[num_args++] = ctx->i32_0; /* texfailctrl */
1755 args[num_args++] = LLVMConstInt(ctx->i32, a->cache_policy, false);
1756
1757 const char *name;
1758 const char *atomic_subop = "";
1759 switch (a->opcode) {
1760 case ac_image_sample: name = "sample"; break;
1761 case ac_image_gather4: name = "gather4"; break;
1762 case ac_image_load: name = "load"; break;
1763 case ac_image_load_mip: name = "load.mip"; break;
1764 case ac_image_store: name = "store"; break;
1765 case ac_image_store_mip: name = "store.mip"; break;
1766 case ac_image_atomic:
1767 name = "atomic.";
1768 atomic_subop = get_atomic_name(a->atomic);
1769 break;
1770 case ac_image_atomic_cmpswap:
1771 name = "atomic.";
1772 atomic_subop = "cmpswap";
1773 break;
1774 case ac_image_get_lod: name = "getlod"; break;
1775 case ac_image_get_resinfo: name = "getresinfo"; break;
1776 default: unreachable("invalid image opcode");
1777 }
1778
1779 const char *dimname;
1780 switch (dim) {
1781 case ac_image_1d: dimname = "1d"; break;
1782 case ac_image_2d: dimname = "2d"; break;
1783 case ac_image_3d: dimname = "3d"; break;
1784 case ac_image_cube: dimname = "cube"; break;
1785 case ac_image_1darray: dimname = "1darray"; break;
1786 case ac_image_2darray: dimname = "2darray"; break;
1787 case ac_image_2dmsaa: dimname = "2dmsaa"; break;
1788 case ac_image_2darraymsaa: dimname = "2darraymsaa"; break;
1789 default: unreachable("invalid dim");
1790 }
1791
1792 bool lod_suffix =
1793 a->lod && (a->opcode == ac_image_sample || a->opcode == ac_image_gather4);
1794 char intr_name[96];
1795 snprintf(intr_name, sizeof(intr_name),
1796 "llvm.amdgcn.image.%s%s" /* base name */
1797 "%s%s%s" /* sample/gather modifiers */
1798 ".%s.%s%s%s%s", /* dimension and type overloads */
1799 name, atomic_subop,
1800 a->compare ? ".c" : "",
1801 a->bias ? ".b" :
1802 lod_suffix ? ".l" :
1803 a->derivs[0] ? ".d" :
1804 a->level_zero ? ".lz" : "",
1805 a->offset ? ".o" : "",
1806 dimname,
1807 atomic ? "i32" : "v4f32",
1808 overload[0], overload[1], overload[2]);
1809
1810 LLVMTypeRef retty;
1811 if (atomic)
1812 retty = ctx->i32;
1813 else if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip)
1814 retty = ctx->voidt;
1815 else
1816 retty = ctx->v4f32;
1817
1818 LLVMValueRef result =
1819 ac_build_intrinsic(ctx, intr_name, retty, args, num_args,
1820 a->attributes);
1821 if (!sample && retty == ctx->v4f32) {
1822 result = LLVMBuildBitCast(ctx->builder, result,
1823 ctx->v4i32, "");
1824 }
1825 return result;
1826 }
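
/* A minimal usage sketch (illustrative, not part of the original file):
 * building a basic 2D sample with an implicit LOD of zero through
 * ac_build_image_opcode. Only fields the builder above reads are set;
 * the descriptor and coordinate values are assumed to come from the
 * caller. With these arguments the intrinsic name resolves to
 * "llvm.amdgcn.image.sample.lz.2d.v4f32.f32".
 */
static LLVMValueRef
example_sample_2d_lz(struct ac_llvm_context *ctx,
                     LLVMValueRef resource, LLVMValueRef sampler,
                     LLVMValueRef s, LLVMValueRef t)
{
        struct ac_image_args args = {};

        args.opcode = ac_image_sample;
        args.dim = ac_image_2d;
        args.dmask = 0xf;          /* fetch all four channels */
        args.resource = resource;
        args.sampler = sampler;
        args.coords[0] = s;        /* sample coordinates are floats */
        args.coords[1] = t;
        args.level_zero = true;    /* selects the ".lz" variant */
        return ac_build_image_opcode(ctx, &args);
}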
1827
1828 LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx,
1829 LLVMValueRef args[2])
1830 {
1831 LLVMTypeRef v2f16 =
1832 LLVMVectorType(LLVMHalfTypeInContext(ctx->context), 2);
1833
1834 return ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz", v2f16,
1835 args, 2, AC_FUNC_ATTR_READNONE);
1836 }
1837
1838 LLVMValueRef ac_build_cvt_pknorm_i16(struct ac_llvm_context *ctx,
1839 LLVMValueRef args[2])
1840 {
1841 LLVMValueRef res =
1842 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.i16",
1843 ctx->v2i16, args, 2,
1844 AC_FUNC_ATTR_READNONE);
1845 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1846 }
1847
1848 LLVMValueRef ac_build_cvt_pknorm_u16(struct ac_llvm_context *ctx,
1849 LLVMValueRef args[2])
1850 {
1851 LLVMValueRef res =
1852 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.u16",
1853 ctx->v2i16, args, 2,
1854 AC_FUNC_ATTR_READNONE);
1855 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1856 }
1857
1858 /* The 8-bit and 10-bit clamping is for HW workarounds. */
1859 LLVMValueRef ac_build_cvt_pk_i16(struct ac_llvm_context *ctx,
1860 LLVMValueRef args[2], unsigned bits, bool hi)
1861 {
1862 assert(bits == 8 || bits == 10 || bits == 16);
1863
1864 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1865 bits == 8 ? 127 : bits == 10 ? 511 : 32767, 0);
1866 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
1867 bits == 8 ? -128 : bits == 10 ? -512 : -32768, 0);
1868 LLVMValueRef max_alpha =
1869 bits != 10 ? max_rgb : ctx->i32_1;
1870 LLVMValueRef min_alpha =
1871 bits != 10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
1872
1873 /* Clamp. */
1874 if (bits != 16) {
1875 for (int i = 0; i < 2; i++) {
1876 bool alpha = hi && i == 1;
1877 args[i] = ac_build_imin(ctx, args[i],
1878 alpha ? max_alpha : max_rgb);
1879 args[i] = ac_build_imax(ctx, args[i],
1880 alpha ? min_alpha : min_rgb);
1881 }
1882 }
1883
1884 LLVMValueRef res =
1885 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.i16",
1886 ctx->v2i16, args, 2,
1887 AC_FUNC_ATTR_READNONE);
1888 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1889 }
1890
1891 /* The 8-bit and 10-bit clamping is for HW workarounds. */
1892 LLVMValueRef ac_build_cvt_pk_u16(struct ac_llvm_context *ctx,
1893 LLVMValueRef args[2], unsigned bits, bool hi)
1894 {
1895 assert(bits == 8 || bits == 10 || bits == 16);
1896
1897 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
1898 bits == 8 ? 255 : bits == 10 ? 1023 : 65535, 0);
1899 LLVMValueRef max_alpha =
1900 bits != 10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
1901
1902 /* Clamp. */
1903 if (bits != 16) {
1904 for (int i = 0; i < 2; i++) {
1905 bool alpha = hi && i == 1;
1906 args[i] = ac_build_umin(ctx, args[i],
1907 alpha ? max_alpha : max_rgb);
1908 }
1909 }
1910
1911 LLVMValueRef res =
1912 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.u16",
1913 ctx->v2i16, args, 2,
1914 AC_FUNC_ATTR_READNONE);
1915 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
1916 }
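
/* Note on the 10-bit case in the two helpers above: the alpha channel
 * is clamped to [-2, 1] (pk.i16) or [0, 3] (pk.u16), i.e. the
 * representable range of a 2-bit alpha component as found in
 * 2_10_10_10-style formats. */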
1917
1918 LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
1919 {
1920 return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1,
1921 &i1, 1, AC_FUNC_ATTR_READNONE);
1922 }
1923
1924 void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
1925 {
1926 ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt,
1927 &i1, 1, 0);
1928 }
1929
1930 LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input,
1931 LLVMValueRef offset, LLVMValueRef width,
1932 bool is_signed)
1933 {
1934 LLVMValueRef args[] = {
1935 input,
1936 offset,
1937 width,
1938 };
1939
1940 return ac_build_intrinsic(ctx,
1941 is_signed ? "llvm.amdgcn.sbfe.i32" :
1942 "llvm.amdgcn.ubfe.i32",
1943 ctx->i32, args, 3,
1944 AC_FUNC_ATTR_READNONE);
1945 }
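
/* Scalar reference for the bitfield-extract intrinsics above (an
 * illustrative sketch, assuming 0 < width, offset + width <= 32 and,
 * for the unsigned variant, width < 32; the signed variant relies on
 * arithmetic right shift of signed values, as on GCC/Clang): ubfe
 * zero-extends the extracted field, sbfe sign-extends from its top bit.
 */
static inline uint32_t ubfe_ref(uint32_t x, unsigned offset, unsigned width)
{
        return (x >> offset) & ((1u << width) - 1);
}

static inline int32_t sbfe_ref(uint32_t x, unsigned offset, unsigned width)
{
        return (int32_t)(x << (32 - offset - width)) >> (32 - width);
}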
1946
1947 LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0,
1948 LLVMValueRef s1, LLVMValueRef s2)
1949 {
1950 return LLVMBuildAdd(ctx->builder,
1951 LLVMBuildMul(ctx->builder, s0, s1, ""), s2, "");
1952 }
1953
1954 LLVMValueRef ac_build_fmad(struct ac_llvm_context *ctx, LLVMValueRef s0,
1955 LLVMValueRef s1, LLVMValueRef s2)
1956 {
1957 return LLVMBuildFAdd(ctx->builder,
1958 LLVMBuildFMul(ctx->builder, s0, s1, ""), s2, "");
1959 }
1960
1961 void ac_build_waitcnt(struct ac_llvm_context *ctx, unsigned simm16)
1962 {
1963 LLVMValueRef args[1] = {
1964 LLVMConstInt(ctx->i32, simm16, false),
1965 };
1966 ac_build_intrinsic(ctx, "llvm.amdgcn.s.waitcnt",
1967 ctx->voidt, args, 1, 0);
1968 }
1969
1970 LLVMValueRef ac_build_fract(struct ac_llvm_context *ctx, LLVMValueRef src0,
1971 unsigned bitsize)
1972 {
1973 LLVMTypeRef type;
1974 	const char *intr;
1975
1976 if (bitsize == 32) {
1977 intr = "llvm.floor.f32";
1978 type = ctx->f32;
1979 } else {
1980 intr = "llvm.floor.f64";
1981 type = ctx->f64;
1982 }
1983
1984 LLVMValueRef params[] = {
1985 src0,
1986 };
1987 LLVMValueRef floor = ac_build_intrinsic(ctx, intr, type, params, 1,
1988 AC_FUNC_ATTR_READNONE);
1989 return LLVMBuildFSub(ctx->builder, src0, floor, "");
1990 }
1991
1992 LLVMValueRef ac_build_isign(struct ac_llvm_context *ctx, LLVMValueRef src0,
1993 unsigned bitsize)
1994 {
1995 LLVMValueRef cmp, val, zero, one;
1996 LLVMTypeRef type;
1997
1998 switch (bitsize) {
1999 case 64:
2000 type = ctx->i64;
2001 zero = ctx->i64_0;
2002 one = ctx->i64_1;
2003 break;
2004 case 32:
2005 type = ctx->i32;
2006 zero = ctx->i32_0;
2007 one = ctx->i32_1;
2008 break;
2009 case 16:
2010 type = ctx->i16;
2011 zero = ctx->i16_0;
2012 one = ctx->i16_1;
2013 break;
2014 default:
2015 		unreachable("invalid bitsize");
2016 break;
2017 }
2018
2019 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, src0, zero, "");
2020 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2021 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGE, val, zero, "");
2022 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstInt(type, -1, true), "");
2023 return val;
2024 }
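
/* Scalar reference for the two-select sequence in ac_build_isign (an
 * illustrative sketch): the first select clamps positive inputs to 1,
 * the second maps the remaining negative values to -1. */
static inline int32_t isign_ref(int32_t x)
{
        int32_t val = x > 0 ? 1 : x;    /* positives -> 1 */
        return val >= 0 ? val : -1;     /* negatives -> -1, zero stays 0 */
}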
2025
2026 LLVMValueRef ac_build_fsign(struct ac_llvm_context *ctx, LLVMValueRef src0,
2027 unsigned bitsize)
2028 {
2029 LLVMValueRef cmp, val, zero, one;
2030 LLVMTypeRef type;
2031
2032 if (bitsize == 32) {
2033 type = ctx->f32;
2034 zero = ctx->f32_0;
2035 one = ctx->f32_1;
2036 } else {
2037 type = ctx->f64;
2038 zero = ctx->f64_0;
2039 one = ctx->f64_1;
2040 }
2041
2042 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGT, src0, zero, "");
2043 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2044 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGE, val, zero, "");
2045 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstReal(type, -1.0), "");
2046 return val;
2047 }
2048
2049 LLVMValueRef ac_build_bit_count(struct ac_llvm_context *ctx, LLVMValueRef src0)
2050 {
2051 LLVMValueRef result;
2052 unsigned bitsize;
2053
2054 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2055
2056 switch (bitsize) {
2057 case 64:
2058 result = ac_build_intrinsic(ctx, "llvm.ctpop.i64", ctx->i64,
2059 (LLVMValueRef []) { src0 }, 1,
2060 AC_FUNC_ATTR_READNONE);
2061
2062 result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2063 break;
2064 case 32:
2065 result = ac_build_intrinsic(ctx, "llvm.ctpop.i32", ctx->i32,
2066 (LLVMValueRef []) { src0 }, 1,
2067 AC_FUNC_ATTR_READNONE);
2068 break;
2069 case 16:
2070 result = ac_build_intrinsic(ctx, "llvm.ctpop.i16", ctx->i16,
2071 (LLVMValueRef []) { src0 }, 1,
2072 AC_FUNC_ATTR_READNONE);
2073 break;
2074 default:
2075 		unreachable("invalid bitsize");
2076 break;
2077 }
2078
2079 return result;
2080 }
2081
2082 LLVMValueRef ac_build_bitfield_reverse(struct ac_llvm_context *ctx,
2083 LLVMValueRef src0)
2084 {
2085 LLVMValueRef result;
2086 unsigned bitsize;
2087
2088 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2089
2090 switch (bitsize) {
2091 case 32:
2092 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i32", ctx->i32,
2093 (LLVMValueRef []) { src0 }, 1,
2094 AC_FUNC_ATTR_READNONE);
2095 break;
2096 case 16:
2097 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i16", ctx->i16,
2098 (LLVMValueRef []) { src0 }, 1,
2099 AC_FUNC_ATTR_READNONE);
2100 break;
2101 default:
2102 		unreachable("invalid bitsize");
2103 break;
2104 }
2105
2106 return result;
2107 }
2108
2109 #define AC_EXP_TARGET 0
2110 #define AC_EXP_ENABLED_CHANNELS 1
2111 #define AC_EXP_OUT0 2
2112
2113 enum ac_ir_type {
2114 AC_IR_UNDEF,
2115 AC_IR_CONST,
2116 AC_IR_VALUE,
2117 };
2118
2119 struct ac_vs_exp_chan
2120 {
2121 LLVMValueRef value;
2122 float const_float;
2123 enum ac_ir_type type;
2124 };
2125
2126 struct ac_vs_exp_inst {
2127 unsigned offset;
2128 LLVMValueRef inst;
2129 struct ac_vs_exp_chan chan[4];
2130 };
2131
2132 struct ac_vs_exports {
2133 unsigned num;
2134 struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
2135 };
2136
2137 /* Return true if the PARAM export has been eliminated. */
2138 static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset,
2139 uint32_t num_outputs,
2140 struct ac_vs_exp_inst *exp)
2141 {
2142 unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
2143 bool is_zero[4] = {}, is_one[4] = {};
2144
2145 for (i = 0; i < 4; i++) {
2146 /* It's a constant expression. Undef outputs are eliminated too. */
2147 if (exp->chan[i].type == AC_IR_UNDEF) {
2148 is_zero[i] = true;
2149 is_one[i] = true;
2150 } else if (exp->chan[i].type == AC_IR_CONST) {
2151 if (exp->chan[i].const_float == 0)
2152 is_zero[i] = true;
2153 else if (exp->chan[i].const_float == 1)
2154 is_one[i] = true;
2155 else
2156 return false; /* other constant */
2157 } else
2158 return false;
2159 }
2160
2161 /* Only certain combinations of 0 and 1 can be eliminated. */
2162 if (is_zero[0] && is_zero[1] && is_zero[2])
2163 default_val = is_zero[3] ? 0 : 1;
2164 else if (is_one[0] && is_one[1] && is_one[2])
2165 default_val = is_zero[3] ? 2 : 3;
2166 else
2167 return false;
2168
2169 /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
2170 LLVMInstructionEraseFromParent(exp->inst);
2171
2172 /* Change OFFSET to DEFAULT_VAL. */
2173 for (i = 0; i < num_outputs; i++) {
2174 if (vs_output_param_offset[i] == exp->offset) {
2175 vs_output_param_offset[i] =
2176 AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
2177 break;
2178 }
2179 }
2180 return true;
2181 }
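
/* The four DEFAULT_VAL encodings reachable above, as offsets from
 * AC_EXP_PARAM_DEFAULT_VAL_0000 (x, y, z, w):
 *   0 -> (0, 0, 0, 0)
 *   1 -> (0, 0, 0, 1)
 *   2 -> (1, 1, 1, 0)
 *   3 -> (1, 1, 1, 1)
 */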
2182
2183 static bool ac_eliminate_duplicated_output(struct ac_llvm_context *ctx,
2184 uint8_t *vs_output_param_offset,
2185 uint32_t num_outputs,
2186 struct ac_vs_exports *processed,
2187 struct ac_vs_exp_inst *exp)
2188 {
2189 unsigned p, copy_back_channels = 0;
2190
2191 /* See if the output is already in the list of processed outputs.
2192 * The LLVMValueRef comparison relies on SSA.
2193 */
2194 for (p = 0; p < processed->num; p++) {
2195 bool different = false;
2196
2197 for (unsigned j = 0; j < 4; j++) {
2198 struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
2199 struct ac_vs_exp_chan *c2 = &exp->chan[j];
2200
2201 /* Treat undef as a match. */
2202 if (c2->type == AC_IR_UNDEF)
2203 continue;
2204
2205 /* If c1 is undef but c2 isn't, we can copy c2 to c1
2206 * and consider the instruction duplicated.
2207 */
2208 if (c1->type == AC_IR_UNDEF) {
2209 copy_back_channels |= 1 << j;
2210 continue;
2211 }
2212
2213 /* Test whether the channels are not equal. */
2214 if (c1->type != c2->type ||
2215 (c1->type == AC_IR_CONST &&
2216 c1->const_float != c2->const_float) ||
2217 (c1->type == AC_IR_VALUE &&
2218 c1->value != c2->value)) {
2219 different = true;
2220 break;
2221 }
2222 }
2223 if (!different)
2224 break;
2225
2226 copy_back_channels = 0;
2227 }
2228 if (p == processed->num)
2229 return false;
2230
2231 /* If a match was found, but the matching export has undef where the new
2232 * one has a normal value, copy the normal value to the undef channel.
2233 */
2234 struct ac_vs_exp_inst *match = &processed->exp[p];
2235
2236 /* Get current enabled channels mask. */
2237 LLVMValueRef arg = LLVMGetOperand(match->inst, AC_EXP_ENABLED_CHANNELS);
2238 unsigned enabled_channels = LLVMConstIntGetZExtValue(arg);
2239
2240 while (copy_back_channels) {
2241 unsigned chan = u_bit_scan(&copy_back_channels);
2242
2243 assert(match->chan[chan].type == AC_IR_UNDEF);
2244 LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan,
2245 exp->chan[chan].value);
2246 match->chan[chan] = exp->chan[chan];
2247
2248 		/* Update the number of enabled channels because the original mask
2249 * is not always 0xf.
2250 */
2251 enabled_channels |= (1 << chan);
2252 LLVMSetOperand(match->inst, AC_EXP_ENABLED_CHANNELS,
2253 LLVMConstInt(ctx->i32, enabled_channels, 0));
2254 }
2255
2256 /* The PARAM export is duplicated. Kill it. */
2257 LLVMInstructionEraseFromParent(exp->inst);
2258
2259 /* Change OFFSET to the matching export. */
2260 for (unsigned i = 0; i < num_outputs; i++) {
2261 if (vs_output_param_offset[i] == exp->offset) {
2262 vs_output_param_offset[i] = match->offset;
2263 break;
2264 }
2265 }
2266 return true;
2267 }
2268
2269 void ac_optimize_vs_outputs(struct ac_llvm_context *ctx,
2270 LLVMValueRef main_fn,
2271 uint8_t *vs_output_param_offset,
2272 uint32_t num_outputs,
2273 uint8_t *num_param_exports)
2274 {
2275 LLVMBasicBlockRef bb;
2276 bool removed_any = false;
2277 struct ac_vs_exports exports;
2278
2279 exports.num = 0;
2280
2281 /* Process all LLVM instructions. */
2282 bb = LLVMGetFirstBasicBlock(main_fn);
2283 while (bb) {
2284 LLVMValueRef inst = LLVMGetFirstInstruction(bb);
2285
2286 while (inst) {
2287 LLVMValueRef cur = inst;
2288 inst = LLVMGetNextInstruction(inst);
2289 struct ac_vs_exp_inst exp;
2290
2291 if (LLVMGetInstructionOpcode(cur) != LLVMCall)
2292 continue;
2293
2294 LLVMValueRef callee = ac_llvm_get_called_value(cur);
2295
2296 if (!ac_llvm_is_function(callee))
2297 continue;
2298
2299 const char *name = LLVMGetValueName(callee);
2300 unsigned num_args = LLVMCountParams(callee);
2301
2302 /* Check if this is an export instruction. */
2303 if ((num_args != 9 && num_args != 8) ||
2304 (strcmp(name, "llvm.SI.export") &&
2305 strcmp(name, "llvm.amdgcn.exp.f32")))
2306 continue;
2307
2308 LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
2309 unsigned target = LLVMConstIntGetZExtValue(arg);
2310
2311 if (target < V_008DFC_SQ_EXP_PARAM)
2312 continue;
2313
2314 target -= V_008DFC_SQ_EXP_PARAM;
2315
2316 /* Parse the instruction. */
2317 memset(&exp, 0, sizeof(exp));
2318 exp.offset = target;
2319 exp.inst = cur;
2320
2321 for (unsigned i = 0; i < 4; i++) {
2322 LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);
2323
2324 exp.chan[i].value = v;
2325
2326 if (LLVMIsUndef(v)) {
2327 exp.chan[i].type = AC_IR_UNDEF;
2328 } else if (LLVMIsAConstantFP(v)) {
2329 LLVMBool loses_info;
2330 exp.chan[i].type = AC_IR_CONST;
2331 exp.chan[i].const_float =
2332 LLVMConstRealGetDouble(v, &loses_info);
2333 } else {
2334 exp.chan[i].type = AC_IR_VALUE;
2335 }
2336 }
2337
2338 /* Eliminate constant and duplicated PARAM exports. */
2339 if (ac_eliminate_const_output(vs_output_param_offset,
2340 num_outputs, &exp) ||
2341 ac_eliminate_duplicated_output(ctx,
2342 vs_output_param_offset,
2343 num_outputs, &exports,
2344 &exp)) {
2345 removed_any = true;
2346 } else {
2347 exports.exp[exports.num++] = exp;
2348 }
2349 }
2350 bb = LLVMGetNextBasicBlock(bb);
2351 }
2352
2353 /* Remove holes in export memory due to removed PARAM exports.
2354 * This is done by renumbering all PARAM exports.
2355 */
2356 if (removed_any) {
2357 uint8_t old_offset[VARYING_SLOT_MAX];
2358 unsigned out, i;
2359
2360 /* Make a copy of the offsets. We need the old version while
2361 * we are modifying some of them. */
2362 memcpy(old_offset, vs_output_param_offset,
2363 sizeof(old_offset));
2364
2365 for (i = 0; i < exports.num; i++) {
2366 unsigned offset = exports.exp[i].offset;
2367
2368 /* Update vs_output_param_offset. Multiple outputs can
2369 * have the same offset.
2370 */
2371 for (out = 0; out < num_outputs; out++) {
2372 if (old_offset[out] == offset)
2373 vs_output_param_offset[out] = i;
2374 }
2375
2376 /* Change the PARAM offset in the instruction. */
2377 LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
2378 LLVMConstInt(ctx->i32,
2379 V_008DFC_SQ_EXP_PARAM + i, 0));
2380 }
2381 *num_param_exports = exports.num;
2382 }
2383 }
2384
2385 void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
2386 {
2387 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
2388 ac_build_intrinsic(ctx,
2389 "llvm.amdgcn.init.exec", ctx->voidt,
2390 &full_mask, 1, AC_FUNC_ATTR_CONVERGENT);
2391 }
2392
2393 void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
2394 {
2395 unsigned lds_size = ctx->chip_class >= CIK ? 65536 : 32768;
2396 ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32_0,
2397 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_ADDR_SPACE_LDS),
2398 "lds");
2399 }
2400
2401 LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx,
2402 LLVMValueRef dw_addr)
2403 {
2404 return ac_build_load(ctx, ctx->lds, dw_addr);
2405 }
2406
2407 void ac_lds_store(struct ac_llvm_context *ctx,
2408 LLVMValueRef dw_addr,
2409 LLVMValueRef value)
2410 {
2411 value = ac_to_integer(ctx, value);
2412 ac_build_indexed_store(ctx, ctx->lds,
2413 dw_addr, value);
2414 }
2415
2416 LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx,
2417 LLVMTypeRef dst_type,
2418 LLVMValueRef src0)
2419 {
2420 unsigned src0_bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2421 const char *intrin_name;
2422 LLVMTypeRef type;
2423 LLVMValueRef zero;
2424
2425 switch (src0_bitsize) {
2426 case 64:
2427 intrin_name = "llvm.cttz.i64";
2428 type = ctx->i64;
2429 zero = ctx->i64_0;
2430 break;
2431 case 32:
2432 intrin_name = "llvm.cttz.i32";
2433 type = ctx->i32;
2434 zero = ctx->i32_0;
2435 break;
2436 case 16:
2437 intrin_name = "llvm.cttz.i16";
2438 type = ctx->i16;
2439 zero = ctx->i16_0;
2440 break;
2441 default:
2442 		unreachable("invalid bitsize");
2443 }
2444
2445 LLVMValueRef params[2] = {
2446 src0,
2447
2448 /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
2449 * add special code to check for x=0. The reason is that
2450 * the LLVM behavior for x=0 is different from what we
2451 * need here. However, LLVM also assumes that ffs(x) is
2452 * in [0, 31], but GLSL expects that ffs(0) = -1, so
2453 * a conditional assignment to handle 0 is still required.
2454 *
2455 * The hardware already implements the correct behavior.
2456 */
2457 ctx->i1true,
2458 };
2459
2460 LLVMValueRef lsb = ac_build_intrinsic(ctx, intrin_name, type,
2461 params, 2,
2462 AC_FUNC_ATTR_READNONE);
2463
2464 if (src0_bitsize == 64) {
2465 lsb = LLVMBuildTrunc(ctx->builder, lsb, ctx->i32, "");
2466 }
2467
2468 /* TODO: We need an intrinsic to skip this conditional. */
2469 /* Check for zero: */
2470 return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
2471 LLVMIntEQ, src0,
2472 zero, ""),
2473 LLVMConstInt(ctx->i32, -1, 0), lsb, "");
2474 }
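
/* Scalar reference for the behavior implemented above (an illustrative
 * sketch using the GCC/Clang builtin): count trailing zeros for
 * non-zero inputs, -1 for zero, matching GLSL findLSB(). */
static inline int32_t find_lsb_ref(uint32_t x)
{
        return x ? __builtin_ctz(x) : -1;
}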
2475
2476 LLVMTypeRef ac_array_in_const_addr_space(LLVMTypeRef elem_type)
2477 {
2478 return LLVMPointerType(LLVMArrayType(elem_type, 0),
2479 AC_ADDR_SPACE_CONST);
2480 }
2481
2482 LLVMTypeRef ac_array_in_const32_addr_space(LLVMTypeRef elem_type)
2483 {
2484 return LLVMPointerType(LLVMArrayType(elem_type, 0),
2485 AC_ADDR_SPACE_CONST_32BIT);
2486 }
2487
2488 static struct ac_llvm_flow *
2489 get_current_flow(struct ac_llvm_context *ctx)
2490 {
2491 if (ctx->flow_depth > 0)
2492 return &ctx->flow[ctx->flow_depth - 1];
2493 return NULL;
2494 }
2495
2496 static struct ac_llvm_flow *
2497 get_innermost_loop(struct ac_llvm_context *ctx)
2498 {
2499 for (unsigned i = ctx->flow_depth; i > 0; --i) {
2500 if (ctx->flow[i - 1].loop_entry_block)
2501 return &ctx->flow[i - 1];
2502 }
2503 return NULL;
2504 }
2505
2506 static struct ac_llvm_flow *
2507 push_flow(struct ac_llvm_context *ctx)
2508 {
2509 struct ac_llvm_flow *flow;
2510
2511 if (ctx->flow_depth >= ctx->flow_depth_max) {
2512 unsigned new_max = MAX2(ctx->flow_depth << 1,
2513 AC_LLVM_INITIAL_CF_DEPTH);
2514
2515 ctx->flow = realloc(ctx->flow, new_max * sizeof(*ctx->flow));
2516 ctx->flow_depth_max = new_max;
2517 }
2518
2519 flow = &ctx->flow[ctx->flow_depth];
2520 ctx->flow_depth++;
2521
2522 flow->next_block = NULL;
2523 flow->loop_entry_block = NULL;
2524 return flow;
2525 }
2526
2527 static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base,
2528 int label_id)
2529 {
2530 char buf[32];
2531 snprintf(buf, sizeof(buf), "%s%d", base, label_id);
2532 LLVMSetValueName(LLVMBasicBlockAsValue(bb), buf);
2533 }
2534
2535 /* Append a basic block at the level of the parent flow.
2536 */
2537 static LLVMBasicBlockRef append_basic_block(struct ac_llvm_context *ctx,
2538 const char *name)
2539 {
2540 assert(ctx->flow_depth >= 1);
2541
2542 if (ctx->flow_depth >= 2) {
2543 struct ac_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
2544
2545 return LLVMInsertBasicBlockInContext(ctx->context,
2546 flow->next_block, name);
2547 }
2548
2549 LLVMValueRef main_fn =
2550 LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->builder));
2551 return LLVMAppendBasicBlockInContext(ctx->context, main_fn, name);
2552 }
2553
2554 /* Emit a branch to the given default target for the current block if
2555 * applicable -- that is, if the current block does not already contain a
2556 * branch from a break or continue.
2557 */
2558 static void emit_default_branch(LLVMBuilderRef builder,
2559 LLVMBasicBlockRef target)
2560 {
2561 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
2562 LLVMBuildBr(builder, target);
2563 }
2564
2565 void ac_build_bgnloop(struct ac_llvm_context *ctx, int label_id)
2566 {
2567 struct ac_llvm_flow *flow = push_flow(ctx);
2568 flow->loop_entry_block = append_basic_block(ctx, "LOOP");
2569 flow->next_block = append_basic_block(ctx, "ENDLOOP");
2570 set_basicblock_name(flow->loop_entry_block, "loop", label_id);
2571 LLVMBuildBr(ctx->builder, flow->loop_entry_block);
2572 LLVMPositionBuilderAtEnd(ctx->builder, flow->loop_entry_block);
2573 }
2574
2575 void ac_build_break(struct ac_llvm_context *ctx)
2576 {
2577 struct ac_llvm_flow *flow = get_innermost_loop(ctx);
2578 LLVMBuildBr(ctx->builder, flow->next_block);
2579 }
2580
2581 void ac_build_continue(struct ac_llvm_context *ctx)
2582 {
2583 struct ac_llvm_flow *flow = get_innermost_loop(ctx);
2584 LLVMBuildBr(ctx->builder, flow->loop_entry_block);
2585 }
2586
2587 void ac_build_else(struct ac_llvm_context *ctx, int label_id)
2588 {
2589 struct ac_llvm_flow *current_branch = get_current_flow(ctx);
2590 LLVMBasicBlockRef endif_block;
2591
2592 assert(!current_branch->loop_entry_block);
2593
2594 endif_block = append_basic_block(ctx, "ENDIF");
2595 emit_default_branch(ctx->builder, endif_block);
2596
2597 LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
2598 set_basicblock_name(current_branch->next_block, "else", label_id);
2599
2600 current_branch->next_block = endif_block;
2601 }
2602
2603 void ac_build_endif(struct ac_llvm_context *ctx, int label_id)
2604 {
2605 struct ac_llvm_flow *current_branch = get_current_flow(ctx);
2606
2607 assert(!current_branch->loop_entry_block);
2608
2609 emit_default_branch(ctx->builder, current_branch->next_block);
2610 LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
2611 set_basicblock_name(current_branch->next_block, "endif", label_id);
2612
2613 ctx->flow_depth--;
2614 }
2615
2616 void ac_build_endloop(struct ac_llvm_context *ctx, int label_id)
2617 {
2618 struct ac_llvm_flow *current_loop = get_current_flow(ctx);
2619
2620 assert(current_loop->loop_entry_block);
2621
2622 emit_default_branch(ctx->builder, current_loop->loop_entry_block);
2623
2624 LLVMPositionBuilderAtEnd(ctx->builder, current_loop->next_block);
2625 set_basicblock_name(current_loop->next_block, "endloop", label_id);
2626 ctx->flow_depth--;
2627 }
2628
2629 static void if_cond_emit(struct ac_llvm_context *ctx, LLVMValueRef cond,
2630 int label_id)
2631 {
2632 struct ac_llvm_flow *flow = push_flow(ctx);
2633 LLVMBasicBlockRef if_block;
2634
2635 if_block = append_basic_block(ctx, "IF");
2636 flow->next_block = append_basic_block(ctx, "ELSE");
2637 set_basicblock_name(if_block, "if", label_id);
2638 LLVMBuildCondBr(ctx->builder, cond, if_block, flow->next_block);
2639 LLVMPositionBuilderAtEnd(ctx->builder, if_block);
2640 }
2641
2642 void ac_build_if(struct ac_llvm_context *ctx, LLVMValueRef value,
2643 int label_id)
2644 {
2645 LLVMValueRef cond = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
2646 value, ctx->f32_0, "");
2647 if_cond_emit(ctx, cond, label_id);
2648 }
2649
2650 void ac_build_uif(struct ac_llvm_context *ctx, LLVMValueRef value,
2651 int label_id)
2652 {
2653 LLVMValueRef cond = LLVMBuildICmp(ctx->builder, LLVMIntNE,
2654 ac_to_integer(ctx, value),
2655 ctx->i32_0, "");
2656 if_cond_emit(ctx, cond, label_id);
2657 }
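
/* Usage sketch for the structured control-flow helpers above
 * (hypothetical caller; the label ids are only used to name the
 * generated basic blocks):
 *
 *	ac_build_uif(ctx, cond, 7);
 *	...emit then-side IR...
 *	ac_build_else(ctx, 7);
 *	...emit else-side IR...
 *	ac_build_endif(ctx, 7);
 */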
2658
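/* Build an alloca in the entry block of the current function. LLVM's
 * mem2reg/SROA only promotes allocas that sit in the entry block,
 * which is why the builder is repositioned before emitting it. */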
2659 LLVMValueRef ac_build_alloca_undef(struct ac_llvm_context *ac, LLVMTypeRef type,
2660 const char *name)
2661 {
2662 LLVMBuilderRef builder = ac->builder;
2663 LLVMBasicBlockRef current_block = LLVMGetInsertBlock(builder);
2664 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
2665 LLVMBasicBlockRef first_block = LLVMGetEntryBasicBlock(function);
2666 LLVMValueRef first_instr = LLVMGetFirstInstruction(first_block);
2667 LLVMBuilderRef first_builder = LLVMCreateBuilderInContext(ac->context);
2668 LLVMValueRef res;
2669
2670 if (first_instr) {
2671 LLVMPositionBuilderBefore(first_builder, first_instr);
2672 } else {
2673 LLVMPositionBuilderAtEnd(first_builder, first_block);
2674 }
2675
2676 res = LLVMBuildAlloca(first_builder, type, name);
2677 LLVMDisposeBuilder(first_builder);
2678 return res;
2679 }
2680
2681 LLVMValueRef ac_build_alloca(struct ac_llvm_context *ac,
2682 LLVMTypeRef type, const char *name)
2683 {
2684 LLVMValueRef ptr = ac_build_alloca_undef(ac, type, name);
2685 LLVMBuildStore(ac->builder, LLVMConstNull(type), ptr);
2686 return ptr;
2687 }
2688
2689 LLVMValueRef ac_cast_ptr(struct ac_llvm_context *ctx, LLVMValueRef ptr,
2690 LLVMTypeRef type)
2691 {
2692 int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
2693 return LLVMBuildBitCast(ctx->builder, ptr,
2694 LLVMPointerType(type, addr_space), "");
2695 }
2696
2697 LLVMValueRef ac_trim_vector(struct ac_llvm_context *ctx, LLVMValueRef value,
2698 unsigned count)
2699 {
2700 unsigned num_components = ac_get_llvm_num_components(value);
2701 if (count == num_components)
2702 return value;
2703
2704 LLVMValueRef masks[MAX2(count, 2)];
2705 masks[0] = ctx->i32_0;
2706 masks[1] = ctx->i32_1;
2707 for (unsigned i = 2; i < count; i++)
2708 masks[i] = LLVMConstInt(ctx->i32, i, false);
2709
2710 if (count == 1)
2711 return LLVMBuildExtractElement(ctx->builder, value, masks[0],
2712 "");
2713
2714 LLVMValueRef swizzle = LLVMConstVector(masks, count);
2715 return LLVMBuildShuffleVector(ctx->builder, value, value, swizzle, "");
2716 }
2717
2718 LLVMValueRef ac_unpack_param(struct ac_llvm_context *ctx, LLVMValueRef param,
2719 unsigned rshift, unsigned bitwidth)
2720 {
2721 LLVMValueRef value = param;
2722 if (rshift)
2723 value = LLVMBuildLShr(ctx->builder, value,
2724 LLVMConstInt(ctx->i32, rshift, false), "");
2725
2726 if (rshift + bitwidth < 32) {
2727 unsigned mask = (1 << bitwidth) - 1;
2728 value = LLVMBuildAnd(ctx->builder, value,
2729 LLVMConstInt(ctx->i32, mask, false), "");
2730 }
2731 return value;
2732 }
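
/* Example (an illustrative sketch): extracting an 8-bit field stored
 * in bits [8..15] of a packed parameter:
 *
 *	LLVMValueRef field = ac_unpack_param(ctx, param, 8, 8);
 *
 * which emits a right shift by 8 followed by an AND with 0xff.
 */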
2733
2734 /* Adjust the sample index according to FMASK.
2735 *
2736 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
2737 * which is the identity mapping. Each nibble says which physical sample
2738 * should be fetched to get that sample.
2739 *
2740 * For example, 0x11111100 means there are only 2 samples stored and
2741 * the second sample covers 3/4 of the pixel. When reading samples 0
2742 * and 1, return physical sample 0 (determined by the first two 0s
2743 * in FMASK), otherwise return physical sample 1.
2744 *
2745 * The sample index should be adjusted as follows:
2746 * addr[sample_index] = (fmask >> (addr[sample_index] * 4)) & 0xF;
2747 */
2748 void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask,
2749 LLVMValueRef *addr, bool is_array_tex)
2750 {
2751 struct ac_image_args fmask_load = {};
2752 fmask_load.opcode = ac_image_load;
2753 fmask_load.resource = fmask;
2754 fmask_load.dmask = 0xf;
2755 fmask_load.dim = is_array_tex ? ac_image_2darray : ac_image_2d;
2756
2757 fmask_load.coords[0] = addr[0];
2758 fmask_load.coords[1] = addr[1];
2759 if (is_array_tex)
2760 fmask_load.coords[2] = addr[2];
2761
2762 LLVMValueRef fmask_value = ac_build_image_opcode(ac, &fmask_load);
2763 fmask_value = LLVMBuildExtractElement(ac->builder, fmask_value,
2764 ac->i32_0, "");
2765
2766 /* Apply the formula. */
2767 unsigned sample_chan = is_array_tex ? 3 : 2;
2768 LLVMValueRef final_sample;
2769 final_sample = LLVMBuildMul(ac->builder, addr[sample_chan],
2770 LLVMConstInt(ac->i32, 4, 0), "");
2771 final_sample = LLVMBuildLShr(ac->builder, fmask_value, final_sample, "");
2772 /* Mask the sample index by 0x7, because 0x8 means an unknown value
2773 * with EQAA, so those will map to 0. */
2774 final_sample = LLVMBuildAnd(ac->builder, final_sample,
2775 LLVMConstInt(ac->i32, 0x7, 0), "");
2776
2777 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
2778 * resource descriptor is 0 (invalid).
2779 */
2780 LLVMValueRef tmp;
2781 tmp = LLVMBuildBitCast(ac->builder, fmask, ac->v8i32, "");
2782 tmp = LLVMBuildExtractElement(ac->builder, tmp, ac->i32_1, "");
2783 tmp = LLVMBuildICmp(ac->builder, LLVMIntNE, tmp, ac->i32_0, "");
2784
2785 /* Replace the MSAA sample index. */
2786 addr[sample_chan] = LLVMBuildSelect(ac->builder, tmp, final_sample,
2787 addr[sample_chan], "");
2788 }
2789
2790 static LLVMValueRef
2791 _ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
2792 {
2793 ac_build_optimization_barrier(ctx, &src);
2794 return ac_build_intrinsic(ctx,
2795 lane == NULL ? "llvm.amdgcn.readfirstlane" : "llvm.amdgcn.readlane",
2796 LLVMTypeOf(src), (LLVMValueRef []) {
2797 src, lane },
2798 lane == NULL ? 1 : 2,
2799 AC_FUNC_ATTR_READNONE |
2800 AC_FUNC_ATTR_CONVERGENT);
2801 }
2802
2803 /**
2804 * Builds the "llvm.amdgcn.readlane" or "llvm.amdgcn.readfirstlane" intrinsic.
2805  * @param ctx - the LLVM build context
2806  * @param src - the value to read from the selected lane
2807 * @param lane - id of the lane or NULL for the first active lane
2808 * @return value of the lane
2809 */
2810 LLVMValueRef
2811 ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
2812 {
2813 LLVMTypeRef src_type = LLVMTypeOf(src);
2814 src = ac_to_integer(ctx, src);
2815 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2816 LLVMValueRef ret;
2817
2818 if (bits == 32) {
2819 ret = _ac_build_readlane(ctx, src, lane);
2820 } else {
2821 assert(bits % 32 == 0);
2822 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2823 LLVMValueRef src_vector =
2824 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2825 ret = LLVMGetUndef(vec_type);
2826 for (unsigned i = 0; i < bits / 32; i++) {
2827 src = LLVMBuildExtractElement(ctx->builder, src_vector,
2828 LLVMConstInt(ctx->i32, i, 0), "");
2829 LLVMValueRef ret_comp = _ac_build_readlane(ctx, src, lane);
2830 ret = LLVMBuildInsertElement(ctx->builder, ret, ret_comp,
2831 LLVMConstInt(ctx->i32, i, 0), "");
2832 }
2833 }
2834 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
2835 }
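
/* Usage sketch (hypothetical caller): broadcast lane 0's value of a
 * 32-bit variable to the whole wave, or read the first active lane
 * when no particular lane id is wanted:
 *
 *	LLVMValueRef lane0 = ac_build_readlane(ctx, value, ctx->i32_0);
 *	LLVMValueRef first = ac_build_readlane(ctx, value, NULL);
 */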
2836
2837 LLVMValueRef
2838 ac_build_writelane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef value, LLVMValueRef lane)
2839 {
2840 /* TODO: Use the actual instruction when LLVM adds an intrinsic for it.
2841 */
2842 LLVMValueRef pred = LLVMBuildICmp(ctx->builder, LLVMIntEQ, lane,
2843 ac_get_thread_id(ctx), "");
2844 return LLVMBuildSelect(ctx->builder, pred, value, src, "");
2845 }
2846
2847 LLVMValueRef
2848 ac_build_mbcnt(struct ac_llvm_context *ctx, LLVMValueRef mask)
2849 {
2850 LLVMValueRef mask_vec = LLVMBuildBitCast(ctx->builder, mask,
2851 LLVMVectorType(ctx->i32, 2),
2852 "");
2853 LLVMValueRef mask_lo = LLVMBuildExtractElement(ctx->builder, mask_vec,
2854 ctx->i32_0, "");
2855 LLVMValueRef mask_hi = LLVMBuildExtractElement(ctx->builder, mask_vec,
2856 ctx->i32_1, "");
2857 LLVMValueRef val =
2858 ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.lo", ctx->i32,
2859 (LLVMValueRef []) { mask_lo, ctx->i32_0 },
2860 2, AC_FUNC_ATTR_READNONE);
2861 val = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi", ctx->i32,
2862 (LLVMValueRef []) { mask_hi, val },
2863 2, AC_FUNC_ATTR_READNONE);
2864 return val;
2865 }
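
/* Per lane, ac_build_mbcnt returns the number of bits of "mask" set at
 * positions strictly below the lane's own id. With the exec mask as
 * input this yields each active lane's rank (0 for the lowest active
 * lane, 1 for the next, and so on), a common way to derive a
 * wave-local index. */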
2866
2867 enum dpp_ctrl {
2868 _dpp_quad_perm = 0x000,
2869 _dpp_row_sl = 0x100,
2870 _dpp_row_sr = 0x110,
2871 _dpp_row_rr = 0x120,
2872 dpp_wf_sl1 = 0x130,
2873 dpp_wf_rl1 = 0x134,
2874 dpp_wf_sr1 = 0x138,
2875 dpp_wf_rr1 = 0x13C,
2876 dpp_row_mirror = 0x140,
2877 dpp_row_half_mirror = 0x141,
2878 dpp_row_bcast15 = 0x142,
2879 dpp_row_bcast31 = 0x143
2880 };
2881
2882 static inline enum dpp_ctrl
2883 dpp_quad_perm(unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
2884 {
2885 assert(lane0 < 4 && lane1 < 4 && lane2 < 4 && lane3 < 4);
2886 return _dpp_quad_perm | lane0 | (lane1 << 2) | (lane2 << 4) | (lane3 << 6);
2887 }
2888
2889 static inline enum dpp_ctrl
2890 dpp_row_sl(unsigned amount)
2891 {
2892 assert(amount > 0 && amount < 16);
2893 return _dpp_row_sl | amount;
2894 }
2895
2896 static inline enum dpp_ctrl
2897 dpp_row_sr(unsigned amount)
2898 {
2899 assert(amount > 0 && amount < 16);
2900 return _dpp_row_sr | amount;
2901 }
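
/* Encoding examples for the helpers above (sketch):
 *   dpp_quad_perm(0, 1, 2, 3) = 0x0e4  identity within each quad
 *   dpp_quad_perm(1, 0, 3, 2) = 0x0b1  swap adjacent lane pairs
 *   dpp_row_sr(1)             = 0x111  shift each 16-lane row right by 1
 */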
2902
2903 static LLVMValueRef
2904 _ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
2905 enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
2906 bool bound_ctrl)
2907 {
2908 return ac_build_intrinsic(ctx, "llvm.amdgcn.update.dpp.i32",
2909 LLVMTypeOf(old),
2910 (LLVMValueRef[]) {
2911 old, src,
2912 LLVMConstInt(ctx->i32, dpp_ctrl, 0),
2913 LLVMConstInt(ctx->i32, row_mask, 0),
2914 LLVMConstInt(ctx->i32, bank_mask, 0),
2915 LLVMConstInt(ctx->i1, bound_ctrl, 0) },
2916 6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
2917 }
2918
2919 static LLVMValueRef
2920 ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
2921 enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
2922 bool bound_ctrl)
2923 {
2924 LLVMTypeRef src_type = LLVMTypeOf(src);
2925 src = ac_to_integer(ctx, src);
2926 old = ac_to_integer(ctx, old);
2927 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2928 LLVMValueRef ret;
2929 if (bits == 32) {
2930 ret = _ac_build_dpp(ctx, old, src, dpp_ctrl, row_mask,
2931 bank_mask, bound_ctrl);
2932 } else {
2933 assert(bits % 32 == 0);
2934 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2935 LLVMValueRef src_vector =
2936 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2937 LLVMValueRef old_vector =
2938 LLVMBuildBitCast(ctx->builder, old, vec_type, "");
2939 ret = LLVMGetUndef(vec_type);
2940 for (unsigned i = 0; i < bits / 32; i++) {
2941 src = LLVMBuildExtractElement(ctx->builder, src_vector,
2942 LLVMConstInt(ctx->i32, i,
2943 0), "");
2944 old = LLVMBuildExtractElement(ctx->builder, old_vector,
2945 LLVMConstInt(ctx->i32, i,
2946 0), "");
2947 LLVMValueRef ret_comp = _ac_build_dpp(ctx, old, src,
2948 dpp_ctrl,
2949 row_mask,
2950 bank_mask,
2951 bound_ctrl);
2952 ret = LLVMBuildInsertElement(ctx->builder, ret,
2953 ret_comp,
2954 LLVMConstInt(ctx->i32, i,
2955 0), "");
2956 }
2957 }
2958 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
2959 }
2960
2961 static inline unsigned
2962 ds_pattern_bitmode(unsigned and_mask, unsigned or_mask, unsigned xor_mask)
2963 {
2964 assert(and_mask < 32 && or_mask < 32 && xor_mask < 32);
2965 return and_mask | (or_mask << 5) | (xor_mask << 10);
2966 }
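
/* In this swizzle mode a lane reads from
 * lane' = ((lane & and_mask) | or_mask) ^ xor_mask, within groups of
 * 32 lanes. For example, the patterns used by ac_build_reduce below:
 *   ds_pattern_bitmode(0x1f, 0, 0x04) -> lane ^ 4  (swap groups of 4)
 *   ds_pattern_bitmode(0x1f, 0, 0x08) -> lane ^ 8  (swap groups of 8)
 *   ds_pattern_bitmode(0x1f, 0, 0x10) -> lane ^ 16 (swap 16-lane halves)
 */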
2967
2968 static LLVMValueRef
2969 _ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
2970 {
2971 return ac_build_intrinsic(ctx, "llvm.amdgcn.ds.swizzle",
2972 LLVMTypeOf(src), (LLVMValueRef []) {
2973 src, LLVMConstInt(ctx->i32, mask, 0) },
2974 2, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
2975 }
2976
2977 LLVMValueRef
2978 ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
2979 {
2980 LLVMTypeRef src_type = LLVMTypeOf(src);
2981 src = ac_to_integer(ctx, src);
2982 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
2983 LLVMValueRef ret;
2984 if (bits == 32) {
2985 ret = _ac_build_ds_swizzle(ctx, src, mask);
2986 } else {
2987 assert(bits % 32 == 0);
2988 LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
2989 LLVMValueRef src_vector =
2990 LLVMBuildBitCast(ctx->builder, src, vec_type, "");
2991 ret = LLVMGetUndef(vec_type);
2992 for (unsigned i = 0; i < bits / 32; i++) {
2993 src = LLVMBuildExtractElement(ctx->builder, src_vector,
2994 LLVMConstInt(ctx->i32, i,
2995 0), "");
2996 LLVMValueRef ret_comp = _ac_build_ds_swizzle(ctx, src,
2997 mask);
2998 ret = LLVMBuildInsertElement(ctx->builder, ret,
2999 ret_comp,
3000 LLVMConstInt(ctx->i32, i,
3001 0), "");
3002 }
3003 }
3004 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3005 }
3006
3007 static LLVMValueRef
3008 ac_build_wwm(struct ac_llvm_context *ctx, LLVMValueRef src)
3009 {
3010 char name[32], type[8];
3011 ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3012 snprintf(name, sizeof(name), "llvm.amdgcn.wwm.%s", type);
3013 return ac_build_intrinsic(ctx, name, LLVMTypeOf(src),
3014 (LLVMValueRef []) { src }, 1,
3015 AC_FUNC_ATTR_READNONE);
3016 }
3017
3018 static LLVMValueRef
3019 ac_build_set_inactive(struct ac_llvm_context *ctx, LLVMValueRef src,
3020 LLVMValueRef inactive)
3021 {
3022 char name[33], type[8];
3023 LLVMTypeRef src_type = LLVMTypeOf(src);
3024 src = ac_to_integer(ctx, src);
3025 inactive = ac_to_integer(ctx, inactive);
3026 ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3027 snprintf(name, sizeof(name), "llvm.amdgcn.set.inactive.%s", type);
3028 LLVMValueRef ret =
3029 ac_build_intrinsic(ctx, name,
3030 LLVMTypeOf(src), (LLVMValueRef []) {
3031 src, inactive }, 2,
3032 AC_FUNC_ATTR_READNONE |
3033 AC_FUNC_ATTR_CONVERGENT);
3034 return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3035 }
3036
3037 static LLVMValueRef
3038 get_reduction_identity(struct ac_llvm_context *ctx, nir_op op, unsigned type_size)
3039 {
3040 if (type_size == 4) {
3041 switch (op) {
3042 case nir_op_iadd: return ctx->i32_0;
3043 case nir_op_fadd: return ctx->f32_0;
3044 case nir_op_imul: return ctx->i32_1;
3045 case nir_op_fmul: return ctx->f32_1;
3046 case nir_op_imin: return LLVMConstInt(ctx->i32, INT32_MAX, 0);
3047 case nir_op_umin: return LLVMConstInt(ctx->i32, UINT32_MAX, 0);
3048 case nir_op_fmin: return LLVMConstReal(ctx->f32, INFINITY);
3049 case nir_op_imax: return LLVMConstInt(ctx->i32, INT32_MIN, 0);
3050 case nir_op_umax: return ctx->i32_0;
3051 case nir_op_fmax: return LLVMConstReal(ctx->f32, -INFINITY);
3052 case nir_op_iand: return LLVMConstInt(ctx->i32, -1, 0);
3053 case nir_op_ior: return ctx->i32_0;
3054 case nir_op_ixor: return ctx->i32_0;
3055 default:
3056 unreachable("bad reduction intrinsic");
3057 }
3058 	} else { /* type_size == 8, i.e. 64 bit */
3059 switch (op) {
3060 case nir_op_iadd: return ctx->i64_0;
3061 case nir_op_fadd: return ctx->f64_0;
3062 case nir_op_imul: return ctx->i64_1;
3063 case nir_op_fmul: return ctx->f64_1;
3064 case nir_op_imin: return LLVMConstInt(ctx->i64, INT64_MAX, 0);
3065 case nir_op_umin: return LLVMConstInt(ctx->i64, UINT64_MAX, 0);
3066 case nir_op_fmin: return LLVMConstReal(ctx->f64, INFINITY);
3067 case nir_op_imax: return LLVMConstInt(ctx->i64, INT64_MIN, 0);
3068 case nir_op_umax: return ctx->i64_0;
3069 case nir_op_fmax: return LLVMConstReal(ctx->f64, -INFINITY);
3070 case nir_op_iand: return LLVMConstInt(ctx->i64, -1, 0);
3071 case nir_op_ior: return ctx->i64_0;
3072 case nir_op_ixor: return ctx->i64_0;
3073 default:
3074 unreachable("bad reduction intrinsic");
3075 }
3076 }
3077 }
3078
3079 static LLVMValueRef
3080 ac_build_alu_op(struct ac_llvm_context *ctx, LLVMValueRef lhs, LLVMValueRef rhs, nir_op op)
3081 {
3082 bool _64bit = ac_get_type_size(LLVMTypeOf(lhs)) == 8;
3083 switch (op) {
3084 case nir_op_iadd: return LLVMBuildAdd(ctx->builder, lhs, rhs, "");
3085 case nir_op_fadd: return LLVMBuildFAdd(ctx->builder, lhs, rhs, "");
3086 case nir_op_imul: return LLVMBuildMul(ctx->builder, lhs, rhs, "");
3087 case nir_op_fmul: return LLVMBuildFMul(ctx->builder, lhs, rhs, "");
3088 case nir_op_imin: return LLVMBuildSelect(ctx->builder,
3089 LLVMBuildICmp(ctx->builder, LLVMIntSLT, lhs, rhs, ""),
3090 lhs, rhs, "");
3091 case nir_op_umin: return LLVMBuildSelect(ctx->builder,
3092 LLVMBuildICmp(ctx->builder, LLVMIntULT, lhs, rhs, ""),
3093 lhs, rhs, "");
3094 case nir_op_fmin: return ac_build_intrinsic(ctx,
3095 _64bit ? "llvm.minnum.f64" : "llvm.minnum.f32",
3096 _64bit ? ctx->f64 : ctx->f32,
3097 (LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
3098 case nir_op_imax: return LLVMBuildSelect(ctx->builder,
3099 LLVMBuildICmp(ctx->builder, LLVMIntSGT, lhs, rhs, ""),
3100 lhs, rhs, "");
3101 case nir_op_umax: return LLVMBuildSelect(ctx->builder,
3102 LLVMBuildICmp(ctx->builder, LLVMIntUGT, lhs, rhs, ""),
3103 lhs, rhs, "");
3104 case nir_op_fmax: return ac_build_intrinsic(ctx,
3105 _64bit ? "llvm.maxnum.f64" : "llvm.maxnum.f32",
3106 _64bit ? ctx->f64 : ctx->f32,
3107 (LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
3108 case nir_op_iand: return LLVMBuildAnd(ctx->builder, lhs, rhs, "");
3109 case nir_op_ior: return LLVMBuildOr(ctx->builder, lhs, rhs, "");
3110 case nir_op_ixor: return LLVMBuildXor(ctx->builder, lhs, rhs, "");
3111 default:
3112 unreachable("bad reduction intrinsic");
3113 }
3114 }
3115
3116 /* TODO: add inclusive and exclusive scan functions for SI chip class. */
3117 static LLVMValueRef
3118 ac_build_scan(struct ac_llvm_context *ctx, nir_op op, LLVMValueRef src, LLVMValueRef identity)
3119 {
3120 LLVMValueRef result, tmp;
3121 result = src;
3122 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(1), 0xf, 0xf, false);
3123 result = ac_build_alu_op(ctx, result, tmp, op);
3124 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(2), 0xf, 0xf, false);
3125 result = ac_build_alu_op(ctx, result, tmp, op);
3126 tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(3), 0xf, 0xf, false);
3127 result = ac_build_alu_op(ctx, result, tmp, op);
3128 tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(4), 0xf, 0xe, false);
3129 result = ac_build_alu_op(ctx, result, tmp, op);
3130 tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(8), 0xf, 0xc, false);
3131 result = ac_build_alu_op(ctx, result, tmp, op);
3132 tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
3133 result = ac_build_alu_op(ctx, result, tmp, op);
3134 tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
3135 result = ac_build_alu_op(ctx, result, tmp, op);
3136 return result;
3137 }
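
/* Shape of the scan above for op = iadd (sketch): the sr:1/2/3 steps
 * give each lane the sum of up to four consecutive source lanes, the
 * sr:4 and sr:8 steps extend that to a prefix within each 16-lane row,
 * and the bcast15/bcast31 steps carry row totals into the higher rows,
 * producing an inclusive prefix sum across the 64-lane wave. */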
3138
3139 LLVMValueRef
3140 ac_build_inclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
3141 {
3142 ac_build_optimization_barrier(ctx, &src);
3143 LLVMValueRef result;
3144 LLVMValueRef identity =
3145 get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
3146 result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
3147 LLVMTypeOf(identity), "");
3148 result = ac_build_scan(ctx, op, result, identity);
3149
3150 return ac_build_wwm(ctx, result);
3151 }
3152
3153 LLVMValueRef
3154 ac_build_exclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
3155 {
3156 ac_build_optimization_barrier(ctx, &src);
3157 LLVMValueRef result;
3158 LLVMValueRef identity =
3159 get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
3160 result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
3161 LLVMTypeOf(identity), "");
3162 result = ac_build_dpp(ctx, identity, result, dpp_wf_sr1, 0xf, 0xf, false);
3163 result = ac_build_scan(ctx, op, result, identity);
3164
3165 return ac_build_wwm(ctx, result);
3166 }
3167
3168 LLVMValueRef
3169 ac_build_reduce(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op, unsigned cluster_size)
3170 {
3171 if (cluster_size == 1) return src;
3172 ac_build_optimization_barrier(ctx, &src);
3173 LLVMValueRef result, swap;
3174 LLVMValueRef identity = get_reduction_identity(ctx, op,
3175 ac_get_type_size(LLVMTypeOf(src)));
3176 result = LLVMBuildBitCast(ctx->builder,
3177 ac_build_set_inactive(ctx, src, identity),
3178 LLVMTypeOf(identity), "");
3179 swap = ac_build_quad_swizzle(ctx, result, 1, 0, 3, 2);
3180 result = ac_build_alu_op(ctx, result, swap, op);
3181 if (cluster_size == 2) return ac_build_wwm(ctx, result);
3182
3183 swap = ac_build_quad_swizzle(ctx, result, 2, 3, 0, 1);
3184 result = ac_build_alu_op(ctx, result, swap, op);
3185 if (cluster_size == 4) return ac_build_wwm(ctx, result);
3186
3187 if (ctx->chip_class >= VI)
3188 swap = ac_build_dpp(ctx, identity, result, dpp_row_half_mirror, 0xf, 0xf, false);
3189 else
3190 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x04));
3191 result = ac_build_alu_op(ctx, result, swap, op);
3192 if (cluster_size == 8) return ac_build_wwm(ctx, result);
3193
3194 if (ctx->chip_class >= VI)
3195 swap = ac_build_dpp(ctx, identity, result, dpp_row_mirror, 0xf, 0xf, false);
3196 else
3197 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x08));
3198 result = ac_build_alu_op(ctx, result, swap, op);
3199 if (cluster_size == 16) return ac_build_wwm(ctx, result);
3200
3201 if (ctx->chip_class >= VI && cluster_size != 32)
3202 swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
3203 else
3204 swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x10));
3205 result = ac_build_alu_op(ctx, result, swap, op);
3206 if (cluster_size == 32) return ac_build_wwm(ctx, result);
3207
3208 if (ctx->chip_class >= VI) {
3209 swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
3210 result = ac_build_alu_op(ctx, result, swap, op);
3211 result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 63, 0));
3212 return ac_build_wwm(ctx, result);
3213 } else {
3214 swap = ac_build_readlane(ctx, result, ctx->i32_0);
3215 result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 32, 0));
3216 result = ac_build_alu_op(ctx, result, swap, op);
3217 return ac_build_wwm(ctx, result);
3218 }
3219 }
3220
3221 LLVMValueRef
3222 ac_build_quad_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src,
3223 unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
3224 {
3225 unsigned mask = dpp_quad_perm(lane0, lane1, lane2, lane3);
3226 if (ctx->chip_class >= VI) {
3227 return ac_build_dpp(ctx, src, src, mask, 0xf, 0xf, false);
3228 } else {
3229 return ac_build_ds_swizzle(ctx, src, (1 << 15) | mask);
3230 }
3231 }
3232
3233 LLVMValueRef
3234 ac_build_shuffle(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef index)
3235 {
	/* The ds_bpermute index is a byte address, hence the scale by 4. */
3236 	index = LLVMBuildMul(ctx->builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
3237 return ac_build_intrinsic(ctx,
3238 "llvm.amdgcn.ds.bpermute", ctx->i32,
3239 (LLVMValueRef []) {index, src}, 2,
3240 AC_FUNC_ATTR_READNONE |
3241 AC_FUNC_ATTR_CONVERGENT);
3242 }