ac,radv: do not emit vec3 for raw load/store on SI
[mesa.git] / src / amd / common / ac_llvm_build.c
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 */
25 /* based on pieces from si_pipe.c and radeon_llvm_emit.c */
26 #include "ac_llvm_build.h"
27
28 #include <llvm-c/Core.h>
29
30 #include "c11/threads.h"
31
32 #include <assert.h>
33 #include <stdio.h>
34
35 #include "ac_llvm_util.h"
36 #include "ac_exp_param.h"
37 #include "util/bitscan.h"
38 #include "util/macros.h"
39 #include "util/u_atomic.h"
40 #include "util/u_math.h"
41 #include "sid.h"
42
43 #include "shader_enums.h"
44
45 #define AC_LLVM_INITIAL_CF_DEPTH 4
46
47 /* Data for if/else/endif and bgnloop/endloop control flow structures.
48 */
49 struct ac_llvm_flow {
50 /* Loop exit or next part of if/else/endif. */
51 LLVMBasicBlockRef next_block;
52 LLVMBasicBlockRef loop_entry_block;
53 };
54
55 /* Initialize module-independent parts of the context.
56 *
57 * The caller is responsible for initializing ctx::module and ctx::builder.
58 */
59 void
60 ac_llvm_context_init(struct ac_llvm_context *ctx,
61 enum chip_class chip_class, enum radeon_family family)
62 {
63 LLVMValueRef args[1];
64
65 ctx->context = LLVMContextCreate();
66
67 ctx->chip_class = chip_class;
68 ctx->family = family;
69 ctx->module = NULL;
70 ctx->builder = NULL;
71
72 ctx->voidt = LLVMVoidTypeInContext(ctx->context);
73 ctx->i1 = LLVMInt1TypeInContext(ctx->context);
74 ctx->i8 = LLVMInt8TypeInContext(ctx->context);
75 ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
76 ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
77 ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
78 ctx->intptr = ctx->i32;
79 ctx->f16 = LLVMHalfTypeInContext(ctx->context);
80 ctx->f32 = LLVMFloatTypeInContext(ctx->context);
81 ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
82 ctx->v2i16 = LLVMVectorType(ctx->i16, 2);
83 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
84 ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
85 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
86 ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
87 ctx->v3f32 = LLVMVectorType(ctx->f32, 3);
88 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
89 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
90
91 ctx->i8_0 = LLVMConstInt(ctx->i8, 0, false);
92 ctx->i8_1 = LLVMConstInt(ctx->i8, 1, false);
93 ctx->i16_0 = LLVMConstInt(ctx->i16, 0, false);
94 ctx->i16_1 = LLVMConstInt(ctx->i16, 1, false);
95 ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
96 ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
97 ctx->i64_0 = LLVMConstInt(ctx->i64, 0, false);
98 ctx->i64_1 = LLVMConstInt(ctx->i64, 1, false);
99 ctx->f16_0 = LLVMConstReal(ctx->f16, 0.0);
100 ctx->f16_1 = LLVMConstReal(ctx->f16, 1.0);
101 ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
102 ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);
103 ctx->f64_0 = LLVMConstReal(ctx->f64, 0.0);
104 ctx->f64_1 = LLVMConstReal(ctx->f64, 1.0);
105
106 ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
107 ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
108
109 ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context,
110 "range", 5);
111
112 ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
113 "invariant.load", 14);
114
115 ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);
116
117 args[0] = LLVMConstReal(ctx->f32, 2.5);
118 ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);
119
120 ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
121 "amdgpu.uniform", 14);
122
123 ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
124 }
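/* Usage sketch (illustrative only, not code from this file; chip values
 * are placeholders): the caller creates the module and builder itself,
 * as required by the comment above.
 *
 *    struct ac_llvm_context ac;
 *    ac_llvm_context_init(&ac, GFX9, CHIP_VEGA10);
 *    ac.module = LLVMModuleCreateWithNameInContext("shader", ac.context);
 *    ac.builder = LLVMCreateBuilderInContext(ac.context);
 *    ... emit IR with the ac_build_* helpers below ...
 *    ac_llvm_context_dispose(&ac);
 */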
125
126 void
127 ac_llvm_context_dispose(struct ac_llvm_context *ctx)
128 {
129 free(ctx->flow);
130 ctx->flow = NULL;
131 ctx->flow_depth_max = 0;
132 }
133
134 int
135 ac_get_llvm_num_components(LLVMValueRef value)
136 {
137 LLVMTypeRef type = LLVMTypeOf(value);
138 unsigned num_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind
139 ? LLVMGetVectorSize(type)
140 : 1;
141 return num_components;
142 }
143
144 LLVMValueRef
145 ac_llvm_extract_elem(struct ac_llvm_context *ac,
146 LLVMValueRef value,
147 int index)
148 {
149 if (LLVMGetTypeKind(LLVMTypeOf(value)) != LLVMVectorTypeKind) {
150 assert(index == 0);
151 return value;
152 }
153
154 return LLVMBuildExtractElement(ac->builder, value,
155 LLVMConstInt(ac->i32, index, false), "");
156 }
157
158 int
159 ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
160 {
161 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
162 type = LLVMGetElementType(type);
163
164 if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind)
165 return LLVMGetIntTypeWidth(type);
166
167 if (type == ctx->f16)
168 return 16;
169 if (type == ctx->f32)
170 return 32;
171 if (type == ctx->f64)
172 return 64;
173
174 unreachable("Unhandled type kind in get_elem_bits");
175 }
176
177 unsigned
178 ac_get_type_size(LLVMTypeRef type)
179 {
180 LLVMTypeKind kind = LLVMGetTypeKind(type);
181
182 switch (kind) {
183 case LLVMIntegerTypeKind:
184 return LLVMGetIntTypeWidth(type) / 8;
185 case LLVMHalfTypeKind:
186 return 2;
187 case LLVMFloatTypeKind:
188 return 4;
189 case LLVMDoubleTypeKind:
190 return 8;
191 case LLVMPointerTypeKind:
192 if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_CONST_32BIT)
193 return 4;
194 return 8;
195 case LLVMVectorTypeKind:
196 return LLVMGetVectorSize(type) *
197 ac_get_type_size(LLVMGetElementType(type));
198 case LLVMArrayTypeKind:
199 return LLVMGetArrayLength(type) *
200 ac_get_type_size(LLVMGetElementType(type));
201 default:
202 assert(0);
203 return 0;
204 }
205 }
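/* For example: ac_get_type_size(ctx->v4f32) returns 16, and a pointer in
 * AC_ADDR_SPACE_CONST_32BIT reports 4 bytes to match its 32-bit address
 * arithmetic.
 */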
206
207 static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
208 {
209 if (t == ctx->i8)
210 return ctx->i8;
211 else if (t == ctx->f16 || t == ctx->i16)
212 return ctx->i16;
213 else if (t == ctx->f32 || t == ctx->i32)
214 return ctx->i32;
215 else if (t == ctx->f64 || t == ctx->i64)
216 return ctx->i64;
217 else
218 unreachable("Unhandled integer size");
219 }
220
221 LLVMTypeRef
222 ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
223 {
224 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
225 LLVMTypeRef elem_type = LLVMGetElementType(t);
226 return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
227 LLVMGetVectorSize(t));
228 }
229 if (LLVMGetTypeKind(t) == LLVMPointerTypeKind) {
230 switch (LLVMGetPointerAddressSpace(t)) {
231 case AC_ADDR_SPACE_GLOBAL:
232 return ctx->i64;
233 case AC_ADDR_SPACE_LDS:
234 return ctx->i32;
235 default:
236 unreachable("unhandled address space");
237 }
238 }
239 return to_integer_type_scalar(ctx, t);
240 }
241
242 LLVMValueRef
243 ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
244 {
245 LLVMTypeRef type = LLVMTypeOf(v);
246 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
247 return LLVMBuildPtrToInt(ctx->builder, v, ac_to_integer_type(ctx, type), "");
248 }
249 return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
250 }
251
252 LLVMValueRef
253 ac_to_integer_or_pointer(struct ac_llvm_context *ctx, LLVMValueRef v)
254 {
255 LLVMTypeRef type = LLVMTypeOf(v);
256 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
257 return v;
258 return ac_to_integer(ctx, v);
259 }
260
261 static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
262 {
263 if (t == ctx->i8)
264 return ctx->i8;
265 else if (t == ctx->i16 || t == ctx->f16)
266 return ctx->f16;
267 else if (t == ctx->i32 || t == ctx->f32)
268 return ctx->f32;
269 else if (t == ctx->i64 || t == ctx->f64)
270 return ctx->f64;
271 else
272 unreachable("Unhandled float size");
273 }
274
275 LLVMTypeRef
276 ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
277 {
278 if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
279 LLVMTypeRef elem_type = LLVMGetElementType(t);
280 return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
281 LLVMGetVectorSize(t));
282 }
283 return to_float_type_scalar(ctx, t);
284 }
285
286 LLVMValueRef
287 ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
288 {
289 LLVMTypeRef type = LLVMTypeOf(v);
290 return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
291 }
292
293
294 LLVMValueRef
295 ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
296 LLVMTypeRef return_type, LLVMValueRef *params,
297 unsigned param_count, unsigned attrib_mask)
298 {
299 LLVMValueRef function, call;
300 bool set_callsite_attrs = !(attrib_mask & AC_FUNC_ATTR_LEGACY);
301
302 function = LLVMGetNamedFunction(ctx->module, name);
303 if (!function) {
304 LLVMTypeRef param_types[32], function_type;
305 unsigned i;
306
307 assert(param_count <= 32);
308
309 for (i = 0; i < param_count; ++i) {
310 assert(params[i]);
311 param_types[i] = LLVMTypeOf(params[i]);
312 }
313 function_type =
314 LLVMFunctionType(return_type, param_types, param_count, 0);
315 function = LLVMAddFunction(ctx->module, name, function_type);
316
317 LLVMSetFunctionCallConv(function, LLVMCCallConv);
318 LLVMSetLinkage(function, LLVMExternalLinkage);
319
320 if (!set_callsite_attrs)
321 ac_add_func_attributes(ctx->context, function, attrib_mask);
322 }
323
324 call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
325 if (set_callsite_attrs)
326 ac_add_func_attributes(ctx->context, call, attrib_mask);
327 return call;
328 }
329
330 /**
331  * Given a scalar or vector numeric \p type, generate its textual name
332  * (e.g. "v4f32") for use as the overload suffix of intrinsic names.
333 */
334 void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
335 {
336 LLVMTypeRef elem_type = type;
337
338 assert(bufsize >= 8);
339
340 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
341 int ret = snprintf(buf, bufsize, "v%u",
342 LLVMGetVectorSize(type));
343 if (ret < 0) {
344 char *type_name = LLVMPrintTypeToString(type);
345 fprintf(stderr, "Error building type name for: %s\n",
346 type_name);
347 return;
348 }
349 elem_type = LLVMGetElementType(type);
350 buf += ret;
351 bufsize -= ret;
352 }
353 switch (LLVMGetTypeKind(elem_type)) {
354 default: break;
355 case LLVMIntegerTypeKind:
356 snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
357 break;
358 case LLVMHalfTypeKind:
359 snprintf(buf, bufsize, "f16");
360 break;
361 case LLVMFloatTypeKind:
362 snprintf(buf, bufsize, "f32");
363 break;
364 case LLVMDoubleTypeKind:
365 snprintf(buf, bufsize, "f64");
366 break;
367 }
368 }
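/* For example (illustrative): passing ctx->f32 yields "f32", ctx->v2i16
 * yields "v2i16", and ctx->v4f32 yields "v4f32" -- the overload suffixes
 * used to build intrinsic names such as "llvm.amdgcn.raw.buffer.load.v4f32"
 * elsewhere in this file.
 */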
369
370 /**
371 * Helper function that builds an LLVM IR PHI node and immediately adds
372 * incoming edges.
373 */
374 LLVMValueRef
375 ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type,
376 unsigned count_incoming, LLVMValueRef *values,
377 LLVMBasicBlockRef *blocks)
378 {
379 LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
380 LLVMAddIncoming(phi, values, blocks, count_incoming);
381 return phi;
382 }
383
384 void ac_build_s_barrier(struct ac_llvm_context *ctx)
385 {
386 ac_build_intrinsic(ctx, "llvm.amdgcn.s.barrier", ctx->voidt, NULL,
387 0, AC_FUNC_ATTR_CONVERGENT);
388 }
389
390 /* Prevent optimizations (at least of memory accesses) across the current
391 * point in the program by emitting empty inline assembly that is marked as
392 * having side effects.
393 *
394 * Optionally, a value can be passed through the inline assembly to prevent
395 * LLVM from hoisting calls to ReadNone functions.
396 */
397 void
398 ac_build_optimization_barrier(struct ac_llvm_context *ctx,
399 LLVMValueRef *pvgpr)
400 {
401 static int counter = 0;
402
403 LLVMBuilderRef builder = ctx->builder;
404 char code[16];
405
406 snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));
407
408 if (!pvgpr) {
409 LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
410 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
411 LLVMBuildCall(builder, inlineasm, NULL, 0, "");
412 } else {
413 LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
414 LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
415 LLVMValueRef vgpr = *pvgpr;
416 LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
417 unsigned vgpr_size = ac_get_type_size(vgpr_type);
418 LLVMValueRef vgpr0;
419
420 assert(vgpr_size % 4 == 0);
421
422 vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
423 vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
424 vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
425 vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
426 vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
427
428 *pvgpr = vgpr;
429 }
430 }
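/* Usage sketch (illustrative): pin a value to a VGPR across this point so
 * that LLVM cannot move the computation feeding it:
 *
 *    LLVMValueRef tmp = value;
 *    ac_build_optimization_barrier(ctx, &tmp);
 *    value = tmp;  (use only the barriered value from here on)
 *
 * ac_build_ballot() below relies on exactly this to keep its icmp intrinsic
 * in the intended basic block.
 */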
431
432 LLVMValueRef
433 ac_build_shader_clock(struct ac_llvm_context *ctx)
434 {
435 LLVMValueRef tmp = ac_build_intrinsic(ctx, "llvm.readcyclecounter",
436 ctx->i64, NULL, 0, 0);
437 return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
438 }
439
440 LLVMValueRef
441 ac_build_ballot(struct ac_llvm_context *ctx,
442 LLVMValueRef value)
443 {
444 LLVMValueRef args[3] = {
445 value,
446 ctx->i32_0,
447 LLVMConstInt(ctx->i32, LLVMIntNE, 0)
448 };
449
450 /* We currently have no other way to prevent LLVM from lifting the icmp
451 * calls to a dominating basic block.
452 */
453 ac_build_optimization_barrier(ctx, &args[0]);
454
455 args[0] = ac_to_integer(ctx, args[0]);
456
457 return ac_build_intrinsic(ctx,
458 "llvm.amdgcn.icmp.i32",
459 ctx->i64, args, 3,
460 AC_FUNC_ATTR_NOUNWIND |
461 AC_FUNC_ATTR_READNONE |
462 AC_FUNC_ATTR_CONVERGENT);
463 }
464
465 LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx,
466 LLVMValueRef value)
467 {
468 LLVMValueRef args[3] = {
469 value,
470 ctx->i1false,
471 LLVMConstInt(ctx->i32, LLVMIntNE, 0),
472 };
473
474 assert(HAVE_LLVM >= 0x0800);
475 return ac_build_intrinsic(ctx, "llvm.amdgcn.icmp.i1", ctx->i64, args, 3,
476 AC_FUNC_ATTR_NOUNWIND |
477 AC_FUNC_ATTR_READNONE |
478 AC_FUNC_ATTR_CONVERGENT);
479 }
480
481 LLVMValueRef
482 ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
483 {
484 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
485 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
486 return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
487 }
488
489 LLVMValueRef
490 ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
491 {
492 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
493 return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set,
494 LLVMConstInt(ctx->i64, 0, 0), "");
495 }
496
497 LLVMValueRef
498 ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
499 {
500 LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
501 LLVMValueRef vote_set = ac_build_ballot(ctx, value);
502
503 LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
504 vote_set, active_set, "");
505 LLVMValueRef none = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
506 vote_set,
507 LLVMConstInt(ctx->i64, 0, 0), "");
508 return LLVMBuildOr(ctx->builder, all, none, "");
509 }
510
511 LLVMValueRef
512 ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
513 unsigned value_count, unsigned component)
514 {
515 LLVMValueRef vec = NULL;
516
517 if (value_count == 1) {
518 return values[component];
519 } else if (!value_count)
520 unreachable("value_count is 0");
521
522 for (unsigned i = component; i < value_count + component; i++) {
523 LLVMValueRef value = values[i];
524
525 if (i == component)
526 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
527 LLVMValueRef index = LLVMConstInt(ctx->i32, i - component, false);
528 vec = LLVMBuildInsertElement(ctx->builder, vec, value, index, "");
529 }
530 return vec;
531 }
532
533 LLVMValueRef
534 ac_build_gather_values_extended(struct ac_llvm_context *ctx,
535 LLVMValueRef *values,
536 unsigned value_count,
537 unsigned value_stride,
538 bool load,
539 bool always_vector)
540 {
541 LLVMBuilderRef builder = ctx->builder;
542 LLVMValueRef vec = NULL;
543 unsigned i;
544
545 if (value_count == 1 && !always_vector) {
546 if (load)
547 return LLVMBuildLoad(builder, values[0], "");
548 return values[0];
549 } else if (!value_count)
550 unreachable("value_count is 0");
551
552 for (i = 0; i < value_count; i++) {
553 LLVMValueRef value = values[i * value_stride];
554 if (load)
555 value = LLVMBuildLoad(builder, value, "");
556
557 if (!i)
558 vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
559 LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
560 vec = LLVMBuildInsertElement(builder, vec, value, index, "");
561 }
562 return vec;
563 }
564
565 LLVMValueRef
566 ac_build_gather_values(struct ac_llvm_context *ctx,
567 LLVMValueRef *values,
568 unsigned value_count)
569 {
570 return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
571 }
572
573 /* Expand a scalar or vector to <dst_channels x type> by filling the remaining
574 * channels with undef. Extract at most src_channels components from the input.
575 */
576 static LLVMValueRef
577 ac_build_expand(struct ac_llvm_context *ctx,
578 LLVMValueRef value,
579 unsigned src_channels,
580 unsigned dst_channels)
581 {
582 LLVMTypeRef elemtype;
583 LLVMValueRef chan[dst_channels];
584
585 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
586 unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
587
588 if (src_channels == dst_channels && vec_size == dst_channels)
589 return value;
590
591 src_channels = MIN2(src_channels, vec_size);
592
593 for (unsigned i = 0; i < src_channels; i++)
594 chan[i] = ac_llvm_extract_elem(ctx, value, i);
595
596 elemtype = LLVMGetElementType(LLVMTypeOf(value));
597 } else {
598 if (src_channels) {
599 assert(src_channels == 1);
600 chan[0] = value;
601 }
602 elemtype = LLVMTypeOf(value);
603 }
604
605 for (unsigned i = src_channels; i < dst_channels; i++)
606 chan[i] = LLVMGetUndef(elemtype);
607
608 return ac_build_gather_values(ctx, chan, dst_channels);
609 }
610
611 /* Expand a scalar or vector to <4 x type> by filling the remaining channels
612 * with undef. Extract at most num_channels components from the input.
613 */
614 LLVMValueRef ac_build_expand_to_vec4(struct ac_llvm_context *ctx,
615 LLVMValueRef value,
616 unsigned num_channels)
617 {
618 return ac_build_expand(ctx, value, num_channels, 4);
619 }
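/* For example (illustrative): expanding a v2f32 value "uv" for an image
 * intrinsic produces <uv.x, uv.y, undef, undef>:
 *
 *    LLVMValueRef vec = ac_build_expand_to_vec4(ctx, uv, 2);
 */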
620
621 LLVMValueRef ac_build_round(struct ac_llvm_context *ctx, LLVMValueRef value)
622 {
623 unsigned type_size = ac_get_type_size(LLVMTypeOf(value));
624 const char *name;
625
626 if (type_size == 2)
627 name = "llvm.rint.f16";
628 else if (type_size == 4)
629 name = "llvm.rint.f32";
630 else
631 name = "llvm.rint.f64";
632
633 return ac_build_intrinsic(ctx, name, LLVMTypeOf(value), &value, 1,
634 AC_FUNC_ATTR_READNONE);
635 }
636
637 LLVMValueRef
638 ac_build_fdiv(struct ac_llvm_context *ctx,
639 LLVMValueRef num,
640 LLVMValueRef den)
641 {
642 /* If we do (num / den), LLVM >= 7.0 does:
643 * return num * v_rcp_f32(den * (fabs(den) > 0x1.0p+96f ? 0x1.0p-32f : 1.0f));
644 *
645 * If we do (num * (1 / den)), LLVM does:
646 * return num * v_rcp_f32(den);
647 */
648 LLVMValueRef one = LLVMConstReal(LLVMTypeOf(num), 1.0);
649 LLVMValueRef rcp = LLVMBuildFDiv(ctx->builder, one, den, "");
650 LLVMValueRef ret = LLVMBuildFMul(ctx->builder, num, rcp, "");
651
652 /* Use v_rcp_f32 instead of precise division. */
653 if (!LLVMIsConstant(ret))
654 LLVMSetMetadata(ret, ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
655 return ret;
656 }
657
658 /* See fast_idiv_by_const.h. */
659 /* Set: increment = util_fast_udiv_info::increment ? multiplier : 0; */
660 LLVMValueRef ac_build_fast_udiv(struct ac_llvm_context *ctx,
661 LLVMValueRef num,
662 LLVMValueRef multiplier,
663 LLVMValueRef pre_shift,
664 LLVMValueRef post_shift,
665 LLVMValueRef increment)
666 {
667 LLVMBuilderRef builder = ctx->builder;
668
669 num = LLVMBuildLShr(builder, num, pre_shift, "");
670 num = LLVMBuildMul(builder,
671 LLVMBuildZExt(builder, num, ctx->i64, ""),
672 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
673 num = LLVMBuildAdd(builder, num,
674 LLVMBuildZExt(builder, increment, ctx->i64, ""), "");
675 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
676 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
677 return LLVMBuildLShr(builder, num, post_shift, "");
678 }
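/* Usage sketch (assumes util_compute_fast_udiv_info() and struct
 * util_fast_udiv_info from fast_idiv_by_const.h): divide by a divisor that
 * is uniform but only known at run time, with the magic constants computed
 * on the CPU:
 *
 *    struct util_fast_udiv_info info =
 *            util_compute_fast_udiv_info(divisor, 32, 32);
 *    LLVMValueRef q =
 *            ac_build_fast_udiv(ctx, num,
 *                               LLVMConstInt(ctx->i32, info.multiplier, 0),
 *                               LLVMConstInt(ctx->i32, info.pre_shift, 0),
 *                               LLVMConstInt(ctx->i32, info.post_shift, 0),
 *                               LLVMConstInt(ctx->i32, info.increment ?
 *                                            info.multiplier : 0, 0));
 */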
679
680 /* See fast_idiv_by_const.h. */
681 /* If num != UINT_MAX, this more efficient version can be used. */
682 /* Set: increment = util_fast_udiv_info::increment; */
683 LLVMValueRef ac_build_fast_udiv_nuw(struct ac_llvm_context *ctx,
684 LLVMValueRef num,
685 LLVMValueRef multiplier,
686 LLVMValueRef pre_shift,
687 LLVMValueRef post_shift,
688 LLVMValueRef increment)
689 {
690 LLVMBuilderRef builder = ctx->builder;
691
692 num = LLVMBuildLShr(builder, num, pre_shift, "");
693 num = LLVMBuildNUWAdd(builder, num, increment, "");
694 num = LLVMBuildMul(builder,
695 LLVMBuildZExt(builder, num, ctx->i64, ""),
696 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
697 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
698 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
699 return LLVMBuildLShr(builder, num, post_shift, "");
700 }
701
702 /* See fast_idiv_by_const.h. */
703 /* Both operands must fit in 31 bits and the divisor must not be 1. */
704 LLVMValueRef ac_build_fast_udiv_u31_d_not_one(struct ac_llvm_context *ctx,
705 LLVMValueRef num,
706 LLVMValueRef multiplier,
707 LLVMValueRef post_shift)
708 {
709 LLVMBuilderRef builder = ctx->builder;
710
711 num = LLVMBuildMul(builder,
712 LLVMBuildZExt(builder, num, ctx->i64, ""),
713 LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
714 num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
715 num = LLVMBuildTrunc(builder, num, ctx->i32, "");
716 return LLVMBuildLShr(builder, num, post_shift, "");
717 }
718
719 /* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
720 * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
721 * already multiplied by two. id is the cube face number.
722 */
723 struct cube_selection_coords {
724 LLVMValueRef stc[2];
725 LLVMValueRef ma;
726 LLVMValueRef id;
727 };
728
729 static void
730 build_cube_intrinsic(struct ac_llvm_context *ctx,
731 LLVMValueRef in[3],
732 struct cube_selection_coords *out)
733 {
734 LLVMTypeRef f32 = ctx->f32;
735
736 out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc",
737 f32, in, 3, AC_FUNC_ATTR_READNONE);
738 out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc",
739 f32, in, 3, AC_FUNC_ATTR_READNONE);
740 out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema",
741 f32, in, 3, AC_FUNC_ATTR_READNONE);
742 out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid",
743 f32, in, 3, AC_FUNC_ATTR_READNONE);
744 }
745
746 /**
747 * Build a manual selection sequence for cube face sc/tc coordinates and
748 * major axis vector (multiplied by 2 for consistency) for the given
749 * vec3 \p coords, for the face implied by \p selcoords.
750 *
751 * For the major axis, we always adjust the sign to be in the direction of
752 * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
753 * the selcoords major axis.
754 */
755 static void build_cube_select(struct ac_llvm_context *ctx,
756 const struct cube_selection_coords *selcoords,
757 const LLVMValueRef *coords,
758 LLVMValueRef *out_st,
759 LLVMValueRef *out_ma)
760 {
761 LLVMBuilderRef builder = ctx->builder;
762 LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
763 LLVMValueRef is_ma_positive;
764 LLVMValueRef sgn_ma;
765 LLVMValueRef is_ma_z, is_not_ma_z;
766 LLVMValueRef is_ma_y;
767 LLVMValueRef is_ma_x;
768 LLVMValueRef sgn;
769 LLVMValueRef tmp;
770
771 is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE,
772 selcoords->ma, LLVMConstReal(f32, 0.0), "");
773 sgn_ma = LLVMBuildSelect(builder, is_ma_positive,
774 LLVMConstReal(f32, 1.0), LLVMConstReal(f32, -1.0), "");
775
776 is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
777 is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
778 is_ma_y = LLVMBuildAnd(builder, is_not_ma_z,
779 LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
780 is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");
781
782 /* Select sc */
783 tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
784 sgn = LLVMBuildSelect(builder, is_ma_y, LLVMConstReal(f32, 1.0),
785 LLVMBuildSelect(builder, is_ma_z, sgn_ma,
786 LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
787 out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");
788
789 /* Select tc */
790 tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
791 sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma,
792 LLVMConstReal(f32, -1.0), "");
793 out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");
794
795 /* Select ma */
796 tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
797 LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
798 tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32",
799 ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
800 *out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
801 }
802
803 void
804 ac_prepare_cube_coords(struct ac_llvm_context *ctx,
805 bool is_deriv, bool is_array, bool is_lod,
806 LLVMValueRef *coords_arg,
807 LLVMValueRef *derivs_arg)
808 {
809
810 LLVMBuilderRef builder = ctx->builder;
811 struct cube_selection_coords selcoords;
812 LLVMValueRef coords[3];
813 LLVMValueRef invma;
814
815 if (is_array && !is_lod) {
816 LLVMValueRef tmp = ac_build_round(ctx, coords_arg[3]);
817
818 /* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
819 *
820 * "For Array forms, the array layer used will be
821 *
822 * max(0, min(d−1, floor(layer+0.5)))
823 *
824 * where d is the depth of the texture array and layer
825 *     comes from the component indicated in the tables below."
826 *
827 * This works around an issue where the layer is taken from a helper
828 * invocation which happens to fall on a different layer due to extrapolation.
829 *
830 * GFX8 and earlier attempt to implement this in hardware by
831 * clamping the value of coords[2] = (8 * layer) + face.
832 * Unfortunately, this means that we end up with the wrong
833 * face when clamping occurs.
834 *
835 * Clamp the layer earlier to work around the issue.
836 */
837 if (ctx->chip_class <= GFX8) {
838 LLVMValueRef ge0;
839 ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
840 tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
841 }
842
843 coords_arg[3] = tmp;
844 }
845
846 build_cube_intrinsic(ctx, coords_arg, &selcoords);
847
848 invma = ac_build_intrinsic(ctx, "llvm.fabs.f32",
849 ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
850 invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);
851
852 for (int i = 0; i < 2; ++i)
853 coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");
854
855 coords[2] = selcoords.id;
856
857 if (is_deriv && derivs_arg) {
858 LLVMValueRef derivs[4];
859 int axis;
860
861 /* Convert cube derivatives to 2D derivatives. */
862 for (axis = 0; axis < 2; axis++) {
863 LLVMValueRef deriv_st[2];
864 LLVMValueRef deriv_ma;
865
866 /* Transform the derivative alongside the texture
867 * coordinate. Mathematically, the correct formula is
868 * as follows. Assume we're projecting onto the +Z face
869 * and denote by dx/dh the derivative of the (original)
870 * X texture coordinate with respect to horizontal
871 * window coordinates. The projection onto the +Z face
872 * plane is:
873 *
874 * f(x,z) = x/z
875 *
876 * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
877 * = 1/z * dx/dh - x/z * 1/z * dz/dh.
878 *
879 * This motivates the implementation below.
880 *
881 * Whether this actually gives the expected results for
882 * apps that might feed in derivatives obtained via
883 * finite differences is anyone's guess. The OpenGL spec
884 * seems awfully quiet about how textureGrad for cube
885 * maps should be handled.
886 */
887 build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3],
888 deriv_st, &deriv_ma);
889
890 deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");
891
892 for (int i = 0; i < 2; ++i)
893 derivs[axis * 2 + i] =
894 LLVMBuildFSub(builder,
895 LLVMBuildFMul(builder, deriv_st[i], invma, ""),
896 LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
897 }
898
899 memcpy(derivs_arg, derivs, sizeof(derivs));
900 }
901
902 /* Shift the texture coordinate. This must be applied after the
903 * derivative calculation.
904 */
905 for (int i = 0; i < 2; ++i)
906 coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");
907
908 if (is_array) {
909 /* For cube arrays, coord.z = array_index * 8 + face, where the
910 * array index comes from the coords_arg.w component. */
911 coords[2] = ac_build_fmad(ctx, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), coords[2]);
912 }
913
914 memcpy(coords_arg, coords, sizeof(coords));
915 }
916
917
918 LLVMValueRef
919 ac_build_fs_interp(struct ac_llvm_context *ctx,
920 LLVMValueRef llvm_chan,
921 LLVMValueRef attr_number,
922 LLVMValueRef params,
923 LLVMValueRef i,
924 LLVMValueRef j)
925 {
926 LLVMValueRef args[5];
927 LLVMValueRef p1;
928
929 args[0] = i;
930 args[1] = llvm_chan;
931 args[2] = attr_number;
932 args[3] = params;
933
934 p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
935 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
936
937 args[0] = p1;
938 args[1] = j;
939 args[2] = llvm_chan;
940 args[3] = attr_number;
941 args[4] = params;
942
943 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
944 ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
945 }
946
947 LLVMValueRef
948 ac_build_fs_interp_f16(struct ac_llvm_context *ctx,
949 LLVMValueRef llvm_chan,
950 LLVMValueRef attr_number,
951 LLVMValueRef params,
952 LLVMValueRef i,
953 LLVMValueRef j)
954 {
955 LLVMValueRef args[6];
956 LLVMValueRef p1;
957
958 args[0] = i;
959 args[1] = llvm_chan;
960 args[2] = attr_number;
961 args[3] = ctx->i1false;
962 args[4] = params;
963
964 p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1.f16",
965 ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
966
967 args[0] = p1;
968 args[1] = j;
969 args[2] = llvm_chan;
970 args[3] = attr_number;
971 args[4] = ctx->i1false;
972 args[5] = params;
973
974 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2.f16",
975 ctx->f16, args, 6, AC_FUNC_ATTR_READNONE);
976 }
977
978 LLVMValueRef
979 ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
980 LLVMValueRef parameter,
981 LLVMValueRef llvm_chan,
982 LLVMValueRef attr_number,
983 LLVMValueRef params)
984 {
985 LLVMValueRef args[4];
986
987 args[0] = parameter;
988 args[1] = llvm_chan;
989 args[2] = attr_number;
990 args[3] = params;
991
992 return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov",
993 ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
994 }
995
996 LLVMValueRef
997 ac_build_gep_ptr(struct ac_llvm_context *ctx,
998 LLVMValueRef base_ptr,
999 LLVMValueRef index)
1000 {
1001 return LLVMBuildGEP(ctx->builder, base_ptr, &index, 1, "");
1002 }
1003
1004 LLVMValueRef
1005 ac_build_gep0(struct ac_llvm_context *ctx,
1006 LLVMValueRef base_ptr,
1007 LLVMValueRef index)
1008 {
1009 LLVMValueRef indices[2] = {
1010 ctx->i32_0,
1011 index,
1012 };
1013 return LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
1014 }
1015
1016 LLVMValueRef ac_build_pointer_add(struct ac_llvm_context *ctx, LLVMValueRef ptr,
1017 LLVMValueRef index)
1018 {
1019 return LLVMBuildPointerCast(ctx->builder,
1020 LLVMBuildGEP(ctx->builder, ptr, &index, 1, ""),
1021 LLVMTypeOf(ptr), "");
1022 }
1023
1024 void
1025 ac_build_indexed_store(struct ac_llvm_context *ctx,
1026 LLVMValueRef base_ptr, LLVMValueRef index,
1027 LLVMValueRef value)
1028 {
1029 LLVMBuildStore(ctx->builder, value,
1030 ac_build_gep0(ctx, base_ptr, index));
1031 }
1032
1033 /**
1034 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
1035 * It's equivalent to doing a load from &base_ptr[index].
1036 *
1037 * \param base_ptr Where the array starts.
1038 * \param index The element index into the array.
1039 * \param uniform Whether the base_ptr and index can be assumed to be
1040 * dynamically uniform (i.e. load to an SGPR)
1041 * \param invariant Whether the load is invariant (no other opcodes affect it)
1042 * \param no_unsigned_wraparound
1043 * For all possible re-associations and re-distributions of an expression
1044 * "base_ptr + index * elemsize" into "addr + offset" (excluding GEPs
1045 * without inbounds in base_ptr), this parameter is true if "addr + offset"
1046 * does not result in an unsigned integer wraparound. This is used for
1047 * optimal code generation of 32-bit pointer arithmetic.
1048 *
1049 * For example, a 32-bit immediate offset that causes a 32-bit unsigned
1050 * integer wraparound can't be an imm offset in s_load_dword, because
1051 * the instruction performs "addr + offset" in 64 bits.
1052 *
1053 * Expected usage for bindless textures by chaining GEPs:
1054 * // possible unsigned wraparound, don't use InBounds:
1055 * ptr1 = LLVMBuildGEP(base_ptr, index);
1056 * image = load(ptr1); // becomes "s_load ptr1, 0"
1057 *
1058 * ptr2 = LLVMBuildInBoundsGEP(ptr1, 32 / elemsize);
1059 * sampler = load(ptr2); // becomes "s_load ptr1, 32" thanks to InBounds
1060 */
1061 static LLVMValueRef
1062 ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1063 LLVMValueRef index, bool uniform, bool invariant,
1064 bool no_unsigned_wraparound)
1065 {
1066 LLVMValueRef pointer, result;
1067
1068 if (no_unsigned_wraparound &&
1069 LLVMGetPointerAddressSpace(LLVMTypeOf(base_ptr)) == AC_ADDR_SPACE_CONST_32BIT)
1070 pointer = LLVMBuildInBoundsGEP(ctx->builder, base_ptr, &index, 1, "");
1071 else
1072 pointer = LLVMBuildGEP(ctx->builder, base_ptr, &index, 1, "");
1073
1074 if (uniform)
1075 LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
1076 result = LLVMBuildLoad(ctx->builder, pointer, "");
1077 if (invariant)
1078 LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
1079 return result;
1080 }
1081
1082 LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1083 LLVMValueRef index)
1084 {
1085 return ac_build_load_custom(ctx, base_ptr, index, false, false, false);
1086 }
1087
1088 LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx,
1089 LLVMValueRef base_ptr, LLVMValueRef index)
1090 {
1091 return ac_build_load_custom(ctx, base_ptr, index, false, true, false);
1092 }
1093
1094 /* This assumes that there is no unsigned integer wraparound during the address
1095 * computation, excluding all GEPs within base_ptr. */
1096 LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx,
1097 LLVMValueRef base_ptr, LLVMValueRef index)
1098 {
1099 return ac_build_load_custom(ctx, base_ptr, index, true, true, true);
1100 }
1101
1102 /* See ac_build_load_custom() documentation. */
1103 LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx,
1104 LLVMValueRef base_ptr, LLVMValueRef index)
1105 {
1106 return ac_build_load_custom(ctx, base_ptr, index, true, true, false);
1107 }
1108
1109 static void
1110 ac_build_buffer_store_common(struct ac_llvm_context *ctx,
1111 LLVMValueRef rsrc,
1112 LLVMValueRef data,
1113 LLVMValueRef vindex,
1114 LLVMValueRef voffset,
1115 unsigned num_channels,
1116 bool glc,
1117 bool slc,
1118 bool writeonly_memory,
1119 bool use_format)
1120 {
1121 LLVMValueRef args[] = {
1122 data,
1123 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1124 vindex ? vindex : ctx->i32_0,
1125 voffset,
1126 LLVMConstInt(ctx->i1, glc, 0),
1127 LLVMConstInt(ctx->i1, slc, 0)
1128 };
1129 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1130
1131 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1132 char name[256];
1133
1134 if (use_format) {
1135 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.format.%s",
1136 type_names[func]);
1137 } else {
1138 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s",
1139 type_names[func]);
1140 }
1141
1142 ac_build_intrinsic(ctx, name, ctx->voidt, args, ARRAY_SIZE(args),
1143 ac_get_store_intr_attribs(writeonly_memory));
1144 }
1145
1146 static void
1147 ac_build_llvm8_buffer_store_common(struct ac_llvm_context *ctx,
1148 LLVMValueRef rsrc,
1149 LLVMValueRef data,
1150 LLVMValueRef vindex,
1151 LLVMValueRef voffset,
1152 LLVMValueRef soffset,
1153 unsigned num_channels,
1154 LLVMTypeRef return_channel_type,
1155 bool glc,
1156 bool slc,
1157 bool writeonly_memory,
1158 bool use_format,
1159 bool structurized)
1160 {
1161 LLVMValueRef args[6];
1162 int idx = 0;
1163 args[idx++] = data;
1164 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1165 if (structurized)
1166 args[idx++] = vindex ? vindex : ctx->i32_0;
1167 args[idx++] = voffset ? voffset : ctx->i32_0;
1168 args[idx++] = soffset ? soffset : ctx->i32_0;
1169 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
1170 unsigned func = !ac_has_vec3_support(ctx->chip_class, use_format) && num_channels == 3 ? 4 : num_channels;
1171 const char *indexing_kind = structurized ? "struct" : "raw";
1172 char name[256], type_name[8];
1173
1174 LLVMTypeRef type = func > 1 ? LLVMVectorType(return_channel_type, func) : return_channel_type;
1175 ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1176
1177 if (use_format) {
1178 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.format.%s",
1179 indexing_kind, type_name);
1180 } else {
1181 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.%s",
1182 indexing_kind, type_name);
1183 }
1184
1185 ac_build_intrinsic(ctx, name, ctx->voidt, args, idx,
1186 ac_get_store_intr_attribs(writeonly_memory));
1187 }
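/* Worked example of the overload selection above: a raw (non-structurized)
 * 3-channel f32 store without vec3 support (see ac_has_vec3_support()) is
 * widened to func = 4 and emits "llvm.amdgcn.raw.buffer.store.v4f32";
 * with vec3 support it emits "llvm.amdgcn.raw.buffer.store.v3f32".
 */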
1188
1189 void
1190 ac_build_buffer_store_format(struct ac_llvm_context *ctx,
1191 LLVMValueRef rsrc,
1192 LLVMValueRef data,
1193 LLVMValueRef vindex,
1194 LLVMValueRef voffset,
1195 unsigned num_channels,
1196 bool glc,
1197 bool writeonly_memory)
1198 {
1199 if (HAVE_LLVM >= 0x800) {
1200 ac_build_llvm8_buffer_store_common(ctx, rsrc, data, vindex,
1201 voffset, NULL, num_channels,
1202 ctx->f32, glc, false,
1203 writeonly_memory, true, true);
1204 } else {
1205 ac_build_buffer_store_common(ctx, rsrc, data, vindex, voffset,
1206 num_channels, glc, false,
1207 writeonly_memory, true);
1208 }
1209 }
1210
1211 /* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
1212 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
1213 * or v4i32 (num_channels=3,4).
1214 */
1215 void
1216 ac_build_buffer_store_dword(struct ac_llvm_context *ctx,
1217 LLVMValueRef rsrc,
1218 LLVMValueRef vdata,
1219 unsigned num_channels,
1220 LLVMValueRef voffset,
1221 LLVMValueRef soffset,
1222 unsigned inst_offset,
1223 bool glc,
1224 bool slc,
1225 bool writeonly_memory,
1226 bool swizzle_enable_hint)
1227 {
1228 /* Split 3-channel stores, because only LLVM 9+ supports 3-channel
1229 * intrinsics. */
1230 if (num_channels == 3 && !ac_has_vec3_support(ctx->chip_class, false)) {
1231 LLVMValueRef v[3], v01;
1232
1233 for (int i = 0; i < 3; i++) {
1234 v[i] = LLVMBuildExtractElement(ctx->builder, vdata,
1235 LLVMConstInt(ctx->i32, i, 0), "");
1236 }
1237 v01 = ac_build_gather_values(ctx, v, 2);
1238
1239 ac_build_buffer_store_dword(ctx, rsrc, v01, 2, voffset,
1240 soffset, inst_offset, glc, slc,
1241 writeonly_memory, swizzle_enable_hint);
1242 ac_build_buffer_store_dword(ctx, rsrc, v[2], 1, voffset,
1243 soffset, inst_offset + 8,
1244 glc, slc,
1245 writeonly_memory, swizzle_enable_hint);
1246 return;
1247 }
1248
1249 /* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
1250 * (voffset is swizzled, but soffset isn't swizzled).
1251 * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
1252 */
1253 if (!swizzle_enable_hint) {
1254 LLVMValueRef offset = soffset;
1255
1256 if (inst_offset)
1257 offset = LLVMBuildAdd(ctx->builder, offset,
1258 LLVMConstInt(ctx->i32, inst_offset, 0), "");
1259
1260 if (HAVE_LLVM >= 0x800) {
1261 ac_build_llvm8_buffer_store_common(ctx, rsrc,
1262 ac_to_float(ctx, vdata),
1263 ctx->i32_0,
1264 voffset, offset,
1265 num_channels,
1266 ctx->f32,
1267 glc, slc,
1268 writeonly_memory,
1269 false, false);
1270 } else {
1271 if (voffset)
1272 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1273
1274 ac_build_buffer_store_common(ctx, rsrc,
1275 ac_to_float(ctx, vdata),
1276 ctx->i32_0, offset,
1277 num_channels, glc, slc,
1278 writeonly_memory, false);
1279 }
1280 return;
1281 }
1282
1283 static const unsigned dfmts[] = {
1284 V_008F0C_BUF_DATA_FORMAT_32,
1285 V_008F0C_BUF_DATA_FORMAT_32_32,
1286 V_008F0C_BUF_DATA_FORMAT_32_32_32,
1287 V_008F0C_BUF_DATA_FORMAT_32_32_32_32
1288 };
1289 unsigned dfmt = dfmts[num_channels - 1];
1290 unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
1291 LLVMValueRef immoffset = LLVMConstInt(ctx->i32, inst_offset, 0);
1292
1293 ac_build_raw_tbuffer_store(ctx, rsrc, vdata, voffset, soffset,
1294 immoffset, num_channels, dfmt, nfmt, glc,
1295 slc, writeonly_memory);
1296 }
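/* Example call (illustrative): storing a v3i32 without vec3 support goes
 * through the split path above and becomes a v2i32 store at inst_offset
 * plus an i32 store at inst_offset + 8:
 *
 *    ac_build_buffer_store_dword(ctx, rsrc, vdata, 3, voffset, ctx->i32_0,
 *                                0, false, false, false, false);
 */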
1297
1298 static LLVMValueRef
1299 ac_build_buffer_load_common(struct ac_llvm_context *ctx,
1300 LLVMValueRef rsrc,
1301 LLVMValueRef vindex,
1302 LLVMValueRef voffset,
1303 unsigned num_channels,
1304 bool glc,
1305 bool slc,
1306 bool can_speculate,
1307 bool use_format)
1308 {
1309 LLVMValueRef args[] = {
1310 LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
1311 vindex ? vindex : ctx->i32_0,
1312 voffset,
1313 LLVMConstInt(ctx->i1, glc, 0),
1314 LLVMConstInt(ctx->i1, slc, 0)
1315 };
1316 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1317
1318 LLVMTypeRef types[] = {ctx->f32, ctx->v2f32, ctx->v4f32};
1319 const char *type_names[] = {"f32", "v2f32", "v4f32"};
1320 char name[256];
1321
1322 if (use_format) {
1323 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.format.%s",
1324 type_names[func]);
1325 } else {
1326 snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
1327 type_names[func]);
1328 }
1329
1330 return ac_build_intrinsic(ctx, name, types[func], args,
1331 ARRAY_SIZE(args),
1332 ac_get_load_intr_attribs(can_speculate));
1333 }
1334
1335 static LLVMValueRef
1336 ac_build_llvm8_buffer_load_common(struct ac_llvm_context *ctx,
1337 LLVMValueRef rsrc,
1338 LLVMValueRef vindex,
1339 LLVMValueRef voffset,
1340 LLVMValueRef soffset,
1341 unsigned num_channels,
1342 LLVMTypeRef channel_type,
1343 bool glc,
1344 bool slc,
1345 bool can_speculate,
1346 bool use_format,
1347 bool structurized)
1348 {
1349 LLVMValueRef args[5];
1350 int idx = 0;
1351 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1352 if (structurized)
1353 args[idx++] = vindex ? vindex : ctx->i32_0;
1354 args[idx++] = voffset ? voffset : ctx->i32_0;
1355 args[idx++] = soffset ? soffset : ctx->i32_0;
1356 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
1357 unsigned func = !ac_has_vec3_support(ctx->chip_class, use_format) && num_channels == 3 ? 4 : num_channels;
1358 const char *indexing_kind = structurized ? "struct" : "raw";
1359 char name[256], type_name[8];
1360
1361 LLVMTypeRef type = func > 1 ? LLVMVectorType(channel_type, func) : channel_type;
1362 ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1363
1364 if (use_format) {
1365 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.format.%s",
1366 indexing_kind, type_name);
1367 } else {
1368 snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.%s",
1369 indexing_kind, type_name);
1370 }
1371
1372 return ac_build_intrinsic(ctx, name, type, args, idx,
1373 ac_get_load_intr_attribs(can_speculate));
1374 }
1375
1376 LLVMValueRef
1377 ac_build_buffer_load(struct ac_llvm_context *ctx,
1378 LLVMValueRef rsrc,
1379 int num_channels,
1380 LLVMValueRef vindex,
1381 LLVMValueRef voffset,
1382 LLVMValueRef soffset,
1383 unsigned inst_offset,
1384 unsigned glc,
1385 unsigned slc,
1386 bool can_speculate,
1387 bool allow_smem)
1388 {
1389 LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
1390 if (voffset)
1391 offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1392 if (soffset)
1393 offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
1394
1395 if (allow_smem && !slc &&
1396 (!glc || (HAVE_LLVM >= 0x0800 && ctx->chip_class >= GFX8))) {
1397 assert(vindex == NULL);
1398
1399 LLVMValueRef result[8];
1400
1401 for (int i = 0; i < num_channels; i++) {
1402 if (i) {
1403 offset = LLVMBuildAdd(ctx->builder, offset,
1404 LLVMConstInt(ctx->i32, 4, 0), "");
1405 }
1406 const char *intrname =
1407 HAVE_LLVM >= 0x0800 ? "llvm.amdgcn.s.buffer.load.f32"
1408 : "llvm.SI.load.const.v4i32";
1409 unsigned num_args = HAVE_LLVM >= 0x0800 ? 3 : 2;
1410 LLVMValueRef args[3] = {
1411 rsrc,
1412 offset,
1413 glc ? ctx->i32_1 : ctx->i32_0,
1414 };
1415 result[i] = ac_build_intrinsic(ctx, intrname,
1416 ctx->f32, args, num_args,
1417 AC_FUNC_ATTR_READNONE |
1418 (HAVE_LLVM < 0x0800 ? AC_FUNC_ATTR_LEGACY : 0));
1419 }
1420 if (num_channels == 1)
1421 return result[0];
1422
1423 if (num_channels == 3 && !ac_has_vec3_support(ctx->chip_class, false))
1424 result[num_channels++] = LLVMGetUndef(ctx->f32);
1425 return ac_build_gather_values(ctx, result, num_channels);
1426 }
1427
1428 if (HAVE_LLVM >= 0x0800) {
1429 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex,
1430 offset, ctx->i32_0,
1431 num_channels, ctx->f32,
1432 glc, slc,
1433 can_speculate, false,
1434 false);
1435 }
1436
1437 return ac_build_buffer_load_common(ctx, rsrc, vindex, offset,
1438 num_channels, glc, slc,
1439 can_speculate, false);
1440 }
1441
1442 LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
1443 LLVMValueRef rsrc,
1444 LLVMValueRef vindex,
1445 LLVMValueRef voffset,
1446 unsigned num_channels,
1447 bool glc,
1448 bool can_speculate)
1449 {
1450 if (HAVE_LLVM >= 0x800) {
1451 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1452 num_channels, ctx->f32,
1453 glc, false,
1454 can_speculate, true, true);
1455 }
1456 return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset,
1457 num_channels, glc, false,
1458 can_speculate, true);
1459 }
1460
1461 LLVMValueRef ac_build_buffer_load_format_gfx9_safe(struct ac_llvm_context *ctx,
1462 LLVMValueRef rsrc,
1463 LLVMValueRef vindex,
1464 LLVMValueRef voffset,
1465 unsigned num_channels,
1466 bool glc,
1467 bool can_speculate)
1468 {
1469 if (HAVE_LLVM >= 0x800) {
1470 return ac_build_llvm8_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0,
1471 num_channels, ctx->f32,
1472 glc, false,
1473 can_speculate, true, true);
1474 }
1475
1476 LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 2, 0), "");
1477 LLVMValueRef stride = LLVMBuildExtractElement(ctx->builder, rsrc, ctx->i32_1, "");
1478 stride = LLVMBuildLShr(ctx->builder, stride, LLVMConstInt(ctx->i32, 16, 0), "");
1479
1480 LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->builder,
1481 LLVMBuildICmp(ctx->builder, LLVMIntUGT, elem_count, stride, ""),
1482 elem_count, stride, "");
1483
1484 LLVMValueRef new_rsrc = LLVMBuildInsertElement(ctx->builder, rsrc, new_elem_count,
1485 LLVMConstInt(ctx->i32, 2, 0), "");
1486
1487 return ac_build_buffer_load_common(ctx, new_rsrc, vindex, voffset,
1488 num_channels, glc, false,
1489 can_speculate, true);
1490 }
1491
1492 static LLVMValueRef
1493 ac_build_llvm8_tbuffer_load(struct ac_llvm_context *ctx,
1494 LLVMValueRef rsrc,
1495 LLVMValueRef vindex,
1496 LLVMValueRef voffset,
1497 LLVMValueRef soffset,
1498 unsigned num_channels,
1499 unsigned dfmt,
1500 unsigned nfmt,
1501 bool glc,
1502 bool slc,
1503 bool can_speculate,
1504 bool structurized)
1505 {
1506 LLVMValueRef args[6];
1507 int idx = 0;
1508 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1509 if (structurized)
1510 args[idx++] = vindex ? vindex : ctx->i32_0;
1511 args[idx++] = voffset ? voffset : ctx->i32_0;
1512 args[idx++] = soffset ? soffset : ctx->i32_0;
1513 args[idx++] = LLVMConstInt(ctx->i32, dfmt | (nfmt << 4), 0);
1514 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
1515 unsigned func = !ac_has_vec3_support(ctx->chip_class, true) && num_channels == 3 ? 4 : num_channels;
1516 const char *indexing_kind = structurized ? "struct" : "raw";
1517 char name[256], type_name[8];
1518
1519 LLVMTypeRef type = func > 1 ? LLVMVectorType(ctx->i32, func) : ctx->i32;
1520 ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1521
1522 snprintf(name, sizeof(name), "llvm.amdgcn.%s.tbuffer.load.%s",
1523 indexing_kind, type_name);
1524
1525 return ac_build_intrinsic(ctx, name, type, args, idx,
1526 ac_get_load_intr_attribs(can_speculate));
1527 }
1528
1529 static LLVMValueRef
1530 ac_build_tbuffer_load(struct ac_llvm_context *ctx,
1531 LLVMValueRef rsrc,
1532 LLVMValueRef vindex,
1533 LLVMValueRef voffset,
1534 LLVMValueRef soffset,
1535 LLVMValueRef immoffset,
1536 unsigned num_channels,
1537 unsigned dfmt,
1538 unsigned nfmt,
1539 bool glc,
1540 bool slc,
1541 bool can_speculate,
1542 bool structurized) /* only matters for LLVM 8+ */
1543 {
1544 if (HAVE_LLVM >= 0x800) {
1545 voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1546
1547 return ac_build_llvm8_tbuffer_load(ctx, rsrc, vindex, voffset,
1548 soffset, num_channels,
1549 dfmt, nfmt, glc, slc,
1550 can_speculate, structurized);
1551 }
1552
1553 LLVMValueRef args[] = {
1554 rsrc,
1555 vindex ? vindex : ctx->i32_0,
1556 voffset,
1557 soffset,
1558 immoffset,
1559 LLVMConstInt(ctx->i32, dfmt, false),
1560 LLVMConstInt(ctx->i32, nfmt, false),
1561 LLVMConstInt(ctx->i1, glc, false),
1562 LLVMConstInt(ctx->i1, slc, false),
1563 };
1564 unsigned func = CLAMP(num_channels, 1, 3) - 1;
1565 LLVMTypeRef types[] = {ctx->i32, ctx->v2i32, ctx->v4i32};
1566 const char *type_names[] = {"i32", "v2i32", "v4i32"};
1567 char name[256];
1568
1569 snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.load.%s",
1570 type_names[func]);
1571
1572 return ac_build_intrinsic(ctx, name, types[func], args, 9,
1573 ac_get_load_intr_attribs(can_speculate));
1574 }
1575
1576 LLVMValueRef
1577 ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx,
1578 LLVMValueRef rsrc,
1579 LLVMValueRef vindex,
1580 LLVMValueRef voffset,
1581 LLVMValueRef soffset,
1582 LLVMValueRef immoffset,
1583 unsigned num_channels,
1584 unsigned dfmt,
1585 unsigned nfmt,
1586 bool glc,
1587 bool slc,
1588 bool can_speculate)
1589 {
1590 return ac_build_tbuffer_load(ctx, rsrc, vindex, voffset, soffset,
1591 immoffset, num_channels, dfmt, nfmt, glc,
1592 slc, can_speculate, true);
1593 }
1594
1595 LLVMValueRef
1596 ac_build_raw_tbuffer_load(struct ac_llvm_context *ctx,
1597 LLVMValueRef rsrc,
1598 LLVMValueRef voffset,
1599 LLVMValueRef soffset,
1600 LLVMValueRef immoffset,
1601 unsigned num_channels,
1602 unsigned dfmt,
1603 unsigned nfmt,
1604 bool glc,
1605 bool slc,
1606 bool can_speculate)
1607 {
1608 return ac_build_tbuffer_load(ctx, rsrc, NULL, voffset, soffset,
1609 immoffset, num_channels, dfmt, nfmt, glc,
1610 slc, can_speculate, false);
1611 }
1612
1613 LLVMValueRef
1614 ac_build_tbuffer_load_short(struct ac_llvm_context *ctx,
1615 LLVMValueRef rsrc,
1616 LLVMValueRef voffset,
1617 LLVMValueRef soffset,
1618 LLVMValueRef immoffset,
1619 bool glc)
1620 {
1621 LLVMValueRef res;
1622
1623 if (HAVE_LLVM >= 0x900) {
1624 voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1625
1626 /* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
1627 res = ac_build_llvm8_buffer_load_common(ctx, rsrc, NULL,
1628 voffset, soffset,
1629 1, ctx->i16, glc, false,
1630 false, false, false);
1631 } else {
1632 unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_16;
1633 unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
1634
1635 res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset,
1636 immoffset, 1, dfmt, nfmt, glc, false,
1637 false);
1638
1639 res = LLVMBuildTrunc(ctx->builder, res, ctx->i16, "");
1640 }
1641
1642 return res;
1643 }
1644
1645 LLVMValueRef
1646 ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx,
1647 LLVMValueRef rsrc,
1648 LLVMValueRef voffset,
1649 LLVMValueRef soffset,
1650 LLVMValueRef immoffset,
1651 bool glc)
1652 {
1653 LLVMValueRef res;
1654
1655 if (HAVE_LLVM >= 0x900) {
1656 voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1657
1658 /* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
1659 res = ac_build_llvm8_buffer_load_common(ctx, rsrc, NULL,
1660 voffset, soffset,
1661 1, ctx->i8, glc, false,
1662 false, false, false);
1663 } else {
1664 unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_8;
1665 unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
1666
1667 res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset,
1668 immoffset, 1, dfmt, nfmt, glc, false,
1669 false);
1670
1671 res = LLVMBuildTrunc(ctx->builder, res, ctx->i8, "");
1672 }
1673
1674 return res;
1675 }
1676
1677 /**
1678 * Convert an 11- or 10-bit unsigned floating point number to an f32.
1679 *
1680 * The input exponent is expected to be biased analogous to IEEE-754, i.e. by
1681 * 2^(exp_bits-1) - 1 (as defined in OpenGL and other graphics APIs).
1682 */
1683 static LLVMValueRef
1684 ac_ufN_to_float(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned exp_bits, unsigned mant_bits)
1685 {
1686 assert(LLVMTypeOf(src) == ctx->i32);
1687
1688 LLVMValueRef tmp;
1689 LLVMValueRef mantissa;
1690 mantissa = LLVMBuildAnd(ctx->builder, src, LLVMConstInt(ctx->i32, (1 << mant_bits) - 1, false), "");
1691
1692 /* Converting normal numbers is just a shift + correcting the exponent bias */
1693 unsigned normal_shift = 23 - mant_bits;
1694 unsigned bias_shift = 127 - ((1 << (exp_bits - 1)) - 1);
1695 LLVMValueRef shifted, normal;
1696
1697 shifted = LLVMBuildShl(ctx->builder, src, LLVMConstInt(ctx->i32, normal_shift, false), "");
1698 normal = LLVMBuildAdd(ctx->builder, shifted, LLVMConstInt(ctx->i32, bias_shift << 23, false), "");
1699
1700 /* Converting nan/inf numbers is the same, but with a different exponent update */
1701 LLVMValueRef naninf;
1702 naninf = LLVMBuildOr(ctx->builder, normal, LLVMConstInt(ctx->i32, 0xff << 23, false), "");
1703
1704 /* Converting denormals is the complex case: determine the leading zeros of the
1705 * mantissa to obtain the correct shift for the mantissa and exponent correction.
1706 */
1707 LLVMValueRef denormal;
1708 LLVMValueRef params[2] = {
1709 mantissa,
1710 ctx->i1true, /* result can be undef when arg is 0 */
1711 };
1712 LLVMValueRef ctlz = ac_build_intrinsic(ctx, "llvm.ctlz.i32", ctx->i32,
1713 params, 2, AC_FUNC_ATTR_READNONE);
1714
1715 /* Shift such that the leading 1 ends up as the LSB of the exponent field. */
1716 tmp = LLVMBuildSub(ctx->builder, ctlz, LLVMConstInt(ctx->i32, 8, false), "");
1717 denormal = LLVMBuildShl(ctx->builder, mantissa, tmp, "");
1718
1719 unsigned denormal_exp = bias_shift + (32 - mant_bits) - 1;
1720 tmp = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, denormal_exp, false), ctlz, "");
1721 tmp = LLVMBuildShl(ctx->builder, tmp, LLVMConstInt(ctx->i32, 23, false), "");
1722 denormal = LLVMBuildAdd(ctx->builder, denormal, tmp, "");
1723
1724 /* Select the final result. */
1725 LLVMValueRef result;
1726
1727 tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
1728 LLVMConstInt(ctx->i32, ((1 << exp_bits) - 1) << mant_bits, false), "");
1729 result = LLVMBuildSelect(ctx->builder, tmp, naninf, normal, "");
1730
1731 tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
1732 LLVMConstInt(ctx->i32, 1 << mant_bits, false), "");
1733 result = LLVMBuildSelect(ctx->builder, tmp, result, denormal, "");
1734
1735 tmp = LLVMBuildICmp(ctx->builder, LLVMIntNE, src, ctx->i32_0, "");
1736 result = LLVMBuildSelect(ctx->builder, tmp, result, ctx->i32_0, "");
1737
1738 return ac_to_float(ctx, result);
1739 }
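/* Worked example: for the 11-bit format (exp_bits = 5, mant_bits = 6),
 * normal_shift = 23 - 6 = 17 and bias_shift = 127 - 15 = 112, so a normal
 * number is converted as (src << 17) + (112 << 23): the shift places the
 * mantissa and exponent in their f32 bit positions and the addition
 * rebiases the exponent from 15 to 127.
 */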
1740
1741 /**
1742 * Generate a fully general open coded buffer format fetch with all required
1743 * fixups suitable for vertex fetch, using non-format buffer loads.
1744 *
1745 * Some combinations of argument values have special interpretations:
1746 * - size = 8 bytes, format = fixed indicates PIPE_FORMAT_R11G11B10_FLOAT
1747 * - size = 8 bytes, format != {float,fixed} indicates a 2_10_10_10 data format
1748 *
1749 * \param log_size log(size of channel in bytes)
1750 * \param num_channels number of channels (1 to 4)
1751 * \param format AC_FETCH_FORMAT_xxx value
1752 * \param reverse whether XYZ channels are reversed
1753 * \param known_aligned whether the source is known to be aligned to the hardware's
1754 * effective element size for loading the given format
1755 * (note: this means dword alignment for 8_8_8_8, 16_16, etc.)
1756 * \param rsrc buffer resource descriptor
1757 * \return the resulting vector of floats or integers bitcast to <4 x i32>
1758 */
1759 LLVMValueRef
1760 ac_build_opencoded_load_format(struct ac_llvm_context *ctx,
1761 unsigned log_size,
1762 unsigned num_channels,
1763 unsigned format,
1764 bool reverse,
1765 bool known_aligned,
1766 LLVMValueRef rsrc,
1767 LLVMValueRef vindex,
1768 LLVMValueRef voffset,
1769 LLVMValueRef soffset,
1770 bool glc,
1771 bool slc,
1772 bool can_speculate)
1773 {
1774 LLVMValueRef tmp;
1775 unsigned load_log_size = log_size;
1776 unsigned load_num_channels = num_channels;
1777 if (log_size == 3) {
1778 load_log_size = 2;
1779 if (format == AC_FETCH_FORMAT_FLOAT) {
1780 load_num_channels = 2 * num_channels;
1781 } else {
1782 load_num_channels = 1; /* 10_11_11 or 2_10_10_10 */
1783 }
1784 }
1785
1786 int log_recombine = 0;
1787 if (ctx->chip_class == GFX6 && !known_aligned) {
1788 /* Avoid alignment restrictions by loading one byte at a time. */
1789 load_num_channels <<= load_log_size;
1790 log_recombine = load_log_size;
1791 load_log_size = 0;
1792 } else if (load_num_channels == 2 || load_num_channels == 4) {
1793 log_recombine = -util_logbase2(load_num_channels);
1794 load_num_channels = 1;
1795 load_log_size += -log_recombine;
1796 }
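/* Illustrative trace (not from the original source): an unaligned
 * 2 x 32-bit fetch on GFX6 takes the first branch and becomes eight
 * single-byte loads (load_num_channels = 2 << 2) with log_recombine = 2;
 * the recombine loop below then ORs each group of four bytes back into a
 * dword. On newer chips the same fetch takes the second branch instead and
 * is merged into one 8-byte load that is split apart afterwards.
 */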
1797
1798 assert(load_log_size >= 2 || HAVE_LLVM >= 0x0900);
1799
1800 LLVMValueRef loads[32]; /* up to 32 bytes */
1801 for (unsigned i = 0; i < load_num_channels; ++i) {
1802 tmp = LLVMBuildAdd(ctx->builder, soffset,
1803 LLVMConstInt(ctx->i32, i << load_log_size, false), "");
1804 if (HAVE_LLVM >= 0x0800) {
1805 LLVMTypeRef channel_type = load_log_size == 0 ? ctx->i8 :
1806 load_log_size == 1 ? ctx->i16 : ctx->i32;
1807 unsigned load_channels = 1 << (MAX2(load_log_size, 2) - 2);
1808 loads[i] = ac_build_llvm8_buffer_load_common(
1809 ctx, rsrc, vindex, voffset, tmp,
1810 load_channels, channel_type, glc, slc,
1811 can_speculate, false, true);
1812 } else {
1813 tmp = LLVMBuildAdd(ctx->builder, voffset, tmp, "");
1814 loads[i] = ac_build_buffer_load_common(
1815 ctx, rsrc, vindex, tmp,
1816 1 << (load_log_size - 2), glc, slc, can_speculate, false);
1817 }
1818 if (load_log_size >= 2)
1819 loads[i] = ac_to_integer(ctx, loads[i]);
1820 }
1821
1822 if (log_recombine > 0) {
1823 /* Recombine bytes if necessary (GFX6 only) */
1824 LLVMTypeRef dst_type = log_recombine == 2 ? ctx->i32 : ctx->i16;
1825
1826 for (unsigned src = 0, dst = 0; src < load_num_channels; ++dst) {
1827 LLVMValueRef accum = NULL;
1828 for (unsigned i = 0; i < (1 << log_recombine); ++i, ++src) {
1829 tmp = LLVMBuildZExt(ctx->builder, loads[src], dst_type, "");
1830 if (i == 0) {
1831 accum = tmp;
1832 } else {
1833 tmp = LLVMBuildShl(ctx->builder, tmp,
1834 LLVMConstInt(dst_type, 8 * i, false), "");
1835 accum = LLVMBuildOr(ctx->builder, accum, tmp, "");
1836 }
1837 }
1838 loads[dst] = accum;
1839 }
1840 } else if (log_recombine < 0) {
1841 /* Split vectors of dwords */
1842 if (load_log_size > 2) {
1843 assert(load_num_channels == 1);
1844 LLVMValueRef loaded = loads[0];
1845 unsigned log_split = load_log_size - 2;
1846 log_recombine += log_split;
1847 load_num_channels = 1 << log_split;
1848 load_log_size = 2;
1849 for (unsigned i = 0; i < load_num_channels; ++i) {
1850 tmp = LLVMConstInt(ctx->i32, i, false);
1851 loads[i] = LLVMBuildExtractElement(ctx->builder, loaded, tmp, "");
1852 }
1853 }
1854
1855 /* Further split dwords and shorts if required */
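/* Example (illustrative): with log_recombine == -1 and a single dword in
 * loads[0], dst_bits is 16 and the dword is split in place into two i16
 * values: loads[0] gets the low half (shift 0) and loads[1] the high half
 * (shift 16). src and dst are walked backwards so that sources that still
 * need splitting are never overwritten before they are read.
 */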
1856 if (log_recombine < 0) {
1857 for (unsigned src = load_num_channels,
1858 dst = load_num_channels << -log_recombine;
1859 src > 0; --src) {
1860 unsigned dst_bits = 1 << (3 + load_log_size + log_recombine);
1861 LLVMTypeRef dst_type = LLVMIntTypeInContext(ctx->context, dst_bits);
1862 LLVMValueRef loaded = loads[src - 1];
1863 LLVMTypeRef loaded_type = LLVMTypeOf(loaded);
1864 for (unsigned i = 1 << -log_recombine; i > 0; --i, --dst) {
1865 tmp = LLVMConstInt(loaded_type, dst_bits * (i - 1), false);
1866 tmp = LLVMBuildLShr(ctx->builder, loaded, tmp, "");
1867 loads[dst - 1] = LLVMBuildTrunc(ctx->builder, tmp, dst_type, "");
1868 }
1869 }
1870 }
1871 }
1872
1873 if (log_size == 3) {
1874 if (format == AC_FETCH_FORMAT_FLOAT) {
1875 for (unsigned i = 0; i < num_channels; ++i) {
1876 tmp = ac_build_gather_values(ctx, &loads[2 * i], 2);
1877 loads[i] = LLVMBuildBitCast(ctx->builder, tmp, ctx->f64, "");
1878 }
1879 } else if (format == AC_FETCH_FORMAT_FIXED) {
1880 /* 10_11_11_FLOAT */
1881 LLVMValueRef data = loads[0];
1882 LLVMValueRef i32_2047 = LLVMConstInt(ctx->i32, 2047, false);
1883 LLVMValueRef r = LLVMBuildAnd(ctx->builder, data, i32_2047, "");
1884 tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 11, false), "");
1885 LLVMValueRef g = LLVMBuildAnd(ctx->builder, tmp, i32_2047, "");
1886 LLVMValueRef b = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 22, false), "");
1887
1888 loads[0] = ac_to_integer(ctx, ac_ufN_to_float(ctx, r, 5, 6));
1889 loads[1] = ac_to_integer(ctx, ac_ufN_to_float(ctx, g, 5, 6));
1890 loads[2] = ac_to_integer(ctx, ac_ufN_to_float(ctx, b, 5, 5));
1891
1892 num_channels = 3;
1893 log_size = 2;
1894 format = AC_FETCH_FORMAT_FLOAT;
1895 } else {
1896 /* 2_10_10_10 data formats */
1897 LLVMValueRef data = loads[0];
1898 LLVMTypeRef i10 = LLVMIntTypeInContext(ctx->context, 10);
1899 LLVMTypeRef i2 = LLVMIntTypeInContext(ctx->context, 2);
1900 loads[0] = LLVMBuildTrunc(ctx->builder, data, i10, "");
1901 tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 10, false), "");
1902 loads[1] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
1903 tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 20, false), "");
1904 loads[2] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
1905 tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 30, false), "");
1906 loads[3] = LLVMBuildTrunc(ctx->builder, tmp, i2, "");
1907
1908 num_channels = 4;
1909 }
1910 }
1911
1912 if (format == AC_FETCH_FORMAT_FLOAT) {
1913 if (log_size != 2) {
1914 for (unsigned chan = 0; chan < num_channels; ++chan) {
1915 tmp = ac_to_float(ctx, loads[chan]);
1916 if (log_size == 3)
1917 tmp = LLVMBuildFPTrunc(ctx->builder, tmp, ctx->f32, "");
1918 else if (log_size == 1)
1919 tmp = LLVMBuildFPExt(ctx->builder, tmp, ctx->f32, "");
1920 loads[chan] = ac_to_integer(ctx, tmp);
1921 }
1922 }
1923 } else if (format == AC_FETCH_FORMAT_UINT) {
1924 if (log_size != 2) {
1925 for (unsigned chan = 0; chan < num_channels; ++chan)
1926 loads[chan] = LLVMBuildZExt(ctx->builder, loads[chan], ctx->i32, "");
1927 }
1928 } else if (format == AC_FETCH_FORMAT_SINT) {
1929 if (log_size != 2) {
1930 for (unsigned chan = 0; chan < num_channels; ++chan)
1931 loads[chan] = LLVMBuildSExt(ctx->builder, loads[chan], ctx->i32, "");
1932 }
1933 } else {
1934 bool unsign = format == AC_FETCH_FORMAT_UNORM ||
1935 format == AC_FETCH_FORMAT_USCALED ||
1936 format == AC_FETCH_FORMAT_UINT;
1937
1938 for (unsigned chan = 0; chan < num_channels; ++chan) {
1939 if (unsign) {
1940 tmp = LLVMBuildUIToFP(ctx->builder, loads[chan], ctx->f32, "");
1941 } else {
1942 tmp = LLVMBuildSIToFP(ctx->builder, loads[chan], ctx->f32, "");
1943 }
1944
1945 LLVMValueRef scale = NULL;
1946 if (format == AC_FETCH_FORMAT_FIXED) {
1947 assert(log_size == 2);
1948 scale = LLVMConstReal(ctx->f32, 1.0 / 0x10000);
1949 } else if (format == AC_FETCH_FORMAT_UNORM) {
1950 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
1951 scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << bits) - 1));
1952 } else if (format == AC_FETCH_FORMAT_SNORM) {
1953 unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
1954 scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << (bits - 1)) - 1));
1955 }
1956 if (scale)
1957 tmp = LLVMBuildFMul(ctx->builder, tmp, scale, "");
1958
1959 if (format == AC_FETCH_FORMAT_SNORM) {
1960 /* Clamp to [-1, 1] */
1961 LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
1962 LLVMValueRef clamp =
1963 LLVMBuildFCmp(ctx->builder, LLVMRealULT, tmp, neg_one, "");
1964 tmp = LLVMBuildSelect(ctx->builder, clamp, neg_one, tmp, "");
1965 }
1966
1967 loads[chan] = ac_to_integer(ctx, tmp);
1968 }
1969 }
1970
1971 while (num_channels < 4) {
1972 if (format == AC_FETCH_FORMAT_UINT || format == AC_FETCH_FORMAT_SINT) {
1973 loads[num_channels] = num_channels == 3 ? ctx->i32_1 : ctx->i32_0;
1974 } else {
1975 loads[num_channels] = ac_to_integer(ctx, num_channels == 3 ? ctx->f32_1 : ctx->f32_0);
1976 }
1977 num_channels++;
1978 }
1979
1980 if (reverse) {
1981 tmp = loads[0];
1982 loads[0] = loads[2];
1983 loads[2] = tmp;
1984 }
1985
1986 return ac_build_gather_values(ctx, loads, 4);
1987 }
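/* Usage sketch (hypothetical values, for illustration only): fetching a
 * 4 x 16-bit UNORM vertex attribute could look like
 *
 *     LLVMValueRef v =
 *         ac_build_opencoded_load_format(ctx, 1, 4, AC_FETCH_FORMAT_UNORM,
 *                                        false, false, rsrc, vindex,
 *                                        voffset, soffset, false, false,
 *                                        true);
 *
 * i.e. log_size = 1 (2-byte channels), num_channels = 4, no reversal, not
 * known-aligned, glc/slc off, speculation allowed; the normalized result
 * comes back bitcast to <4 x i32>.
 */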
1988
1989 static void
1990 ac_build_llvm8_tbuffer_store(struct ac_llvm_context *ctx,
1991 LLVMValueRef rsrc,
1992 LLVMValueRef vdata,
1993 LLVMValueRef vindex,
1994 LLVMValueRef voffset,
1995 LLVMValueRef soffset,
1996 unsigned num_channels,
1997 unsigned dfmt,
1998 unsigned nfmt,
1999 bool glc,
2000 bool slc,
2001 bool writeonly_memory,
2002 bool structurized)
2003 {
2004 LLVMValueRef args[7];
2005 int idx = 0;
2006 args[idx++] = vdata;
2007 args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
2008 if (structurized)
2009 args[idx++] = vindex ? vindex : ctx->i32_0;
2010 args[idx++] = voffset ? voffset : ctx->i32_0;
2011 args[idx++] = soffset ? soffset : ctx->i32_0;
2012 args[idx++] = LLVMConstInt(ctx->i32, dfmt | (nfmt << 4), 0);
2013 args[idx++] = LLVMConstInt(ctx->i32, (glc ? 1 : 0) + (slc ? 2 : 0), 0);
2014 unsigned func = !ac_has_vec3_support(ctx->chip_class, true) && num_channels == 3 ? 4 : num_channels;
2015 const char *indexing_kind = structurized ? "struct" : "raw";
2016 char name[256], type_name[8];
2017
2018 LLVMTypeRef type = func > 1 ? LLVMVectorType(ctx->i32, func) : ctx->i32;
2019 ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
2020
2021 snprintf(name, sizeof(name), "llvm.amdgcn.%s.tbuffer.store.%s",
2022 indexing_kind, type_name);
2023
2024 ac_build_intrinsic(ctx, name, ctx->voidt, args, idx,
2025 ac_get_store_intr_attribs(writeonly_memory));
2026 }
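/* For example (illustrative), a 4-channel store emits
 * "llvm.amdgcn.raw.tbuffer.store.v4i32" (or the "struct" variant). Note
 * the vec3 handling above: on chips without vec3 support a 3-channel
 * store is widened to v4i32 instead of emitting an unsupported v3i32
 * intrinsic.
 */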
2027
2028 static void
2029 ac_build_tbuffer_store(struct ac_llvm_context *ctx,
2030 LLVMValueRef rsrc,
2031 LLVMValueRef vdata,
2032 LLVMValueRef vindex,
2033 LLVMValueRef voffset,
2034 LLVMValueRef soffset,
2035 LLVMValueRef immoffset,
2036 unsigned num_channels,
2037 unsigned dfmt,
2038 unsigned nfmt,
2039 bool glc,
2040 bool slc,
2041 bool writeonly_memory,
2042 bool structurized) /* only matters for LLVM 8+ */
2043 {
2044 if (HAVE_LLVM >= 0x0800) {
2045 voffset = LLVMBuildAdd(ctx->builder,
2046 voffset ? voffset : ctx->i32_0,
2047 immoffset, "");
2048
2049 ac_build_llvm8_tbuffer_store(ctx, rsrc, vdata, vindex, voffset,
2050 soffset, num_channels, dfmt, nfmt,
2051 glc, slc, writeonly_memory,
2052 structurized);
2053 } else {
2054 LLVMValueRef params[] = {
2055 vdata,
2056 rsrc,
2057 vindex ? vindex : ctx->i32_0,
2058 voffset ? voffset : ctx->i32_0,
2059 soffset ? soffset : ctx->i32_0,
2060 immoffset,
2061 LLVMConstInt(ctx->i32, dfmt, false),
2062 LLVMConstInt(ctx->i32, nfmt, false),
2063 LLVMConstInt(ctx->i1, glc, false),
2064 LLVMConstInt(ctx->i1, slc, false),
2065 };
2066 unsigned func = CLAMP(num_channels, 1, 3) - 1;
2067 const char *type_names[] = {"i32", "v2i32", "v4i32"};
2068 char name[256];
2069
2070 snprintf(name, sizeof(name), "llvm.amdgcn.tbuffer.store.%s",
2071 type_names[func]);
2072
2073 ac_build_intrinsic(ctx, name, ctx->voidt, params, 10,
2074 ac_get_store_intr_attribs(writeonly_memory));
2075 }
2076 }
2077
2078 void
2079 ac_build_struct_tbuffer_store(struct ac_llvm_context *ctx,
2080 LLVMValueRef rsrc,
2081 LLVMValueRef vdata,
2082 LLVMValueRef vindex,
2083 LLVMValueRef voffset,
2084 LLVMValueRef soffset,
2085 LLVMValueRef immoffset,
2086 unsigned num_channels,
2087 unsigned dfmt,
2088 unsigned nfmt,
2089 bool glc,
2090 bool slc,
2091 bool writeonly_memory)
2092 {
2093 ac_build_tbuffer_store(ctx, rsrc, vdata, vindex, voffset, soffset,
2094 immoffset, num_channels, dfmt, nfmt, glc, slc,
2095 writeonly_memory, true);
2096 }
2097
2098 void
2099 ac_build_raw_tbuffer_store(struct ac_llvm_context *ctx,
2100 LLVMValueRef rsrc,
2101 LLVMValueRef vdata,
2102 LLVMValueRef voffset,
2103 LLVMValueRef soffset,
2104 LLVMValueRef immoffset,
2105 unsigned num_channels,
2106 unsigned dfmt,
2107 unsigned nfmt,
2108 bool glc,
2109 bool slc,
2110 bool writeonly_memory)
2111 {
2112 ac_build_tbuffer_store(ctx, rsrc, vdata, NULL, voffset, soffset,
2113 immoffset, num_channels, dfmt, nfmt, glc, slc,
2114 writeonly_memory, false);
2115 }
2116
2117 void
2118 ac_build_tbuffer_store_short(struct ac_llvm_context *ctx,
2119 LLVMValueRef rsrc,
2120 LLVMValueRef vdata,
2121 LLVMValueRef voffset,
2122 LLVMValueRef soffset,
2123 bool glc,
2124 bool writeonly_memory)
2125 {
2126 vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i16, "");
2127
2128 if (HAVE_LLVM >= 0x0900) {
2129 /* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
2130 ac_build_llvm8_buffer_store_common(ctx, rsrc, vdata, NULL,
2131 voffset, soffset, 1,
2132 ctx->i16, glc, false,
2133 writeonly_memory, false,
2134 false);
2135 } else {
2136 unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_16;
2137 unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
2138
2139 vdata = LLVMBuildZExt(ctx->builder, vdata, ctx->i32, "");
2140
2141 ac_build_raw_tbuffer_store(ctx, rsrc, vdata, voffset, soffset,
2142 ctx->i32_0, 1, dfmt, nfmt, glc, false,
2143 writeonly_memory);
2144 }
2145 }
2146
2147 void
2148 ac_build_tbuffer_store_byte(struct ac_llvm_context *ctx,
2149 LLVMValueRef rsrc,
2150 LLVMValueRef vdata,
2151 LLVMValueRef voffset,
2152 LLVMValueRef soffset,
2153 bool glc,
2154 bool writeonly_memory)
2155 {
2156 vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i8, "");
2157
2158 if (HAVE_LLVM >= 0x0900) {
2159 /* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
2160 ac_build_llvm8_buffer_store_common(ctx, rsrc, vdata, NULL,
2161 voffset, soffset, 1,
2162 ctx->i8, glc, false,
2163 writeonly_memory, false,
2164 false);
2165 } else {
2166 unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_8;
2167 unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
2168
2169 vdata = LLVMBuildZExt(ctx->builder, vdata, ctx->i32, "");
2170
2171 ac_build_raw_tbuffer_store(ctx, rsrc, vdata, voffset, soffset,
2172 ctx->i32_0, 1, dfmt, nfmt, glc, false,
2173 writeonly_memory);
2174 }
2175 }
2176 /**
2177 * Set range metadata on an instruction. This can only be used on load and
2178 * call instructions. If you know an instruction can only produce the values
2179 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
2180 * \p lo is the minimum value inclusive.
2181 * \p hi is the maximum value exclusive.
2182 */
2183 static void set_range_metadata(struct ac_llvm_context *ctx,
2184 LLVMValueRef value, unsigned lo, unsigned hi)
2185 {
2186 LLVMValueRef range_md, md_args[2];
2187 LLVMTypeRef type = LLVMTypeOf(value);
2188 LLVMContextRef context = LLVMGetTypeContext(type);
2189
2190 md_args[0] = LLVMConstInt(type, lo, false);
2191 md_args[1] = LLVMConstInt(type, hi, false);
2192 range_md = LLVMMDNodeInContext(context, md_args, 2);
2193 LLVMSetMetadata(value, ctx->range_md_kind, range_md);
2194 }
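/* In the emitted IR this appears as, e.g. (illustrative):
 *
 *     %tid = call i32 @llvm.amdgcn.mbcnt.hi(...), !range !0
 *     !0 = !{i32 0, i32 64}
 *
 * telling LLVM the result lies in [lo, hi); ac_get_thread_id below uses
 * this with the range [0, 64).
 */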
2195
2196 LLVMValueRef
2197 ac_get_thread_id(struct ac_llvm_context *ctx)
2198 {
2199 LLVMValueRef tid;
2200
2201 LLVMValueRef tid_args[2];
2202 tid_args[0] = LLVMConstInt(ctx->i32, 0xffffffff, false);
2203 tid_args[1] = ctx->i32_0;
2204 tid_args[1] = ac_build_intrinsic(ctx,
2205 "llvm.amdgcn.mbcnt.lo", ctx->i32,
2206 tid_args, 2, AC_FUNC_ATTR_READNONE);
2207
2208 tid = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi",
2209 ctx->i32, tid_args,
2210 2, AC_FUNC_ATTR_READNONE);
2211 set_range_metadata(ctx, tid, 0, 64);
2212 return tid;
2213 }
2214
2215 /*
2216 * AMD GCN implements derivatives using the local data store (LDS).
2217 * All writes to the LDS happen in all executing threads at
2218 * the same time. TID is the Thread ID for the current
2219 * thread and is a value between 0 and 63, representing
2220 * the thread's position in the wavefront.
2221 *
2222 * For the pixel shader, threads are grouped into quads of four pixels.
2223 * The TIDs of the pixels of a quad are:
2224 *
2225 * +------+------+
2226 * |4n + 0|4n + 1|
2227 * +------+------+
2228 * |4n + 2|4n + 3|
2229 * +------+------+
2230 *
2231 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
2232 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
2233 * the current pixel's column, and masking with 0xfffffffe yields the TID
2234 * of the left pixel of the current pixel's row.
2235 *
2236 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
2237 * adding 2 yields the TID of the pixel below the top pixel.
2238 */
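/* For example (illustrative; the exact masks are chosen by the callers),
 * with i = 0..3 the lane selection below works out to:
 *
 *     coarse ddx: mask = 0xfffffffc, idx = 1 -> tl {0,0,0,0}, trbl {1,1,1,1}
 *     coarse ddy: mask = 0xfffffffc, idx = 2 -> tl {0,0,0,0}, trbl {2,2,2,2}
 *     fine ddx:   mask = 0xfffffffe, idx = 1 -> tl {0,0,2,2}, trbl {1,1,3,3}
 *     fine ddy:   mask = 0xfffffffd, idx = 2 -> tl {0,1,0,1}, trbl {2,3,2,3}
 */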
2239 LLVMValueRef
2240 ac_build_ddxy(struct ac_llvm_context *ctx,
2241 uint32_t mask,
2242 int idx,
2243 LLVMValueRef val)
2244 {
2245 unsigned tl_lanes[4], trbl_lanes[4];
2246 char name[32], type[8];
2247 LLVMValueRef tl, trbl;
2248 LLVMTypeRef result_type;
2249 LLVMValueRef result;
2250
2251 result_type = ac_to_float_type(ctx, LLVMTypeOf(val));
2252
2253 if (result_type == ctx->f16)
2254 val = LLVMBuildZExt(ctx->builder, val, ctx->i32, "");
2255
2256 for (unsigned i = 0; i < 4; ++i) {
2257 tl_lanes[i] = i & mask;
2258 trbl_lanes[i] = (i & mask) + idx;
2259 }
2260
2261 tl = ac_build_quad_swizzle(ctx, val,
2262 tl_lanes[0], tl_lanes[1],
2263 tl_lanes[2], tl_lanes[3]);
2264 trbl = ac_build_quad_swizzle(ctx, val,
2265 trbl_lanes[0], trbl_lanes[1],
2266 trbl_lanes[2], trbl_lanes[3]);
2267
2268 if (result_type == ctx->f16) {
2269 tl = LLVMBuildTrunc(ctx->builder, tl, ctx->i16, "");
2270 trbl = LLVMBuildTrunc(ctx->builder, trbl, ctx->i16, "");
2271 }
2272
2273 tl = LLVMBuildBitCast(ctx->builder, tl, result_type, "");
2274 trbl = LLVMBuildBitCast(ctx->builder, trbl, result_type, "");
2275 result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
2276
2277 ac_build_type_name_for_intr(result_type, type, sizeof(type));
2278 snprintf(name, sizeof(name), "llvm.amdgcn.wqm.%s", type);
2279
2280 return ac_build_intrinsic(ctx, name, result_type, &result, 1, 0);
2281 }
2282
2283 void
2284 ac_build_sendmsg(struct ac_llvm_context *ctx,
2285 uint32_t msg,
2286 LLVMValueRef wave_id)
2287 {
2288 LLVMValueRef args[2];
2289 args[0] = LLVMConstInt(ctx->i32, msg, false);
2290 args[1] = wave_id;
2291 ac_build_intrinsic(ctx, "llvm.amdgcn.s.sendmsg", ctx->voidt, args, 2, 0);
2292 }
2293
2294 LLVMValueRef
2295 ac_build_imsb(struct ac_llvm_context *ctx,
2296 LLVMValueRef arg,
2297 LLVMTypeRef dst_type)
2298 {
2299 LLVMValueRef msb = ac_build_intrinsic(ctx, "llvm.amdgcn.sffbh.i32",
2300 dst_type, &arg, 1,
2301 AC_FUNC_ATTR_READNONE);
2302
2303 /* The HW returns the last bit index from MSB, but NIR/TGSI wants
2304 * the index from LSB. Invert it by doing "31 - msb". */
2305 msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
2306 msb, "");
2307
2308 LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
2309 LLVMValueRef cond = LLVMBuildOr(ctx->builder,
2310 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
2311 arg, ctx->i32_0, ""),
2312 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
2313 arg, all_ones, ""), "");
2314
2315 return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
2316 }
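/* Worked example (illustrative): for arg = 0x10 the hardware reports bit
 * position 27 counted from the MSB, and 31 - 27 = 4 is the LSB-based index
 * NIR/TGSI expect. arg == 0 and arg == -1 contain no bit that differs from
 * the sign bit, so both are mapped to -1 by the select above.
 */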
2317
2318 LLVMValueRef
2319 ac_build_umsb(struct ac_llvm_context *ctx,
2320 LLVMValueRef arg,
2321 LLVMTypeRef dst_type)
2322 {
2323 const char *intrin_name;
2324 LLVMTypeRef type;
2325 LLVMValueRef highest_bit;
2326 LLVMValueRef zero;
2327 unsigned bitsize;
2328
2329 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(arg));
2330 switch (bitsize) {
2331 case 64:
2332 intrin_name = "llvm.ctlz.i64";
2333 type = ctx->i64;
2334 highest_bit = LLVMConstInt(ctx->i64, 63, false);
2335 zero = ctx->i64_0;
2336 break;
2337 case 32:
2338 intrin_name = "llvm.ctlz.i32";
2339 type = ctx->i32;
2340 highest_bit = LLVMConstInt(ctx->i32, 31, false);
2341 zero = ctx->i32_0;
2342 break;
2343 case 16:
2344 intrin_name = "llvm.ctlz.i16";
2345 type = ctx->i16;
2346 highest_bit = LLVMConstInt(ctx->i16, 15, false);
2347 zero = ctx->i16_0;
2348 break;
2349 case 8:
2350 intrin_name = "llvm.ctlz.i8";
2351 type = ctx->i8;
2352 highest_bit = LLVMConstInt(ctx->i8, 7, false);
2353 zero = ctx->i8_0;
2354 break;
2355 default:
2356 unreachable("invalid bitsize");
2357 break;
2358 }
2359
2360 LLVMValueRef params[2] = {
2361 arg,
2362 ctx->i1true,
2363 };
2364
2365 LLVMValueRef msb = ac_build_intrinsic(ctx, intrin_name, type,
2366 params, 2,
2367 AC_FUNC_ATTR_READNONE);
2368
2369 /* The HW returns the last bit index from MSB, but TGSI/NIR wants
2370 * the index from LSB. Invert it by doing "highest_bit - msb". */
2371 msb = LLVMBuildSub(ctx->builder, highest_bit, msb, "");
2372
2373 if (bitsize == 64) {
2374 msb = LLVMBuildTrunc(ctx->builder, msb, ctx->i32, "");
2375 } else if (bitsize < 32) {
2376 msb = LLVMBuildSExt(ctx->builder, msb, ctx->i32, "");
2377 }
2378
2379 /* check for zero */
2380 return LLVMBuildSelect(ctx->builder,
2381 LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, zero, ""),
2382 LLVMConstInt(ctx->i32, -1, true), msb, "");
2383 }
2384
2385 LLVMValueRef ac_build_fmin(struct ac_llvm_context *ctx, LLVMValueRef a,
2386 LLVMValueRef b)
2387 {
2388 char name[64];
2389 snprintf(name, sizeof(name), "llvm.minnum.f%d", ac_get_elem_bits(ctx, LLVMTypeOf(a)));
2390 LLVMValueRef args[2] = {a, b};
2391 return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2,
2392 AC_FUNC_ATTR_READNONE);
2393 }
2394
2395 LLVMValueRef ac_build_fmax(struct ac_llvm_context *ctx, LLVMValueRef a,
2396 LLVMValueRef b)
2397 {
2398 char name[64];
2399 snprintf(name, sizeof(name), "llvm.maxnum.f%d", ac_get_elem_bits(ctx, LLVMTypeOf(a)));
2400 LLVMValueRef args[2] = {a, b};
2401 return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2,
2402 AC_FUNC_ATTR_READNONE);
2403 }
2404
2405 LLVMValueRef ac_build_imin(struct ac_llvm_context *ctx, LLVMValueRef a,
2406 LLVMValueRef b)
2407 {
2408 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSLE, a, b, "");
2409 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
2410 }
2411
2412 LLVMValueRef ac_build_imax(struct ac_llvm_context *ctx, LLVMValueRef a,
2413 LLVMValueRef b)
2414 {
2415 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, a, b, "");
2416 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
2417 }
2418
2419 LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a,
2420 LLVMValueRef b)
2421 {
2422 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
2423 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
2424 }
2425
2426 LLVMValueRef ac_build_umax(struct ac_llvm_context *ctx, LLVMValueRef a,
2427 LLVMValueRef b)
2428 {
2429 LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, a, b, "");
2430 return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
2431 }
2432
2433 LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
2434 {
2435 LLVMTypeRef t = LLVMTypeOf(value);
2436 return ac_build_fmin(ctx, ac_build_fmax(ctx, value, LLVMConstReal(t, 0.0)),
2437 LLVMConstReal(t, 1.0));
2438 }
2439
2440 void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
2441 {
2442 LLVMValueRef args[9];
2443
2444 args[0] = LLVMConstInt(ctx->i32, a->target, 0);
2445 args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
2446
2447 if (a->compr) {
2448 LLVMTypeRef i16 = LLVMInt16TypeInContext(ctx->context);
2449 LLVMTypeRef v2i16 = LLVMVectorType(i16, 2);
2450
2451 args[2] = LLVMBuildBitCast(ctx->builder, a->out[0],
2452 v2i16, "");
2453 args[3] = LLVMBuildBitCast(ctx->builder, a->out[1],
2454 v2i16, "");
2455 args[4] = LLVMConstInt(ctx->i1, a->done, 0);
2456 args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
2457
2458 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16",
2459 ctx->voidt, args, 6, 0);
2460 } else {
2461 args[2] = a->out[0];
2462 args[3] = a->out[1];
2463 args[4] = a->out[2];
2464 args[5] = a->out[3];
2465 args[6] = LLVMConstInt(ctx->i1, a->done, 0);
2466 args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
2467
2468 ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32",
2469 ctx->voidt, args, 8, 0);
2470 }
2471 }
2472
2473 void ac_build_export_null(struct ac_llvm_context *ctx)
2474 {
2475 struct ac_export_args args;
2476
2477 args.enabled_channels = 0x0; /* enabled channels */
2478 args.valid_mask = 1; /* whether the EXEC mask is valid */
2479 args.done = 1; /* DONE bit */
2480 args.target = V_008DFC_SQ_EXP_NULL;
2481 args.compr = 0; /* COMPR flag (0 = 32-bit export) */
2482 args.out[0] = LLVMGetUndef(ctx->f32); /* R */
2483 args.out[1] = LLVMGetUndef(ctx->f32); /* G */
2484 args.out[2] = LLVMGetUndef(ctx->f32); /* B */
2485 args.out[3] = LLVMGetUndef(ctx->f32); /* A */
2486
2487 ac_build_export(ctx, &args);
2488 }
2489
2490 static unsigned ac_num_coords(enum ac_image_dim dim)
2491 {
2492 switch (dim) {
2493 case ac_image_1d:
2494 return 1;
2495 case ac_image_2d:
2496 case ac_image_1darray:
2497 return 2;
2498 case ac_image_3d:
2499 case ac_image_cube:
2500 case ac_image_2darray:
2501 case ac_image_2dmsaa:
2502 return 3;
2503 case ac_image_2darraymsaa:
2504 return 4;
2505 default:
2506 unreachable("ac_num_coords: bad dim");
2507 }
2508 }
2509
2510 static unsigned ac_num_derivs(enum ac_image_dim dim)
2511 {
2512 switch (dim) {
2513 case ac_image_1d:
2514 case ac_image_1darray:
2515 return 2;
2516 case ac_image_2d:
2517 case ac_image_2darray:
2518 case ac_image_cube:
2519 return 4;
2520 case ac_image_3d:
2521 return 6;
2522 case ac_image_2dmsaa:
2523 case ac_image_2darraymsaa:
2524 default:
2525 unreachable("derivatives not supported");
2526 }
2527 }
2528
2529 static const char *get_atomic_name(enum ac_atomic_op op)
2530 {
2531 switch (op) {
2532 case ac_atomic_swap: return "swap";
2533 case ac_atomic_add: return "add";
2534 case ac_atomic_sub: return "sub";
2535 case ac_atomic_smin: return "smin";
2536 case ac_atomic_umin: return "umin";
2537 case ac_atomic_smax: return "smax";
2538 case ac_atomic_umax: return "umax";
2539 case ac_atomic_and: return "and";
2540 case ac_atomic_or: return "or";
2541 case ac_atomic_xor: return "xor";
2542 }
2543 unreachable("bad atomic op");
2544 }
2545
2546 LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx,
2547 struct ac_image_args *a)
2548 {
2549 const char *overload[3] = { "", "", "" };
2550 unsigned num_overloads = 0;
2551 LLVMValueRef args[18];
2552 unsigned num_args = 0;
2553 enum ac_image_dim dim = a->dim;
2554
2555 assert(!a->lod || a->lod == ctx->i32_0 || a->lod == ctx->f32_0 ||
2556 !a->level_zero);
2557 assert((a->opcode != ac_image_get_resinfo && a->opcode != ac_image_load_mip &&
2558 a->opcode != ac_image_store_mip) ||
2559 a->lod);
2560 assert(a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2561 (!a->compare && !a->offset));
2562 assert((a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2563 a->opcode == ac_image_get_lod) ||
2564 !a->bias);
2565 assert((a->bias ? 1 : 0) +
2566 (a->lod ? 1 : 0) +
2567 (a->level_zero ? 1 : 0) +
2568 (a->derivs[0] ? 1 : 0) <= 1);
2569
2570 if (a->opcode == ac_image_get_lod) {
2571 switch (dim) {
2572 case ac_image_1darray:
2573 dim = ac_image_1d;
2574 break;
2575 case ac_image_2darray:
2576 case ac_image_cube:
2577 dim = ac_image_2d;
2578 break;
2579 default:
2580 break;
2581 }
2582 }
2583
2584 bool sample = a->opcode == ac_image_sample ||
2585 a->opcode == ac_image_gather4 ||
2586 a->opcode == ac_image_get_lod;
2587 bool atomic = a->opcode == ac_image_atomic ||
2588 a->opcode == ac_image_atomic_cmpswap;
2589 LLVMTypeRef coord_type = sample ? ctx->f32 : ctx->i32;
2590
2591 if (atomic || a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
2592 args[num_args++] = a->data[0];
2593 if (a->opcode == ac_image_atomic_cmpswap)
2594 args[num_args++] = a->data[1];
2595 }
2596
2597 if (!atomic)
2598 args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, false);
2599
2600 if (a->offset)
2601 args[num_args++] = ac_to_integer(ctx, a->offset);
2602 if (a->bias) {
2603 args[num_args++] = ac_to_float(ctx, a->bias);
2604 overload[num_overloads++] = ".f32";
2605 }
2606 if (a->compare)
2607 args[num_args++] = ac_to_float(ctx, a->compare);
2608 if (a->derivs[0]) {
2609 unsigned count = ac_num_derivs(dim);
2610 for (unsigned i = 0; i < count; ++i)
2611 args[num_args++] = ac_to_float(ctx, a->derivs[i]);
2612 overload[num_overloads++] = ".f32";
2613 }
2614 unsigned num_coords =
2615 a->opcode != ac_image_get_resinfo ? ac_num_coords(dim) : 0;
2616 for (unsigned i = 0; i < num_coords; ++i)
2617 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->coords[i], coord_type, "");
2618 if (a->lod)
2619 args[num_args++] = LLVMBuildBitCast(ctx->builder, a->lod, coord_type, "");
2620 overload[num_overloads++] = sample ? ".f32" : ".i32";
2621
2622 args[num_args++] = a->resource;
2623 if (sample) {
2624 args[num_args++] = a->sampler;
2625 args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, false);
2626 }
2627
2628 args[num_args++] = ctx->i32_0; /* texfailctrl */
2629 args[num_args++] = LLVMConstInt(ctx->i32, a->cache_policy, false);
2630
2631 const char *name;
2632 const char *atomic_subop = "";
2633 switch (a->opcode) {
2634 case ac_image_sample: name = "sample"; break;
2635 case ac_image_gather4: name = "gather4"; break;
2636 case ac_image_load: name = "load"; break;
2637 case ac_image_load_mip: name = "load.mip"; break;
2638 case ac_image_store: name = "store"; break;
2639 case ac_image_store_mip: name = "store.mip"; break;
2640 case ac_image_atomic:
2641 name = "atomic.";
2642 atomic_subop = get_atomic_name(a->atomic);
2643 break;
2644 case ac_image_atomic_cmpswap:
2645 name = "atomic.";
2646 atomic_subop = "cmpswap";
2647 break;
2648 case ac_image_get_lod: name = "getlod"; break;
2649 case ac_image_get_resinfo: name = "getresinfo"; break;
2650 default: unreachable("invalid image opcode");
2651 }
2652
2653 const char *dimname;
2654 switch (dim) {
2655 case ac_image_1d: dimname = "1d"; break;
2656 case ac_image_2d: dimname = "2d"; break;
2657 case ac_image_3d: dimname = "3d"; break;
2658 case ac_image_cube: dimname = "cube"; break;
2659 case ac_image_1darray: dimname = "1darray"; break;
2660 case ac_image_2darray: dimname = "2darray"; break;
2661 case ac_image_2dmsaa: dimname = "2dmsaa"; break;
2662 case ac_image_2darraymsaa: dimname = "2darraymsaa"; break;
2663 default: unreachable("invalid dim");
2664 }
2665
2666 bool lod_suffix =
2667 a->lod && (a->opcode == ac_image_sample || a->opcode == ac_image_gather4);
2668 char intr_name[96];
2669 snprintf(intr_name, sizeof(intr_name),
2670 "llvm.amdgcn.image.%s%s" /* base name */
2671 "%s%s%s" /* sample/gather modifiers */
2672 ".%s.%s%s%s%s", /* dimension and type overloads */
2673 name, atomic_subop,
2674 a->compare ? ".c" : "",
2675 a->bias ? ".b" :
2676 lod_suffix ? ".l" :
2677 a->derivs[0] ? ".d" :
2678 a->level_zero ? ".lz" : "",
2679 a->offset ? ".o" : "",
2680 dimname,
2681 atomic ? "i32" : "v4f32",
2682 overload[0], overload[1], overload[2]);
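/* For example (illustrative), a 2D sample with a compare value and an
 * explicit LOD yields "llvm.amdgcn.image.sample.c.l.2d.v4f32.f32", while
 * a plain 2D image load yields "llvm.amdgcn.image.load.2d.v4f32.i32".
 */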
2683
2684 LLVMTypeRef retty;
2685 if (atomic)
2686 retty = ctx->i32;
2687 else if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip)
2688 retty = ctx->voidt;
2689 else
2690 retty = ctx->v4f32;
2691
2692 LLVMValueRef result =
2693 ac_build_intrinsic(ctx, intr_name, retty, args, num_args,
2694 a->attributes);
2695 if (!sample && retty == ctx->v4f32) {
2696 result = LLVMBuildBitCast(ctx->builder, result,
2697 ctx->v4i32, "");
2698 }
2699 return result;
2700 }
2701
2702 LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx,
2703 LLVMValueRef args[2])
2704 {
2705 LLVMTypeRef v2f16 =
2706 LLVMVectorType(LLVMHalfTypeInContext(ctx->context), 2);
2707
2708 return ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz", v2f16,
2709 args, 2, AC_FUNC_ATTR_READNONE);
2710 }
2711
2712 LLVMValueRef ac_build_cvt_pknorm_i16(struct ac_llvm_context *ctx,
2713 LLVMValueRef args[2])
2714 {
2715 LLVMValueRef res =
2716 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.i16",
2717 ctx->v2i16, args, 2,
2718 AC_FUNC_ATTR_READNONE);
2719 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2720 }
2721
2722 LLVMValueRef ac_build_cvt_pknorm_u16(struct ac_llvm_context *ctx,
2723 LLVMValueRef args[2])
2724 {
2725 LLVMValueRef res =
2726 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.u16",
2727 ctx->v2i16, args, 2,
2728 AC_FUNC_ATTR_READNONE);
2729 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2730 }
2731
2732 /* The 8-bit and 10-bit clamping is for HW workarounds. */
2733 LLVMValueRef ac_build_cvt_pk_i16(struct ac_llvm_context *ctx,
2734 LLVMValueRef args[2], unsigned bits, bool hi)
2735 {
2736 assert(bits == 8 || bits == 10 || bits == 16);
2737
2738 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
2739 bits == 8 ? 127 : bits == 10 ? 511 : 32767, 0);
2740 LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
2741 bits == 8 ? -128 : bits == 10 ? -512 : -32768, 0);
2742 LLVMValueRef max_alpha =
2743 bits != 10 ? max_rgb : ctx->i32_1;
2744 LLVMValueRef min_alpha =
2745 bits != 10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
2746
2747 /* Clamp. */
2748 if (bits != 16) {
2749 for (int i = 0; i < 2; i++) {
2750 bool alpha = hi && i == 1;
2751 args[i] = ac_build_imin(ctx, args[i],
2752 alpha ? max_alpha : max_rgb);
2753 args[i] = ac_build_imax(ctx, args[i],
2754 alpha ? min_alpha : min_rgb);
2755 }
2756 }
2757
2758 LLVMValueRef res =
2759 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.i16",
2760 ctx->v2i16, args, 2,
2761 AC_FUNC_ATTR_READNONE);
2762 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2763 }
2764
2765 /* The 8-bit and 10-bit clamping is for HW workarounds. */
2766 LLVMValueRef ac_build_cvt_pk_u16(struct ac_llvm_context *ctx,
2767 LLVMValueRef args[2], unsigned bits, bool hi)
2768 {
2769 assert(bits == 8 || bits == 10 || bits == 16);
2770
2771 LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
2772 bits == 8 ? 255 : bits == 10 ? 1023 : 65535, 0);
2773 LLVMValueRef max_alpha =
2774 bits != 10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
2775
2776 /* Clamp. */
2777 if (bits != 16) {
2778 for (int i = 0; i < 2; i++) {
2779 bool alpha = hi && i == 1;
2780 args[i] = ac_build_umin(ctx, args[i],
2781 alpha ? max_alpha : max_rgb);
2782 }
2783 }
2784
2785 LLVMValueRef res =
2786 ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.u16",
2787 ctx->v2i16, args, 2,
2788 AC_FUNC_ATTR_READNONE);
2789 return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2790 }
2791
2792 LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
2793 {
2794 return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1,
2795 &i1, 1, AC_FUNC_ATTR_READNONE);
2796 }
2797
2798 void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
2799 {
2800 ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt,
2801 &i1, 1, 0);
2802 }
2803
2804 LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input,
2805 LLVMValueRef offset, LLVMValueRef width,
2806 bool is_signed)
2807 {
2808 LLVMValueRef args[] = {
2809 input,
2810 offset,
2811 width,
2812 };
2813
2814 return ac_build_intrinsic(ctx,
2815 is_signed ? "llvm.amdgcn.sbfe.i32" :
2816 "llvm.amdgcn.ubfe.i32",
2817 ctx->i32, args, 3,
2818 AC_FUNC_ATTR_READNONE);
2819 }
2820
2821 LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0,
2822 LLVMValueRef s1, LLVMValueRef s2)
2823 {
2824 return LLVMBuildAdd(ctx->builder,
2825 LLVMBuildMul(ctx->builder, s0, s1, ""), s2, "");
2826 }
2827
2828 LLVMValueRef ac_build_fmad(struct ac_llvm_context *ctx, LLVMValueRef s0,
2829 LLVMValueRef s1, LLVMValueRef s2)
2830 {
2831 return LLVMBuildFAdd(ctx->builder,
2832 LLVMBuildFMul(ctx->builder, s0, s1, ""), s2, "");
2833 }
2834
2835 void ac_build_waitcnt(struct ac_llvm_context *ctx, unsigned simm16)
2836 {
2837 LLVMValueRef args[1] = {
2838 LLVMConstInt(ctx->i32, simm16, false),
2839 };
2840 ac_build_intrinsic(ctx, "llvm.amdgcn.s.waitcnt",
2841 ctx->voidt, args, 1, 0);
2842 }
2843
2844 LLVMValueRef ac_build_fmed3(struct ac_llvm_context *ctx, LLVMValueRef src0,
2845 LLVMValueRef src1, LLVMValueRef src2,
2846 unsigned bitsize)
2847 {
2848 LLVMTypeRef type;
2849 const char *intr;
2850
2851 if (bitsize == 16) {
2852 intr = "llvm.amdgcn.fmed3.f16";
2853 type = ctx->f16;
2854 } else if (bitsize == 32) {
2855 intr = "llvm.amdgcn.fmed3.f32";
2856 type = ctx->f32;
2857 } else {
2858 intr = "llvm.amdgcn.fmed3.f64";
2859 type = ctx->f64;
2860 }
2861
2862 LLVMValueRef params[] = {
2863 src0,
2864 src1,
2865 src2,
2866 };
2867 return ac_build_intrinsic(ctx, intr, type, params, 3,
2868 AC_FUNC_ATTR_READNONE);
2869 }
2870
2871 LLVMValueRef ac_build_fract(struct ac_llvm_context *ctx, LLVMValueRef src0,
2872 unsigned bitsize)
2873 {
2874 LLVMTypeRef type;
2875 const char *intr;
2876
2877 if (bitsize == 16) {
2878 intr = "llvm.amdgcn.fract.f16";
2879 type = ctx->f16;
2880 } else if (bitsize == 32) {
2881 intr = "llvm.amdgcn.fract.f32";
2882 type = ctx->f32;
2883 } else {
2884 intr = "llvm.amdgcn.fract.f64";
2885 type = ctx->f64;
2886 }
2887
2888 LLVMValueRef params[] = {
2889 src0,
2890 };
2891 return ac_build_intrinsic(ctx, intr, type, params, 1,
2892 AC_FUNC_ATTR_READNONE);
2893 }
2894
2895 LLVMValueRef ac_build_isign(struct ac_llvm_context *ctx, LLVMValueRef src0,
2896 unsigned bitsize)
2897 {
2898 LLVMTypeRef type = LLVMIntTypeInContext(ctx->context, bitsize);
2899 LLVMValueRef zero = LLVMConstInt(type, 0, false);
2900 LLVMValueRef one = LLVMConstInt(type, 1, false);
2901
2902 LLVMValueRef cmp, val;
2903 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, src0, zero, "");
2904 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2905 cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGE, val, zero, "");
2906 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstInt(type, -1, true), "");
2907 return val;
2908 }
2909
2910 LLVMValueRef ac_build_fsign(struct ac_llvm_context *ctx, LLVMValueRef src0,
2911 unsigned bitsize)
2912 {
2913 LLVMValueRef cmp, val, zero, one;
2914 LLVMTypeRef type;
2915
2916 if (bitsize == 16) {
2917 type = ctx->f16;
2918 zero = ctx->f16_0;
2919 one = ctx->f16_1;
2920 } else if (bitsize == 32) {
2921 type = ctx->f32;
2922 zero = ctx->f32_0;
2923 one = ctx->f32_1;
2924 } else {
2925 type = ctx->f64;
2926 zero = ctx->f64_0;
2927 one = ctx->f64_1;
2928 }
2929
2930 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGT, src0, zero, "");
2931 val = LLVMBuildSelect(ctx->builder, cmp, one, src0, "");
2932 cmp = LLVMBuildFCmp(ctx->builder, LLVMRealOGE, val, zero, "");
2933 val = LLVMBuildSelect(ctx->builder, cmp, val, LLVMConstReal(type, -1.0), "");
2934 return val;
2935 }
2936
2937 LLVMValueRef ac_build_bit_count(struct ac_llvm_context *ctx, LLVMValueRef src0)
2938 {
2939 LLVMValueRef result;
2940 unsigned bitsize;
2941
2942 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2943
2944 switch (bitsize) {
2945 case 64:
2946 result = ac_build_intrinsic(ctx, "llvm.ctpop.i64", ctx->i64,
2947 (LLVMValueRef []) { src0 }, 1,
2948 AC_FUNC_ATTR_READNONE);
2949
2950 result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2951 break;
2952 case 32:
2953 result = ac_build_intrinsic(ctx, "llvm.ctpop.i32", ctx->i32,
2954 (LLVMValueRef []) { src0 }, 1,
2955 AC_FUNC_ATTR_READNONE);
2956 break;
2957 case 16:
2958 result = ac_build_intrinsic(ctx, "llvm.ctpop.i16", ctx->i16,
2959 (LLVMValueRef []) { src0 }, 1,
2960 AC_FUNC_ATTR_READNONE);
2961
2962 result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2963 break;
2964 case 8:
2965 result = ac_build_intrinsic(ctx, "llvm.ctpop.i8", ctx->i8,
2966 (LLVMValueRef []) { src0 }, 1,
2967 AC_FUNC_ATTR_READNONE);
2968
2969 result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2970 break;
2971 default:
2972 unreachable("invalid bitsize");
2973 break;
2974 }
2975
2976 return result;
2977 }
2978
2979 LLVMValueRef ac_build_bitfield_reverse(struct ac_llvm_context *ctx,
2980 LLVMValueRef src0)
2981 {
2982 LLVMValueRef result;
2983 unsigned bitsize;
2984
2985 bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2986
2987 switch (bitsize) {
2988 case 64:
2989 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i64", ctx->i64,
2990 (LLVMValueRef []) { src0 }, 1,
2991 AC_FUNC_ATTR_READNONE);
2992
2993 result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2994 break;
2995 case 32:
2996 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i32", ctx->i32,
2997 (LLVMValueRef []) { src0 }, 1,
2998 AC_FUNC_ATTR_READNONE);
2999 break;
3000 case 16:
3001 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i16", ctx->i16,
3002 (LLVMValueRef []) { src0 }, 1,
3003 AC_FUNC_ATTR_READNONE);
3004
3005 result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
3006 break;
3007 case 8:
3008 result = ac_build_intrinsic(ctx, "llvm.bitreverse.i8", ctx->i8,
3009 (LLVMValueRef []) { src0 }, 1,
3010 AC_FUNC_ATTR_READNONE);
3011
3012 result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
3013 break;
3014 default:
3015 unreachable("invalid bitsize");
3016 break;
3017 }
3018
3019 return result;
3020 }
3021
3022 #define AC_EXP_TARGET 0
3023 #define AC_EXP_ENABLED_CHANNELS 1
3024 #define AC_EXP_OUT0 2
3025
3026 enum ac_ir_type {
3027 AC_IR_UNDEF,
3028 AC_IR_CONST,
3029 AC_IR_VALUE,
3030 };
3031
3032 struct ac_vs_exp_chan
3033 {
3034 LLVMValueRef value;
3035 float const_float;
3036 enum ac_ir_type type;
3037 };
3038
3039 struct ac_vs_exp_inst {
3040 unsigned offset;
3041 LLVMValueRef inst;
3042 struct ac_vs_exp_chan chan[4];
3043 };
3044
3045 struct ac_vs_exports {
3046 unsigned num;
3047 struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
3048 };
3049
3050 /* Return true if the PARAM export has been eliminated. */
3051 static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset,
3052 uint32_t num_outputs,
3053 struct ac_vs_exp_inst *exp)
3054 {
3055 unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
3056 bool is_zero[4] = {}, is_one[4] = {};
3057
3058 for (i = 0; i < 4; i++) {
3059 /* It's a constant expression. Undef outputs are eliminated too. */
3060 if (exp->chan[i].type == AC_IR_UNDEF) {
3061 is_zero[i] = true;
3062 is_one[i] = true;
3063 } else if (exp->chan[i].type == AC_IR_CONST) {
3064 if (exp->chan[i].const_float == 0)
3065 is_zero[i] = true;
3066 else if (exp->chan[i].const_float == 1)
3067 is_one[i] = true;
3068 else
3069 return false; /* other constant */
3070 } else
3071 return false;
3072 }
3073
3074 /* Only certain combinations of 0 and 1 can be eliminated. */
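/* The four DEFAULT_VAL encodings are (x,y,z,w): 0 = (0,0,0,0),
 * 1 = (0,0,0,1), 2 = (1,1,1,0), 3 = (1,1,1,1); the value is applied via
 * AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val below.
 */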
3075 if (is_zero[0] && is_zero[1] && is_zero[2])
3076 default_val = is_zero[3] ? 0 : 1;
3077 else if (is_one[0] && is_one[1] && is_one[2])
3078 default_val = is_zero[3] ? 2 : 3;
3079 else
3080 return false;
3081
3082 /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
3083 LLVMInstructionEraseFromParent(exp->inst);
3084
3085 /* Change OFFSET to DEFAULT_VAL. */
3086 for (i = 0; i < num_outputs; i++) {
3087 if (vs_output_param_offset[i] == exp->offset) {
3088 vs_output_param_offset[i] =
3089 AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
3090 break;
3091 }
3092 }
3093 return true;
3094 }
3095
3096 static bool ac_eliminate_duplicated_output(struct ac_llvm_context *ctx,
3097 uint8_t *vs_output_param_offset,
3098 uint32_t num_outputs,
3099 struct ac_vs_exports *processed,
3100 struct ac_vs_exp_inst *exp)
3101 {
3102 unsigned p, copy_back_channels = 0;
3103
3104 /* See if the output is already in the list of processed outputs.
3105 * The LLVMValueRef comparison relies on SSA.
3106 */
3107 for (p = 0; p < processed->num; p++) {
3108 bool different = false;
3109
3110 for (unsigned j = 0; j < 4; j++) {
3111 struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
3112 struct ac_vs_exp_chan *c2 = &exp->chan[j];
3113
3114 /* Treat undef as a match. */
3115 if (c2->type == AC_IR_UNDEF)
3116 continue;
3117
3118 /* If c1 is undef but c2 isn't, we can copy c2 to c1
3119 * and consider the instruction duplicated.
3120 */
3121 if (c1->type == AC_IR_UNDEF) {
3122 copy_back_channels |= 1 << j;
3123 continue;
3124 }
3125
3126 /* Test whether the channels are not equal. */
3127 if (c1->type != c2->type ||
3128 (c1->type == AC_IR_CONST &&
3129 c1->const_float != c2->const_float) ||
3130 (c1->type == AC_IR_VALUE &&
3131 c1->value != c2->value)) {
3132 different = true;
3133 break;
3134 }
3135 }
3136 if (!different)
3137 break;
3138
3139 copy_back_channels = 0;
3140 }
3141 if (p == processed->num)
3142 return false;
3143
3144 /* If a match was found, but the matching export has undef where the new
3145 * one has a normal value, copy the normal value to the undef channel.
3146 */
3147 struct ac_vs_exp_inst *match = &processed->exp[p];
3148
3149 /* Get current enabled channels mask. */
3150 LLVMValueRef arg = LLVMGetOperand(match->inst, AC_EXP_ENABLED_CHANNELS);
3151 unsigned enabled_channels = LLVMConstIntGetZExtValue(arg);
3152
3153 while (copy_back_channels) {
3154 unsigned chan = u_bit_scan(&copy_back_channels);
3155
3156 assert(match->chan[chan].type == AC_IR_UNDEF);
3157 LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan,
3158 exp->chan[chan].value);
3159 match->chan[chan] = exp->chan[chan];
3160
3161 /* Update number of enabled channels because the original mask
3162 * is not always 0xf.
3163 */
3164 enabled_channels |= (1 << chan);
3165 LLVMSetOperand(match->inst, AC_EXP_ENABLED_CHANNELS,
3166 LLVMConstInt(ctx->i32, enabled_channels, 0));
3167 }
3168
3169 /* The PARAM export is duplicated. Kill it. */
3170 LLVMInstructionEraseFromParent(exp->inst);
3171
3172 /* Change OFFSET to the matching export. */
3173 for (unsigned i = 0; i < num_outputs; i++) {
3174 if (vs_output_param_offset[i] == exp->offset) {
3175 vs_output_param_offset[i] = match->offset;
3176 break;
3177 }
3178 }
3179 return true;
3180 }
3181
3182 void ac_optimize_vs_outputs(struct ac_llvm_context *ctx,
3183 LLVMValueRef main_fn,
3184 uint8_t *vs_output_param_offset,
3185 uint32_t num_outputs,
3186 uint8_t *num_param_exports)
3187 {
3188 LLVMBasicBlockRef bb;
3189 bool removed_any = false;
3190 struct ac_vs_exports exports;
3191
3192 exports.num = 0;
3193
3194 /* Process all LLVM instructions. */
3195 bb = LLVMGetFirstBasicBlock(main_fn);
3196 while (bb) {
3197 LLVMValueRef inst = LLVMGetFirstInstruction(bb);
3198
3199 while (inst) {
3200 LLVMValueRef cur = inst;
3201 inst = LLVMGetNextInstruction(inst);
3202 struct ac_vs_exp_inst exp;
3203
3204 if (LLVMGetInstructionOpcode(cur) != LLVMCall)
3205 continue;
3206
3207 LLVMValueRef callee = ac_llvm_get_called_value(cur);
3208
3209 if (!ac_llvm_is_function(callee))
3210 continue;
3211
3212 const char *name = LLVMGetValueName(callee);
3213 unsigned num_args = LLVMCountParams(callee);
3214
3215 /* Check if this is an export instruction. */
3216 if ((num_args != 9 && num_args != 8) ||
3217 (strcmp(name, "llvm.SI.export") &&
3218 strcmp(name, "llvm.amdgcn.exp.f32")))
3219 continue;
3220
3221 LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
3222 unsigned target = LLVMConstIntGetZExtValue(arg);
3223
3224 if (target < V_008DFC_SQ_EXP_PARAM)
3225 continue;
3226
3227 target -= V_008DFC_SQ_EXP_PARAM;
3228
3229 /* Parse the instruction. */
3230 memset(&exp, 0, sizeof(exp));
3231 exp.offset = target;
3232 exp.inst = cur;
3233
3234 for (unsigned i = 0; i < 4; i++) {
3235 LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);
3236
3237 exp.chan[i].value = v;
3238
3239 if (LLVMIsUndef(v)) {
3240 exp.chan[i].type = AC_IR_UNDEF;
3241 } else if (LLVMIsAConstantFP(v)) {
3242 LLVMBool loses_info;
3243 exp.chan[i].type = AC_IR_CONST;
3244 exp.chan[i].const_float =
3245 LLVMConstRealGetDouble(v, &loses_info);
3246 } else {
3247 exp.chan[i].type = AC_IR_VALUE;
3248 }
3249 }
3250
3251 /* Eliminate constant and duplicated PARAM exports. */
3252 if (ac_eliminate_const_output(vs_output_param_offset,
3253 num_outputs, &exp) ||
3254 ac_eliminate_duplicated_output(ctx,
3255 vs_output_param_offset,
3256 num_outputs, &exports,
3257 &exp)) {
3258 removed_any = true;
3259 } else {
3260 exports.exp[exports.num++] = exp;
3261 }
3262 }
3263 bb = LLVMGetNextBasicBlock(bb);
3264 }
3265
3266 /* Remove holes in export memory due to removed PARAM exports.
3267 * This is done by renumbering all PARAM exports.
3268 */
3269 if (removed_any) {
3270 uint8_t old_offset[VARYING_SLOT_MAX];
3271 unsigned out, i;
3272
3273 /* Make a copy of the offsets. We need the old version while
3274 * we are modifying some of them. */
3275 memcpy(old_offset, vs_output_param_offset,
3276 sizeof(old_offset));
3277
3278 for (i = 0; i < exports.num; i++) {
3279 unsigned offset = exports.exp[i].offset;
3280
3281 /* Update vs_output_param_offset. Multiple outputs can
3282 * have the same offset.
3283 */
3284 for (out = 0; out < num_outputs; out++) {
3285 if (old_offset[out] == offset)
3286 vs_output_param_offset[out] = i;
3287 }
3288
3289 /* Change the PARAM offset in the instruction. */
3290 LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
3291 LLVMConstInt(ctx->i32,
3292 V_008DFC_SQ_EXP_PARAM + i, 0));
3293 }
3294 *num_param_exports = exports.num;
3295 }
3296 }
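/* Illustrative example: if the surviving exports originally used PARAM
 * offsets {0, 2, 5}, the renumbering loop rewrites them to {0, 1, 2} and
 * redirects every vs_output_param_offset entry that pointed at an old
 * offset, so the parameter export space ends up contiguous.
 */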
3297
3298 void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
3299 {
3300 LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
3301 ac_build_intrinsic(ctx,
3302 "llvm.amdgcn.init.exec", ctx->voidt,
3303 &full_mask, 1, AC_FUNC_ATTR_CONVERGENT);
3304 }
3305
3306 void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
3307 {
3308 unsigned lds_size = ctx->chip_class >= GFX7 ? 65536 : 32768;
3309 ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32_0,
3310 LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_ADDR_SPACE_LDS),
3311 "lds");
3312 }
3313
3314 LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx,
3315 LLVMValueRef dw_addr)
3316 {
3317 return LLVMBuildLoad(ctx->builder, ac_build_gep0(ctx, ctx->lds, dw_addr), "");
3318 }
3319
3320 void ac_lds_store(struct ac_llvm_context *ctx,
3321 LLVMValueRef dw_addr,
3322 LLVMValueRef value)
3323 {
3324 value = ac_to_integer(ctx, value);
3325 ac_build_indexed_store(ctx, ctx->lds,
3326 dw_addr, value);
3327 }
3328
3329 LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx,
3330 LLVMTypeRef dst_type,
3331 LLVMValueRef src0)
3332 {
3333 unsigned src0_bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
3334 const char *intrin_name;
3335 LLVMTypeRef type;
3336 LLVMValueRef zero;
3337
3338 switch (src0_bitsize) {
3339 case 64:
3340 intrin_name = "llvm.cttz.i64";
3341 type = ctx->i64;
3342 zero = ctx->i64_0;
3343 break;
3344 case 32:
3345 intrin_name = "llvm.cttz.i32";
3346 type = ctx->i32;
3347 zero = ctx->i32_0;
3348 break;
3349 case 16:
3350 intrin_name = "llvm.cttz.i16";
3351 type = ctx->i16;
3352 zero = ctx->i16_0;
3353 break;
3354 case 8:
3355 intrin_name = "llvm.cttz.i8";
3356 type = ctx->i8;
3357 zero = ctx->i8_0;
3358 break;
3359 default:
3360 unreachable("invalid bitsize");
3361 }
3362
3363 LLVMValueRef params[2] = {
3364 src0,
3365
3366 /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
3367 * add special code to check for x=0. The reason is that
3368 * the LLVM behavior for x=0 is different from what we
3369 * need here. However, LLVM also assumes that ffs(x) is
3370 * in [0, 31], but GLSL expects that ffs(0) = -1, so
3371 * a conditional assignment to handle 0 is still required.
3372 *
3373 * The hardware already implements the correct behavior.
3374 */
3375 ctx->i1true,
3376 };
3377
3378 LLVMValueRef lsb = ac_build_intrinsic(ctx, intrin_name, type,
3379 params, 2,
3380 AC_FUNC_ATTR_READNONE);
3381
3382 if (src0_bitsize == 64) {
3383 lsb = LLVMBuildTrunc(ctx->builder, lsb, ctx->i32, "");
3384 } else if (src0_bitsize < 32) {
3385 lsb = LLVMBuildSExt(ctx->builder, lsb, ctx->i32, "");
3386 }
3387
3388 /* TODO: We need an intrinsic to skip this conditional. */
3389 /* Check for zero: */
3390 return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
3391 LLVMIntEQ, src0,
3392 zero, ""),
3393 LLVMConstInt(ctx->i32, -1, 0), lsb, "");
3394 }
3395
3396 LLVMTypeRef ac_array_in_const_addr_space(LLVMTypeRef elem_type)
3397 {
3398 return LLVMPointerType(elem_type, AC_ADDR_SPACE_CONST);
3399 }
3400
3401 LLVMTypeRef ac_array_in_const32_addr_space(LLVMTypeRef elem_type)
3402 {
3403 return LLVMPointerType(elem_type, AC_ADDR_SPACE_CONST_32BIT);
3404 }
3405
3406 static struct ac_llvm_flow *
3407 get_current_flow(struct ac_llvm_context *ctx)
3408 {
3409 if (ctx->flow_depth > 0)
3410 return &ctx->flow[ctx->flow_depth - 1];
3411 return NULL;
3412 }
3413
3414 static struct ac_llvm_flow *
3415 get_innermost_loop(struct ac_llvm_context *ctx)
3416 {
3417 for (unsigned i = ctx->flow_depth; i > 0; --i) {
3418 if (ctx->flow[i - 1].loop_entry_block)
3419 return &ctx->flow[i - 1];
3420 }
3421 return NULL;
3422 }
3423
3424 static struct ac_llvm_flow *
3425 push_flow(struct ac_llvm_context *ctx)
3426 {
3427 struct ac_llvm_flow *flow;
3428
3429 if (ctx->flow_depth >= ctx->flow_depth_max) {
3430 unsigned new_max = MAX2(ctx->flow_depth << 1,
3431 AC_LLVM_INITIAL_CF_DEPTH);
3432
3433 ctx->flow = realloc(ctx->flow, new_max * sizeof(*ctx->flow));
3434 ctx->flow_depth_max = new_max;
3435 }
3436
3437 flow = &ctx->flow[ctx->flow_depth];
3438 ctx->flow_depth++;
3439
3440 flow->next_block = NULL;
3441 flow->loop_entry_block = NULL;
3442 return flow;
3443 }
3444
3445 static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base,
3446 int label_id)
3447 {
3448 char buf[32];
3449 snprintf(buf, sizeof(buf), "%s%d", base, label_id);
3450 LLVMSetValueName(LLVMBasicBlockAsValue(bb), buf);
3451 }
3452
3453 /* Append a basic block at the level of the parent flow.
3454 */
3455 static LLVMBasicBlockRef append_basic_block(struct ac_llvm_context *ctx,
3456 const char *name)
3457 {
3458 assert(ctx->flow_depth >= 1);
3459
3460 if (ctx->flow_depth >= 2) {
3461 struct ac_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
3462
3463 return LLVMInsertBasicBlockInContext(ctx->context,
3464 flow->next_block, name);
3465 }
3466
3467 LLVMValueRef main_fn =
3468 LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->builder));
3469 return LLVMAppendBasicBlockInContext(ctx->context, main_fn, name);
3470 }
3471
3472 /* Emit a branch to the given default target for the current block if
3473 * applicable -- that is, if the current block does not already contain a
3474 * branch from a break or continue.
3475 */
3476 static void emit_default_branch(LLVMBuilderRef builder,
3477 LLVMBasicBlockRef target)
3478 {
3479 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
3480 LLVMBuildBr(builder, target);
3481 }

void ac_build_bgnloop(struct ac_llvm_context *ctx, int label_id)
{
	struct ac_llvm_flow *flow = push_flow(ctx);
	flow->loop_entry_block = append_basic_block(ctx, "LOOP");
	flow->next_block = append_basic_block(ctx, "ENDLOOP");
	set_basicblock_name(flow->loop_entry_block, "loop", label_id);
	LLVMBuildBr(ctx->builder, flow->loop_entry_block);
	LLVMPositionBuilderAtEnd(ctx->builder, flow->loop_entry_block);
}

void ac_build_break(struct ac_llvm_context *ctx)
{
	struct ac_llvm_flow *flow = get_innermost_loop(ctx);
	LLVMBuildBr(ctx->builder, flow->next_block);
}

void ac_build_continue(struct ac_llvm_context *ctx)
{
	struct ac_llvm_flow *flow = get_innermost_loop(ctx);
	LLVMBuildBr(ctx->builder, flow->loop_entry_block);
}

void ac_build_else(struct ac_llvm_context *ctx, int label_id)
{
	struct ac_llvm_flow *current_branch = get_current_flow(ctx);
	LLVMBasicBlockRef endif_block;

	assert(!current_branch->loop_entry_block);

	endif_block = append_basic_block(ctx, "ENDIF");
	emit_default_branch(ctx->builder, endif_block);

	LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
	set_basicblock_name(current_branch->next_block, "else", label_id);

	current_branch->next_block = endif_block;
}

void ac_build_endif(struct ac_llvm_context *ctx, int label_id)
{
	struct ac_llvm_flow *current_branch = get_current_flow(ctx);

	assert(!current_branch->loop_entry_block);

	emit_default_branch(ctx->builder, current_branch->next_block);
	LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
	set_basicblock_name(current_branch->next_block, "endif", label_id);

	ctx->flow_depth--;
}

void ac_build_endloop(struct ac_llvm_context *ctx, int label_id)
{
	struct ac_llvm_flow *current_loop = get_current_flow(ctx);

	assert(current_loop->loop_entry_block);

	emit_default_branch(ctx->builder, current_loop->loop_entry_block);

	LLVMPositionBuilderAtEnd(ctx->builder, current_loop->next_block);
	set_basicblock_name(current_loop->next_block, "endloop", label_id);
	ctx->flow_depth--;
}

void ac_build_ifcc(struct ac_llvm_context *ctx, LLVMValueRef cond, int label_id)
{
	struct ac_llvm_flow *flow = push_flow(ctx);
	LLVMBasicBlockRef if_block;

	if_block = append_basic_block(ctx, "IF");
	flow->next_block = append_basic_block(ctx, "ELSE");
	set_basicblock_name(if_block, "if", label_id);
	LLVMBuildCondBr(ctx->builder, cond, if_block, flow->next_block);
	LLVMPositionBuilderAtEnd(ctx->builder, if_block);
}

void ac_build_if(struct ac_llvm_context *ctx, LLVMValueRef value,
		 int label_id)
{
	LLVMValueRef cond = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
					  value, ctx->f32_0, "");
	ac_build_ifcc(ctx, cond, label_id);
}

void ac_build_uif(struct ac_llvm_context *ctx, LLVMValueRef value,
		  int label_id)
{
	LLVMValueRef cond = LLVMBuildICmp(ctx->builder, LLVMIntNE,
					  ac_to_integer(ctx, value),
					  ctx->i32_0, "");
	ac_build_ifcc(ctx, cond, label_id);
}

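/* A minimal usage sketch for the structured control-flow helpers above
 * (hypothetical caller; "counter" and "limit" are assumed i32 values):
 *
 *    ac_build_bgnloop(ctx, 100);
 *    {
 *        LLVMValueRef done = LLVMBuildICmp(ctx->builder, LLVMIntUGE,
 *                                          counter, limit, "");
 *        ac_build_ifcc(ctx, done, 101);
 *        ac_build_break(ctx);
 *        ac_build_endif(ctx, 101);
 *        ... loop body ...
 *    }
 *    ac_build_endloop(ctx, 100);
 *
 * The label ids (100, 101 here) only affect the names given to the
 * emitted basic blocks, to keep the IR readable.
 */
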
LLVMValueRef ac_build_alloca_undef(struct ac_llvm_context *ac, LLVMTypeRef type,
				   const char *name)
{
	LLVMBuilderRef builder = ac->builder;
	LLVMBasicBlockRef current_block = LLVMGetInsertBlock(builder);
	LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
	LLVMBasicBlockRef first_block = LLVMGetEntryBasicBlock(function);
	LLVMValueRef first_instr = LLVMGetFirstInstruction(first_block);
	LLVMBuilderRef first_builder = LLVMCreateBuilderInContext(ac->context);
	LLVMValueRef res;

	if (first_instr) {
		LLVMPositionBuilderBefore(first_builder, first_instr);
	} else {
		LLVMPositionBuilderAtEnd(first_builder, first_block);
	}

	res = LLVMBuildAlloca(first_builder, type, name);
	LLVMDisposeBuilder(first_builder);
	return res;
}

LLVMValueRef ac_build_alloca(struct ac_llvm_context *ac,
			     LLVMTypeRef type, const char *name)
{
	LLVMValueRef ptr = ac_build_alloca_undef(ac, type, name);
	LLVMBuildStore(ac->builder, LLVMConstNull(type), ptr);
	return ptr;
}

LLVMValueRef ac_cast_ptr(struct ac_llvm_context *ctx, LLVMValueRef ptr,
			 LLVMTypeRef type)
{
	int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
	return LLVMBuildBitCast(ctx->builder, ptr,
				LLVMPointerType(type, addr_space), "");
}

LLVMValueRef ac_trim_vector(struct ac_llvm_context *ctx, LLVMValueRef value,
			    unsigned count)
{
	unsigned num_components = ac_get_llvm_num_components(value);
	if (count == num_components)
		return value;

	LLVMValueRef masks[MAX2(count, 2)];
	masks[0] = ctx->i32_0;
	masks[1] = ctx->i32_1;
	for (unsigned i = 2; i < count; i++)
		masks[i] = LLVMConstInt(ctx->i32, i, false);

	if (count == 1)
		return LLVMBuildExtractElement(ctx->builder, value, masks[0],
					       "");

	LLVMValueRef swizzle = LLVMConstVector(masks, count);
	return LLVMBuildShuffleVector(ctx->builder, value, value, swizzle, "");
}

LLVMValueRef ac_unpack_param(struct ac_llvm_context *ctx, LLVMValueRef param,
			     unsigned rshift, unsigned bitwidth)
{
	LLVMValueRef value = param;
	if (rshift)
		value = LLVMBuildLShr(ctx->builder, value,
				      LLVMConstInt(ctx->i32, rshift, false), "");

	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->builder, value,
				     LLVMConstInt(ctx->i32, mask, false), "");
	}
	return value;
}

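/* Sketch of how ac_unpack_param decodes a packed SGPR. For a hypothetical
 * 32-bit param laid out as [bits 0..7: base][bits 8..12: count], the two
 * fields would be pulled out as:
 *
 *    LLVMValueRef base  = ac_unpack_param(ctx, param, 0, 8);
 *    LLVMValueRef count = ac_unpack_param(ctx, param, 8, 5);
 *
 * i.e. (param >> rshift) & ((1 << bitwidth) - 1), with the AND skipped
 * when the field extends all the way to bit 31.
 */
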
/* Adjust the sample index according to FMASK.
 *
 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
 * which is the identity mapping. Each nibble says which physical sample
 * should be fetched to get that sample.
 *
 * For example, 0x11111100 means there are only 2 samples stored and
 * the second sample covers 3/4 of the pixel. When reading samples 0
 * and 1, return physical sample 0 (determined by the first two 0s
 * in FMASK), otherwise return physical sample 1.
 *
 * The sample index should be adjusted as follows:
 *   addr[sample_index] = (fmask >> (addr[sample_index] * 4)) & 0xF;
 */
void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask,
			      LLVMValueRef *addr, bool is_array_tex)
{
	struct ac_image_args fmask_load = {};
	fmask_load.opcode = ac_image_load;
	fmask_load.resource = fmask;
	fmask_load.dmask = 0xf;
	fmask_load.dim = is_array_tex ? ac_image_2darray : ac_image_2d;
	fmask_load.attributes = AC_FUNC_ATTR_READNONE;

	fmask_load.coords[0] = addr[0];
	fmask_load.coords[1] = addr[1];
	if (is_array_tex)
		fmask_load.coords[2] = addr[2];

	LLVMValueRef fmask_value = ac_build_image_opcode(ac, &fmask_load);
	fmask_value = LLVMBuildExtractElement(ac->builder, fmask_value,
					      ac->i32_0, "");

	/* Apply the formula. */
	unsigned sample_chan = is_array_tex ? 3 : 2;
	LLVMValueRef final_sample;
	final_sample = LLVMBuildMul(ac->builder, addr[sample_chan],
				    LLVMConstInt(ac->i32, 4, 0), "");
	final_sample = LLVMBuildLShr(ac->builder, fmask_value, final_sample, "");
	/* Mask the sample index by 0x7, because 0x8 means an unknown value
	 * with EQAA, so those will map to 0. */
	final_sample = LLVMBuildAnd(ac->builder, final_sample,
				    LLVMConstInt(ac->i32, 0x7, 0), "");

	/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
	 * resource descriptor is 0 (invalid).
	 */
	LLVMValueRef tmp;
	tmp = LLVMBuildBitCast(ac->builder, fmask, ac->v8i32, "");
	tmp = LLVMBuildExtractElement(ac->builder, tmp, ac->i32_1, "");
	tmp = LLVMBuildICmp(ac->builder, LLVMIntNE, tmp, ac->i32_0, "");

	/* Replace the MSAA sample index. */
	addr[sample_chan] = LLVMBuildSelect(ac->builder, tmp, final_sample,
					    addr[sample_chan], "");
}

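/* Worked example of the formula above, using the FMASK value 0x11111100
 * from the comment: reading logical sample 1 computes
 * (0x11111100 >> (1 * 4)) & 0xF = 0, while logical sample 2 computes
 * (0x11111100 >> (2 * 4)) & 0xF = 1, matching "the first two 0s map to
 * physical sample 0, everything else to physical sample 1".
 */
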
static LLVMValueRef
_ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
{
	ac_build_optimization_barrier(ctx, &src);
	return ac_build_intrinsic(ctx,
				  lane == NULL ? "llvm.amdgcn.readfirstlane" : "llvm.amdgcn.readlane",
				  LLVMTypeOf(src), (LLVMValueRef []) {
					  src, lane },
				  lane == NULL ? 1 : 2,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_CONVERGENT);
}

/**
 * Builds the "llvm.amdgcn.readlane" or "llvm.amdgcn.readfirstlane" intrinsic.
 * @param ctx
 * @param src
 * @param lane - id of the lane or NULL for the first active lane
 * @return value of the lane
 */
LLVMValueRef
ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
{
	LLVMTypeRef src_type = LLVMTypeOf(src);
	src = ac_to_integer(ctx, src);
	unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
	LLVMValueRef ret;

	if (bits == 32) {
		ret = _ac_build_readlane(ctx, src, lane);
	} else {
		assert(bits % 32 == 0);
		LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
		LLVMValueRef src_vector =
			LLVMBuildBitCast(ctx->builder, src, vec_type, "");
		ret = LLVMGetUndef(vec_type);
		for (unsigned i = 0; i < bits / 32; i++) {
			src = LLVMBuildExtractElement(ctx->builder, src_vector,
						      LLVMConstInt(ctx->i32, i, 0), "");
			LLVMValueRef ret_comp = _ac_build_readlane(ctx, src, lane);
			ret = LLVMBuildInsertElement(ctx->builder, ret, ret_comp,
						     LLVMConstInt(ctx->i32, i, 0), "");
		}
	}
	return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
}

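/* Usage sketch (hypothetical f64 value "v"): broadcasting from a fixed
 * lane, and reading from the first active lane when any lane will do:
 *
 *    LLVMValueRef from_lane5 =
 *        ac_build_readlane(ctx, v, LLVMConstInt(ctx->i32, 5, 0));
 *    LLVMValueRef uniform = ac_build_readlane(ctx, v, NULL);
 *
 * For the 64-bit case the wrapper above splits the value into two i32
 * readlanes and reassembles the result.
 */
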
LLVMValueRef
ac_build_writelane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef value, LLVMValueRef lane)
{
	/* TODO: Use the actual instruction when LLVM adds an intrinsic for it.
	 */
	LLVMValueRef pred = LLVMBuildICmp(ctx->builder, LLVMIntEQ, lane,
					  ac_get_thread_id(ctx), "");
	return LLVMBuildSelect(ctx->builder, pred, value, src, "");
}

LLVMValueRef
ac_build_mbcnt(struct ac_llvm_context *ctx, LLVMValueRef mask)
{
	LLVMValueRef mask_vec = LLVMBuildBitCast(ctx->builder, mask,
						 LLVMVectorType(ctx->i32, 2),
						 "");
	LLVMValueRef mask_lo = LLVMBuildExtractElement(ctx->builder, mask_vec,
						       ctx->i32_0, "");
	LLVMValueRef mask_hi = LLVMBuildExtractElement(ctx->builder, mask_vec,
						       ctx->i32_1, "");
	LLVMValueRef val =
		ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.lo", ctx->i32,
				   (LLVMValueRef []) { mask_lo, ctx->i32_0 },
				   2, AC_FUNC_ATTR_READNONE);
	val = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi", ctx->i32,
				 (LLVMValueRef []) { mask_hi, val },
				 2, AC_FUNC_ATTR_READNONE);
	return val;
}

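/* mbcnt returns, per lane, the number of bits set in the 64-bit mask at
 * positions strictly below the lane id. A common pattern (also used by
 * the i1 scan fast paths further down) is computing a lane's output
 * slot when compacting a per-lane condition; "cond" here is a
 * hypothetical i32 0/1 value:
 *
 *    LLVMValueRef ballot = ac_build_ballot(ctx, cond);
 *    LLVMValueRef slot   = ac_build_mbcnt(ctx, ballot);  // exclusive count
 */
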
enum dpp_ctrl {
	_dpp_quad_perm = 0x000,
	_dpp_row_sl = 0x100,
	_dpp_row_sr = 0x110,
	_dpp_row_rr = 0x120,
	dpp_wf_sl1 = 0x130,
	dpp_wf_rl1 = 0x134,
	dpp_wf_sr1 = 0x138,
	dpp_wf_rr1 = 0x13C,
	dpp_row_mirror = 0x140,
	dpp_row_half_mirror = 0x141,
	dpp_row_bcast15 = 0x142,
	dpp_row_bcast31 = 0x143
};

static inline enum dpp_ctrl
dpp_quad_perm(unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
{
	assert(lane0 < 4 && lane1 < 4 && lane2 < 4 && lane3 < 4);
	return _dpp_quad_perm | lane0 | (lane1 << 2) | (lane2 << 4) | (lane3 << 6);
}

static inline enum dpp_ctrl
dpp_row_sl(unsigned amount)
{
	assert(amount > 0 && amount < 16);
	return _dpp_row_sl | amount;
}

static inline enum dpp_ctrl
dpp_row_sr(unsigned amount)
{
	assert(amount > 0 && amount < 16);
	return _dpp_row_sr | amount;
}

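/* Example encodings of the DPP control field built by these helpers:
 *
 *    dpp_quad_perm(1, 0, 3, 2)  -> 0x0B1  (0x000 | 1 | 0<<2 | 3<<4 | 2<<6)
 *    dpp_row_sr(1)              -> 0x111  (shift each row right by 1 lane)
 *
 * i.e. quad_perm packs four 2-bit source selects, and row_sl/row_sr OR
 * the shift amount into the low bits of the base opcode.
 */
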
static LLVMValueRef
_ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
	      enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
	      bool bound_ctrl)
{
	return ac_build_intrinsic(ctx, "llvm.amdgcn.update.dpp.i32",
				  LLVMTypeOf(old),
				  (LLVMValueRef[]) {
					  old, src,
					  LLVMConstInt(ctx->i32, dpp_ctrl, 0),
					  LLVMConstInt(ctx->i32, row_mask, 0),
					  LLVMConstInt(ctx->i32, bank_mask, 0),
					  LLVMConstInt(ctx->i1, bound_ctrl, 0) },
				  6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
}

static LLVMValueRef
ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
	     enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
	     bool bound_ctrl)
{
	LLVMTypeRef src_type = LLVMTypeOf(src);
	src = ac_to_integer(ctx, src);
	old = ac_to_integer(ctx, old);
	unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
	LLVMValueRef ret;
	if (bits == 32) {
		ret = _ac_build_dpp(ctx, old, src, dpp_ctrl, row_mask,
				    bank_mask, bound_ctrl);
	} else {
		assert(bits % 32 == 0);
		LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
		LLVMValueRef src_vector =
			LLVMBuildBitCast(ctx->builder, src, vec_type, "");
		LLVMValueRef old_vector =
			LLVMBuildBitCast(ctx->builder, old, vec_type, "");
		ret = LLVMGetUndef(vec_type);
		for (unsigned i = 0; i < bits / 32; i++) {
			src = LLVMBuildExtractElement(ctx->builder, src_vector,
						      LLVMConstInt(ctx->i32, i, 0), "");
			old = LLVMBuildExtractElement(ctx->builder, old_vector,
						      LLVMConstInt(ctx->i32, i, 0), "");
			LLVMValueRef ret_comp = _ac_build_dpp(ctx, old, src,
							      dpp_ctrl, row_mask,
							      bank_mask, bound_ctrl);
			ret = LLVMBuildInsertElement(ctx->builder, ret, ret_comp,
						     LLVMConstInt(ctx->i32, i, 0), "");
		}
	}
	return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
}

static inline unsigned
ds_pattern_bitmode(unsigned and_mask, unsigned or_mask, unsigned xor_mask)
{
	assert(and_mask < 32 && or_mask < 32 && xor_mask < 32);
	return and_mask | (or_mask << 5) | (xor_mask << 10);
}

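/* In bitmode, ds_swizzle computes each lane's source within a group of
 * 32 as ((lane & and_mask) | or_mask) ^ xor_mask. The reduction below
 * relies on the XOR term for butterfly swaps, e.g.:
 *
 *    ds_pattern_bitmode(0x1f, 0, 0x04)  // swap lanes 4 apart
 *    ds_pattern_bitmode(0x1f, 0, 0x08)  // swap lanes 8 apart
 */
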
static LLVMValueRef
_ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
{
	return ac_build_intrinsic(ctx, "llvm.amdgcn.ds.swizzle",
				  LLVMTypeOf(src), (LLVMValueRef []) {
					  src, LLVMConstInt(ctx->i32, mask, 0) },
				  2, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
}

LLVMValueRef
ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
{
	LLVMTypeRef src_type = LLVMTypeOf(src);
	src = ac_to_integer(ctx, src);
	unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
	LLVMValueRef ret;
	if (bits == 32) {
		ret = _ac_build_ds_swizzle(ctx, src, mask);
	} else {
		assert(bits % 32 == 0);
		LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
		LLVMValueRef src_vector =
			LLVMBuildBitCast(ctx->builder, src, vec_type, "");
		ret = LLVMGetUndef(vec_type);
		for (unsigned i = 0; i < bits / 32; i++) {
			src = LLVMBuildExtractElement(ctx->builder, src_vector,
						      LLVMConstInt(ctx->i32, i, 0), "");
			LLVMValueRef ret_comp = _ac_build_ds_swizzle(ctx, src,
								     mask);
			ret = LLVMBuildInsertElement(ctx->builder, ret, ret_comp,
						     LLVMConstInt(ctx->i32, i, 0), "");
		}
	}
	return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
}

static LLVMValueRef
ac_build_wwm(struct ac_llvm_context *ctx, LLVMValueRef src)
{
	char name[32], type[8];
	ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
	snprintf(name, sizeof(name), "llvm.amdgcn.wwm.%s", type);
	return ac_build_intrinsic(ctx, name, LLVMTypeOf(src),
				  (LLVMValueRef []) { src }, 1,
				  AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef
ac_build_set_inactive(struct ac_llvm_context *ctx, LLVMValueRef src,
		      LLVMValueRef inactive)
{
	char name[33], type[8];
	LLVMTypeRef src_type = LLVMTypeOf(src);
	src = ac_to_integer(ctx, src);
	inactive = ac_to_integer(ctx, inactive);
	ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
	snprintf(name, sizeof(name), "llvm.amdgcn.set.inactive.%s", type);
	LLVMValueRef ret =
		ac_build_intrinsic(ctx, name,
				   LLVMTypeOf(src), (LLVMValueRef []) {
					   src, inactive }, 2,
				   AC_FUNC_ATTR_READNONE |
				   AC_FUNC_ATTR_CONVERGENT);
	return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
}

static LLVMValueRef
get_reduction_identity(struct ac_llvm_context *ctx, nir_op op, unsigned type_size)
{
	if (type_size == 4) {
		switch (op) {
		case nir_op_iadd: return ctx->i32_0;
		case nir_op_fadd: return ctx->f32_0;
		case nir_op_imul: return ctx->i32_1;
		case nir_op_fmul: return ctx->f32_1;
		case nir_op_imin: return LLVMConstInt(ctx->i32, INT32_MAX, 0);
		case nir_op_umin: return LLVMConstInt(ctx->i32, UINT32_MAX, 0);
		case nir_op_fmin: return LLVMConstReal(ctx->f32, INFINITY);
		case nir_op_imax: return LLVMConstInt(ctx->i32, INT32_MIN, 0);
		case nir_op_umax: return ctx->i32_0;
		case nir_op_fmax: return LLVMConstReal(ctx->f32, -INFINITY);
		case nir_op_iand: return LLVMConstInt(ctx->i32, -1, 0);
		case nir_op_ior: return ctx->i32_0;
		case nir_op_ixor: return ctx->i32_0;
		default:
			unreachable("bad reduction intrinsic");
		}
	} else { /* type_size == 8, i.e. 64-bit */
		switch (op) {
		case nir_op_iadd: return ctx->i64_0;
		case nir_op_fadd: return ctx->f64_0;
		case nir_op_imul: return ctx->i64_1;
		case nir_op_fmul: return ctx->f64_1;
		case nir_op_imin: return LLVMConstInt(ctx->i64, INT64_MAX, 0);
		case nir_op_umin: return LLVMConstInt(ctx->i64, UINT64_MAX, 0);
		case nir_op_fmin: return LLVMConstReal(ctx->f64, INFINITY);
		case nir_op_imax: return LLVMConstInt(ctx->i64, INT64_MIN, 0);
		case nir_op_umax: return ctx->i64_0;
		case nir_op_fmax: return LLVMConstReal(ctx->f64, -INFINITY);
		case nir_op_iand: return LLVMConstInt(ctx->i64, -1, 0);
		case nir_op_ior: return ctx->i64_0;
		case nir_op_ixor: return ctx->i64_0;
		default:
			unreachable("bad reduction intrinsic");
		}
	}
}

static LLVMValueRef
ac_build_alu_op(struct ac_llvm_context *ctx, LLVMValueRef lhs, LLVMValueRef rhs, nir_op op)
{
	bool _64bit = ac_get_type_size(LLVMTypeOf(lhs)) == 8;
	switch (op) {
	case nir_op_iadd: return LLVMBuildAdd(ctx->builder, lhs, rhs, "");
	case nir_op_fadd: return LLVMBuildFAdd(ctx->builder, lhs, rhs, "");
	case nir_op_imul: return LLVMBuildMul(ctx->builder, lhs, rhs, "");
	case nir_op_fmul: return LLVMBuildFMul(ctx->builder, lhs, rhs, "");
	case nir_op_imin: return LLVMBuildSelect(ctx->builder,
			LLVMBuildICmp(ctx->builder, LLVMIntSLT, lhs, rhs, ""),
			lhs, rhs, "");
	case nir_op_umin: return LLVMBuildSelect(ctx->builder,
			LLVMBuildICmp(ctx->builder, LLVMIntULT, lhs, rhs, ""),
			lhs, rhs, "");
	case nir_op_fmin: return ac_build_intrinsic(ctx,
			_64bit ? "llvm.minnum.f64" : "llvm.minnum.f32",
			_64bit ? ctx->f64 : ctx->f32,
			(LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
	case nir_op_imax: return LLVMBuildSelect(ctx->builder,
			LLVMBuildICmp(ctx->builder, LLVMIntSGT, lhs, rhs, ""),
			lhs, rhs, "");
	case nir_op_umax: return LLVMBuildSelect(ctx->builder,
			LLVMBuildICmp(ctx->builder, LLVMIntUGT, lhs, rhs, ""),
			lhs, rhs, "");
	case nir_op_fmax: return ac_build_intrinsic(ctx,
			_64bit ? "llvm.maxnum.f64" : "llvm.maxnum.f32",
			_64bit ? ctx->f64 : ctx->f32,
			(LLVMValueRef[]){lhs, rhs}, 2, AC_FUNC_ATTR_READNONE);
	case nir_op_iand: return LLVMBuildAnd(ctx->builder, lhs, rhs, "");
	case nir_op_ior: return LLVMBuildOr(ctx->builder, lhs, rhs, "");
	case nir_op_ixor: return LLVMBuildXor(ctx->builder, lhs, rhs, "");
	default:
		unreachable("bad reduction intrinsic");
	}
}

/**
 * \param maxprefix specifies that the result only needs to be correct for a
 *     prefix of this many threads
 *
 * TODO: add inclusive and exclusive scan functions for GFX6.
 */
static LLVMValueRef
ac_build_scan(struct ac_llvm_context *ctx, nir_op op, LLVMValueRef src, LLVMValueRef identity,
	      unsigned maxprefix)
{
	LLVMValueRef result, tmp;
	result = src;
	if (maxprefix <= 1)
		return result;
	tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(1), 0xf, 0xf, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 2)
		return result;
	tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(2), 0xf, 0xf, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 3)
		return result;
	tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(3), 0xf, 0xf, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 4)
		return result;
	tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(4), 0xf, 0xe, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 8)
		return result;
	tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(8), 0xf, 0xc, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 16)
		return result;
	tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	if (maxprefix <= 32)
		return result;
	tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
	result = ac_build_alu_op(ctx, result, tmp, op);
	return result;
}

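/* Sketch of how the scan composes, taking op == iadd: after the
 * row_sr(1) step lane i holds src[i] + src[i-1]; the row_sr(2) and
 * row_sr(3) steps fold in src[i-2] and src[i-3]; the remaining steps
 * shift the partial "result" instead of "src", roughly doubling the
 * covered range each time, so lane i converges to sum(src[0..i]).
 * Shifted-in edge lanes fall back to the "old" operand of update.dpp
 * (the identity here), and inactive lanes were pre-seeded with the
 * identity via ac_build_set_inactive by the callers below.
 */
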
LLVMValueRef
ac_build_inclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
{
	LLVMValueRef result;

	if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
		LLVMBuilderRef builder = ctx->builder;
		src = LLVMBuildZExt(builder, src, ctx->i32, "");
		result = ac_build_ballot(ctx, src);
		result = ac_build_mbcnt(ctx, result);
		result = LLVMBuildAdd(builder, result, src, "");
		return result;
	}

	ac_build_optimization_barrier(ctx, &src);

	LLVMValueRef identity =
		get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
	result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
				  LLVMTypeOf(identity), "");
	result = ac_build_scan(ctx, op, result, identity, 64);

	return ac_build_wwm(ctx, result);
}

LLVMValueRef
ac_build_exclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
{
	LLVMValueRef result;

	if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
		LLVMBuilderRef builder = ctx->builder;
		src = LLVMBuildZExt(builder, src, ctx->i32, "");
		result = ac_build_ballot(ctx, src);
		result = ac_build_mbcnt(ctx, result);
		return result;
	}

	ac_build_optimization_barrier(ctx, &src);

	LLVMValueRef identity =
		get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
	result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
				  LLVMTypeOf(identity), "");
	result = ac_build_dpp(ctx, identity, result, dpp_wf_sr1, 0xf, 0xf, false);
	result = ac_build_scan(ctx, op, result, identity, 64);

	return ac_build_wwm(ctx, result);
}

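/* The exclusive variant simply shifts the whole wavefront right by one
 * lane (dpp_wf_sr1) before running the same inclusive scan. Usage sketch
 * for stream compaction, with "keep" a hypothetical per-lane i1 value:
 *
 *    LLVMValueRef slot = ac_build_exclusive_scan(ctx, keep, nir_op_iadd);
 *    // lanes with keep == true write their payload at index "slot"
 *
 * which takes the ballot + mbcnt fast path above.
 */
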
LLVMValueRef
ac_build_reduce(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op, unsigned cluster_size)
{
	if (cluster_size == 1) return src;
	ac_build_optimization_barrier(ctx, &src);
	LLVMValueRef result, swap;
	LLVMValueRef identity = get_reduction_identity(ctx, op,
						       ac_get_type_size(LLVMTypeOf(src)));
	result = LLVMBuildBitCast(ctx->builder,
				  ac_build_set_inactive(ctx, src, identity),
				  LLVMTypeOf(identity), "");
	swap = ac_build_quad_swizzle(ctx, result, 1, 0, 3, 2);
	result = ac_build_alu_op(ctx, result, swap, op);
	if (cluster_size == 2) return ac_build_wwm(ctx, result);

	swap = ac_build_quad_swizzle(ctx, result, 2, 3, 0, 1);
	result = ac_build_alu_op(ctx, result, swap, op);
	if (cluster_size == 4) return ac_build_wwm(ctx, result);

	if (ctx->chip_class >= GFX8)
		swap = ac_build_dpp(ctx, identity, result, dpp_row_half_mirror, 0xf, 0xf, false);
	else
		swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x04));
	result = ac_build_alu_op(ctx, result, swap, op);
	if (cluster_size == 8) return ac_build_wwm(ctx, result);

	if (ctx->chip_class >= GFX8)
		swap = ac_build_dpp(ctx, identity, result, dpp_row_mirror, 0xf, 0xf, false);
	else
		swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x08));
	result = ac_build_alu_op(ctx, result, swap, op);
	if (cluster_size == 16) return ac_build_wwm(ctx, result);

	if (ctx->chip_class >= GFX8 && cluster_size != 32)
		swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
	else
		swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x10));
	result = ac_build_alu_op(ctx, result, swap, op);
	if (cluster_size == 32) return ac_build_wwm(ctx, result);

	if (ctx->chip_class >= GFX8) {
		swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
		result = ac_build_alu_op(ctx, result, swap, op);
		result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 63, 0));
		return ac_build_wwm(ctx, result);
	} else {
		swap = ac_build_readlane(ctx, result, ctx->i32_0);
		result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 32, 0));
		result = ac_build_alu_op(ctx, result, swap, op);
		return ac_build_wwm(ctx, result);
	}
}

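/* Usage sketch (hypothetical value "v"): a full-wave maximum and a
 * per-quad sum:
 *
 *    LLVMValueRef wave_max = ac_build_reduce(ctx, v, nir_op_fmax, 64);
 *    LLVMValueRef quad_sum = ac_build_reduce(ctx, v, nir_op_iadd, 4);
 *
 * With cluster_size == 64 every lane receives the reduction of the
 * whole wave; smaller power-of-two clusters stop the butterfly early.
 */
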
/**
 * "Top half" of a scan that reduces per-wave values across an entire
 * workgroup.
 *
 * The source value must be present in the highest lane of the wave, and the
 * highest lane must be live.
 */
void
ac_build_wg_wavescan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	if (ws->maxwaves <= 1)
		return;

	const LLVMValueRef i32_63 = LLVMConstInt(ctx->i32, 63, false);
	LLVMBuilderRef builder = ctx->builder;
	LLVMValueRef tid = ac_get_thread_id(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, tid, i32_63, "");
	ac_build_ifcc(ctx, tmp, 1000);
	LLVMBuildStore(builder, ws->src, LLVMBuildGEP(builder, ws->scratch, &ws->waveidx, 1, ""));
	ac_build_endif(ctx, 1000);
}

/**
 * "Bottom half" of a scan that reduces per-wave values across an entire
 * workgroup.
 *
 * The caller must place a barrier between the top and bottom halves.
 */
void
ac_build_wg_wavescan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	const LLVMTypeRef type = LLVMTypeOf(ws->src);
	const LLVMValueRef identity =
		get_reduction_identity(ctx, ws->op, ac_get_type_size(type));

	if (ws->maxwaves <= 1) {
		ws->result_reduce = ws->src;
		ws->result_inclusive = ws->src;
		ws->result_exclusive = identity;
		return;
	}
	assert(ws->maxwaves <= 32);

	LLVMBuilderRef builder = ctx->builder;
	LLVMValueRef tid = ac_get_thread_id(ctx);
	LLVMBasicBlockRef bbs[2];
	LLVMValueRef phivalues_scan[2];
	LLVMValueRef tmp, tmp2;

	bbs[0] = LLVMGetInsertBlock(builder);
	phivalues_scan[0] = LLVMGetUndef(type);

	if (ws->enable_reduce)
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->numwaves, "");
	else if (ws->enable_inclusive)
		tmp = LLVMBuildICmp(builder, LLVMIntULE, tid, ws->waveidx, "");
	else
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->waveidx, "");
	ac_build_ifcc(ctx, tmp, 1001);
	{
		tmp = LLVMBuildLoad(builder, LLVMBuildGEP(builder, ws->scratch, &tid, 1, ""), "");

		ac_build_optimization_barrier(ctx, &tmp);

		bbs[1] = LLVMGetInsertBlock(builder);
		phivalues_scan[1] = ac_build_scan(ctx, ws->op, tmp, identity, ws->maxwaves);
	}
	ac_build_endif(ctx, 1001);

	const LLVMValueRef scan = ac_build_phi(ctx, type, 2, phivalues_scan, bbs);

	if (ws->enable_reduce) {
		tmp = LLVMBuildSub(builder, ws->numwaves, ctx->i32_1, "");
		ws->result_reduce = ac_build_readlane(ctx, scan, tmp);
	}
	if (ws->enable_inclusive)
		ws->result_inclusive = ac_build_readlane(ctx, scan, ws->waveidx);
	if (ws->enable_exclusive) {
		tmp = LLVMBuildSub(builder, ws->waveidx, ctx->i32_1, "");
		tmp = ac_build_readlane(ctx, scan, tmp);
		tmp2 = LLVMBuildICmp(builder, LLVMIntEQ, ws->waveidx, ctx->i32_0, "");
		ws->result_exclusive = LLVMBuildSelect(builder, tmp2, identity, tmp, "");
	}
}

/**
 * Inclusive scan of a per-wave value across an entire workgroup.
 *
 * This implies an s_barrier instruction.
 *
 * Unlike ac_build_inclusive_scan, the caller \em must ensure that all threads
 * of the workgroup are live. (This requirement cannot easily be relaxed in a
 * useful manner because of the barrier in the algorithm.)
 */
void
ac_build_wg_wavescan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	ac_build_wg_wavescan_top(ctx, ws);
	ac_build_s_barrier(ctx);
	ac_build_wg_wavescan_bottom(ctx, ws);
}

/**
 * "Top half" of a scan that reduces per-thread values across an entire
 * workgroup.
 *
 * All lanes must be active when this code runs.
 */
void
ac_build_wg_scan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	if (ws->enable_exclusive) {
		ws->extra = ac_build_exclusive_scan(ctx, ws->src, ws->op);
		if (LLVMTypeOf(ws->src) == ctx->i1 && ws->op == nir_op_iadd)
			ws->src = LLVMBuildZExt(ctx->builder, ws->src, ctx->i32, "");
		ws->src = ac_build_alu_op(ctx, ws->extra, ws->src, ws->op);
	} else {
		ws->src = ac_build_inclusive_scan(ctx, ws->src, ws->op);
	}

	/* The bottom half derives the workgroup-inclusive result from the
	 * per-wave exclusive result, so the wavescan itself only needs
	 * exclusive (and reduce) mode; save and restore the flags around it.
	 */
	bool enable_inclusive = ws->enable_inclusive;
	bool enable_exclusive = ws->enable_exclusive;
	ws->enable_inclusive = false;
	ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
	ac_build_wg_wavescan_top(ctx, ws);
	ws->enable_inclusive = enable_inclusive;
	ws->enable_exclusive = enable_exclusive;
}

/**
 * "Bottom half" of a scan that reduces per-thread values across an entire
 * workgroup.
 *
 * The caller must place a barrier between the top and bottom halves.
 */
void
ac_build_wg_scan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	bool enable_inclusive = ws->enable_inclusive;
	bool enable_exclusive = ws->enable_exclusive;
	ws->enable_inclusive = false;
	ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
	ac_build_wg_wavescan_bottom(ctx, ws);
	ws->enable_inclusive = enable_inclusive;
	ws->enable_exclusive = enable_exclusive;

	/* ws->result_reduce is already the correct value */
	if (ws->enable_inclusive)
		ws->result_inclusive = ac_build_alu_op(ctx, ws->result_exclusive, ws->src, ws->op);
	if (ws->enable_exclusive)
		ws->result_exclusive = ac_build_alu_op(ctx, ws->result_exclusive, ws->extra, ws->op);
}

/**
 * A scan that reduces per-thread values across an entire workgroup.
 *
 * The caller must ensure that all lanes are active when this code runs
 * (WWM is insufficient!), because there is an implied barrier.
 */
void
ac_build_wg_scan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
{
	ac_build_wg_scan_top(ctx, ws);
	ac_build_s_barrier(ctx);
	ac_build_wg_scan_bottom(ctx, ws);
}

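/* Usage sketch, with hypothetical per-thread value "v" and an LDS
 * scratch buffer holding one slot per wave (fields as used above):
 *
 *    struct ac_wg_scan ws = {};
 *    ws.op = nir_op_iadd;
 *    ws.src = v;
 *    ws.scratch = lds_scratch;     // pointer indexed by wave id
 *    ws.waveidx = waveidx;         // this wave's index in the workgroup
 *    ws.numwaves = numwaves;       // waves actually launched
 *    ws.maxwaves = 16;             // compile-time upper bound (<= 32)
 *    ws.enable_inclusive = true;
 *    ws.enable_reduce = true;
 *    ac_build_wg_scan(ctx, &ws);
 *    // ws.result_inclusive and ws.result_reduce now hold the answers
 */
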
LLVMValueRef
ac_build_quad_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src,
		      unsigned lane0, unsigned lane1, unsigned lane2, unsigned lane3)
{
	unsigned mask = dpp_quad_perm(lane0, lane1, lane2, lane3);
	if (ctx->chip_class >= GFX8) {
		return ac_build_dpp(ctx, src, src, mask, 0xf, 0xf, false);
	} else {
		return ac_build_ds_swizzle(ctx, src, (1 << 15) | mask);
	}
}

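/* For example, ac_build_reduce above uses
 *
 *    ac_build_quad_swizzle(ctx, v, 1, 0, 3, 2)   // swap adjacent pairs
 *    ac_build_quad_swizzle(ctx, v, 2, 3, 0, 1)   // swap pair halves
 *
 * to exchange values inside each quad. Bit 15 of the ds_swizzle offset
 * selects the quad-permute mode on pre-GFX8 parts, hence (1 << 15) | mask.
 */
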
LLVMValueRef
ac_build_shuffle(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef index)
{
	/* ds_bpermute addresses lanes in bytes, so scale the lane index by 4. */
	index = LLVMBuildMul(ctx->builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
	return ac_build_intrinsic(ctx,
				  "llvm.amdgcn.ds.bpermute", ctx->i32,
				  (LLVMValueRef []) {index, src}, 2,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_CONVERGENT);
}

LLVMValueRef
ac_build_frexp_exp(struct ac_llvm_context *ctx, LLVMValueRef src0,
		   unsigned bitsize)
{
	LLVMTypeRef type;
	const char *intr;

	if (bitsize == 16) {
		intr = "llvm.amdgcn.frexp.exp.i16.f16";
		type = ctx->i16;
	} else if (bitsize == 32) {
		intr = "llvm.amdgcn.frexp.exp.i32.f32";
		type = ctx->i32;
	} else {
		intr = "llvm.amdgcn.frexp.exp.i32.f64";
		type = ctx->i32;
	}

	LLVMValueRef params[] = {
		src0,
	};
	return ac_build_intrinsic(ctx, intr, type, params, 1,
				  AC_FUNC_ATTR_READNONE);
}

LLVMValueRef
ac_build_frexp_mant(struct ac_llvm_context *ctx, LLVMValueRef src0,
		    unsigned bitsize)
{
	LLVMTypeRef type;
	const char *intr;

	if (bitsize == 16) {
		intr = "llvm.amdgcn.frexp.mant.f16";
		type = ctx->f16;
	} else if (bitsize == 32) {
		intr = "llvm.amdgcn.frexp.mant.f32";
		type = ctx->f32;
	} else {
		intr = "llvm.amdgcn.frexp.mant.f64";
		type = ctx->f64;
	}

	LLVMValueRef params[] = {
		src0,
	};
	return ac_build_intrinsic(ctx, intr, type, params, 1,
				  AC_FUNC_ATTR_READNONE);
}

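/* Together these implement frexp. For a hypothetical f32 "x":
 *
 *    LLVMValueRef mant = ac_build_frexp_mant(ctx, x, 32);
 *    LLVMValueRef exp  = ac_build_frexp_exp(ctx, x, 32);
 *
 * satisfies x == mant * 2^exp with mant in [0.5, 1.0), following the
 * usual frexp convention.
 */
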
/*
 * This takes an (I, J) coordinate pair and works out the X and Y
 * derivatives. It returns DDX(I), DDX(J), DDY(I), DDY(J).
 */
LLVMValueRef
ac_build_ddxy_interp(struct ac_llvm_context *ctx, LLVMValueRef interp_ij)
{
	LLVMValueRef result[4], a;
	unsigned i;

	for (i = 0; i < 2; i++) {
		a = LLVMBuildExtractElement(ctx->builder, interp_ij,
					    LLVMConstInt(ctx->i32, i, false), "");
		result[i] = ac_build_ddxy(ctx, AC_TID_MASK_TOP_LEFT, 1, a);
		result[2+i] = ac_build_ddxy(ctx, AC_TID_MASK_TOP_LEFT, 2, a);
	}
	return ac_build_gather_values(ctx, result, 4);
}

LLVMValueRef
ac_build_load_helper_invocation(struct ac_llvm_context *ctx)
{
	LLVMValueRef result = ac_build_intrinsic(ctx, "llvm.amdgcn.ps.live",
						 ctx->i1, NULL, 0,
						 AC_FUNC_ATTR_READNONE);
	result = LLVMBuildNot(ctx->builder, result, "");
	return LLVMBuildSExt(ctx->builder, result, ctx->i32, "");
}