radv: pre-calculate user_data_0 registers and store in pipeline
[mesa.git] / src / amd / common / ac_llvm_build.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/* based on pieces from si_pipe.c and radeon_llvm_emit.c */
#include "ac_llvm_build.h"

#include <llvm-c/Core.h>

#include "c11/threads.h"

#include <assert.h>
#include <stdio.h>

#include "ac_llvm_util.h"
#include "ac_exp_param.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "sid.h"

#include "shader_enums.h"

/* Initialize module-independent parts of the context.
 *
 * The caller is responsible for initializing ctx::module and ctx::builder.
 */
void
ac_llvm_context_init(struct ac_llvm_context *ctx, LLVMContextRef context,
		     enum chip_class chip_class)
{
	LLVMValueRef args[1];

	ctx->chip_class = chip_class;

	ctx->context = context;
	ctx->module = NULL;
	ctx->builder = NULL;

	ctx->voidt = LLVMVoidTypeInContext(ctx->context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->context);
	ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
	ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
	ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
	ctx->f16 = LLVMHalfTypeInContext(ctx->context);
	ctx->f32 = LLVMFloatTypeInContext(ctx->context);
	ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
	ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
	ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
	ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);

	ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
	ctx->i1true = LLVMConstInt(ctx->i1, 1, false);

	ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context,
						      "range", 5);

	ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
							       "invariant.load", 14);

	ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);

	args[0] = LLVMConstReal(ctx->f32, 2.5);
	ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);

	ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
							"amdgpu.uniform", 14);

	ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
}
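
/* Illustrative usage sketch (not part of this file); "shader" and
 * chip_class here are placeholders supplied by the caller:
 *
 *    struct ac_llvm_context ac;
 *    ac_llvm_context_init(&ac, LLVMContextCreate(), chip_class);
 *    ac.module = LLVMModuleCreateWithNameInContext("shader", ac.context);
 *    ac.builder = LLVMCreateBuilderInContext(ac.context);
 */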

unsigned
ac_get_type_size(LLVMTypeRef type)
{
	LLVMTypeKind kind = LLVMGetTypeKind(type);

	switch (kind) {
	case LLVMIntegerTypeKind:
		return LLVMGetIntTypeWidth(type) / 8;
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
	case LLVMPointerTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		return LLVMGetVectorSize(type) *
		       ac_get_type_size(LLVMGetElementType(type));
	case LLVMArrayTypeKind:
		return LLVMGetArrayLength(type) *
		       ac_get_type_size(LLVMGetElementType(type));
	default:
		assert(0);
		return 0;
	}
}
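
/* Examples: ac_get_type_size(ctx->i32) == 4, ac_get_type_size(ctx->v4f32)
 * == 16, and an [8 x i32] array gives 32. Pointers always count as 8 bytes
 * here.
 */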

static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (t == ctx->f16 || t == ctx->i16)
		return ctx->i16;
	else if (t == ctx->f32 || t == ctx->i32)
		return ctx->i32;
	else if (t == ctx->f64 || t == ctx->i64)
		return ctx->i64;
	else
		unreachable("Unhandled integer size");
}

LLVMTypeRef
ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
		LLVMTypeRef elem_type = LLVMGetElementType(t);
		return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
				      LLVMGetVectorSize(t));
	}
	return to_integer_type_scalar(ctx, t);
}

LLVMValueRef
ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
{
	LLVMTypeRef type = LLVMTypeOf(v);
	return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
}

static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (t == ctx->i16 || t == ctx->f16)
		return ctx->f16;
	else if (t == ctx->i32 || t == ctx->f32)
		return ctx->f32;
	else if (t == ctx->i64 || t == ctx->f64)
		return ctx->f64;
	else
		unreachable("Unhandled float size");
}

LLVMTypeRef
ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
		LLVMTypeRef elem_type = LLVMGetElementType(t);
		return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
				      LLVMGetVectorSize(t));
	}
	return to_float_type_scalar(ctx, t);
}

LLVMValueRef
ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
{
	LLVMTypeRef type = LLVMTypeOf(v);
	return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
}


LLVMValueRef
ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
		   LLVMTypeRef return_type, LLVMValueRef *params,
		   unsigned param_count, unsigned attrib_mask)
{
	LLVMValueRef function, call;
	bool set_callsite_attrs = HAVE_LLVM >= 0x0400 &&
				  !(attrib_mask & AC_FUNC_ATTR_LEGACY);

	function = LLVMGetNamedFunction(ctx->module, name);
	if (!function) {
		LLVMTypeRef param_types[32], function_type;
		unsigned i;

		assert(param_count <= 32);

		for (i = 0; i < param_count; ++i) {
			assert(params[i]);
			param_types[i] = LLVMTypeOf(params[i]);
		}
		function_type =
			LLVMFunctionType(return_type, param_types, param_count, 0);
		function = LLVMAddFunction(ctx->module, name, function_type);

		LLVMSetFunctionCallConv(function, LLVMCCallConv);
		LLVMSetLinkage(function, LLVMExternalLinkage);

		if (!set_callsite_attrs)
			ac_add_func_attributes(ctx->context, function, attrib_mask);
	}

	call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
	if (set_callsite_attrs)
		ac_add_func_attributes(ctx->context, call, attrib_mask);
	return call;
}
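
/* Illustrative example, given some f32 value x (this is the pattern used
 * throughout this file):
 *
 *    LLVMValueRef abs_x = ac_build_intrinsic(ctx, "llvm.fabs.f32",
 *                                            ctx->f32, &x, 1,
 *                                            AC_FUNC_ATTR_READNONE);
 *
 * The declaration is created on first use and reused afterwards.
 */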

/**
 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
 * intrinsic names).
 */
void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
{
	LLVMTypeRef elem_type = type;

	assert(bufsize >= 8);

	if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
		int ret = snprintf(buf, bufsize, "v%u",
				   LLVMGetVectorSize(type));
		if (ret < 0) {
			char *type_name = LLVMPrintTypeToString(type);
			fprintf(stderr, "Error building type name for: %s\n",
				type_name);
			return;
		}
		elem_type = LLVMGetElementType(type);
		buf += ret;
		bufsize -= ret;
	}
	switch (LLVMGetTypeKind(elem_type)) {
	default: break;
	case LLVMIntegerTypeKind:
		snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
		break;
	case LLVMFloatTypeKind:
		snprintf(buf, bufsize, "f32");
		break;
	case LLVMDoubleTypeKind:
		snprintf(buf, bufsize, "f64");
		break;
	}
}
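
/* Examples: i32 -> "i32", float -> "f32", <2 x i32> -> "v2i32",
 * <4 x float> -> "v4f32".
 */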

/**
 * Helper function that builds an LLVM IR PHI node and immediately adds
 * incoming edges.
 */
LLVMValueRef
ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type,
	     unsigned count_incoming, LLVMValueRef *values,
	     LLVMBasicBlockRef *blocks)
{
	LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
	LLVMAddIncoming(phi, values, blocks, count_incoming);
	return phi;
}

/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 */
void
ac_build_optimization_barrier(struct ac_llvm_context *ctx,
			      LLVMValueRef *pvgpr)
{
	static int counter = 0;

	LLVMBuilderRef builder = ctx->builder;
	char code[16];

	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = ac_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}

LLVMValueRef
ac_build_ballot(struct ac_llvm_context *ctx,
		LLVMValueRef value)
{
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	ac_build_optimization_barrier(ctx, &args[0]);

	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(ctx->builder, args[0], ctx->i32, "");

	return ac_build_intrinsic(ctx,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  AC_FUNC_ATTR_NOUNWIND |
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_CONVERGENT);
}
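
/* The result is a 64-bit mask with one bit per lane: a bit is set for each
 * active lane in which \p value is non-zero (an "icmp ne 0" across the
 * wave); bits of inactive lanes are zero.
 */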

LLVMValueRef
ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);
	return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
}

LLVMValueRef
ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);
	return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set,
			     LLVMConstInt(ctx->i64, 0, 0), "");
}

LLVMValueRef
ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);

	LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
					 vote_set, active_set, "");
	LLVMValueRef none = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
					  vote_set,
					  LLVMConstInt(ctx->i64, 0, 0), "");
	return LLVMBuildOr(ctx->builder, all, none, "");
}

LLVMValueRef
ac_build_gather_values_extended(struct ac_llvm_context *ctx,
				LLVMValueRef *values,
				unsigned value_count,
				unsigned value_stride,
				bool load,
				bool always_vector)
{
	LLVMBuilderRef builder = ctx->builder;
	LLVMValueRef vec = NULL;
	unsigned i;

	if (value_count == 1 && !always_vector) {
		if (load)
			return LLVMBuildLoad(builder, values[0], "");
		return values[0];
	} else if (!value_count)
		unreachable("value_count is 0");

	for (i = 0; i < value_count; i++) {
		LLVMValueRef value = values[i * value_stride];
		if (load)
			value = LLVMBuildLoad(builder, value, "");

		if (!i)
			vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
		LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
		vec = LLVMBuildInsertElement(builder, vec, value, index, "");
	}
	return vec;
}

LLVMValueRef
ac_build_gather_values(struct ac_llvm_context *ctx,
		       LLVMValueRef *values,
		       unsigned value_count)
{
	return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
}

LLVMValueRef
ac_build_fdiv(struct ac_llvm_context *ctx,
	      LLVMValueRef num,
	      LLVMValueRef den)
{
	LLVMValueRef ret = LLVMBuildFDiv(ctx->builder, num, den, "");

	if (!LLVMIsConstant(ret))
		LLVMSetMetadata(ret, ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
	return ret;
}
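
/* The 2.5 ULP fpmath metadata marks the division as allowed to be
 * approximate, which lets the backend use a faster, less precise lowering
 * (e.g. one based on v_rcp_f32) instead of a correctly rounded divide.
 */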

/* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
 * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
 * already multiplied by two. id is the cube face number.
 */
struct cube_selection_coords {
	LLVMValueRef stc[2];
	LLVMValueRef ma;
	LLVMValueRef id;
};

static void
build_cube_intrinsic(struct ac_llvm_context *ctx,
		     LLVMValueRef in[3],
		     struct cube_selection_coords *out)
{
	LLVMTypeRef f32 = ctx->f32;

	out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc",
					 f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc",
					 f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema",
				     f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid",
				     f32, in, 3, AC_FUNC_ATTR_READNONE);
}

/**
 * Build a manual selection sequence for cube face sc/tc coordinates and
 * major axis vector (multiplied by 2 for consistency) for the given
 * vec3 \p coords, for the face implied by \p selcoords.
 *
 * For the major axis, we always adjust the sign to be in the direction of
 * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
 * the selcoords major axis.
 */
static void build_cube_select(struct ac_llvm_context *ctx,
			      const struct cube_selection_coords *selcoords,
			      const LLVMValueRef *coords,
			      LLVMValueRef *out_st,
			      LLVMValueRef *out_ma)
{
	LLVMBuilderRef builder = ctx->builder;
	LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
	LLVMValueRef is_ma_positive;
	LLVMValueRef sgn_ma;
	LLVMValueRef is_ma_z, is_not_ma_z;
	LLVMValueRef is_ma_y;
	LLVMValueRef is_ma_x;
	LLVMValueRef sgn;
	LLVMValueRef tmp;

	is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE,
				       selcoords->ma, LLVMConstReal(f32, 0.0), "");
	sgn_ma = LLVMBuildSelect(builder, is_ma_positive,
				 LLVMConstReal(f32, 1.0), LLVMConstReal(f32, -1.0), "");

	is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
	is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
	is_ma_y = LLVMBuildAnd(builder, is_not_ma_z,
			       LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
	is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");

	/* Select sc */
	tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
	sgn = LLVMBuildSelect(builder, is_ma_y, LLVMConstReal(f32, 1.0),
			      LLVMBuildSelect(builder, is_ma_z, sgn_ma,
					      LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
	out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");

	/* Select tc */
	tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
	sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma,
			      LLVMConstReal(f32, -1.0), "");
	out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");

	/* Select ma */
	tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
			      LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
	tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32",
				 ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
	*out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
}

void
ac_prepare_cube_coords(struct ac_llvm_context *ctx,
		       bool is_deriv, bool is_array, bool is_lod,
		       LLVMValueRef *coords_arg,
		       LLVMValueRef *derivs_arg)
{

	LLVMBuilderRef builder = ctx->builder;
	struct cube_selection_coords selcoords;
	LLVMValueRef coords[3];
	LLVMValueRef invma;

	if (is_array && !is_lod) {
		LLVMValueRef tmp = coords_arg[3];
		tmp = ac_build_intrinsic(ctx, "llvm.rint.f32", ctx->f32, &tmp, 1, 0);

		/* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
		 *
		 *    "For Array forms, the array layer used will be
		 *
		 *       max(0, min(d−1, floor(layer+0.5)))
		 *
		 *     where d is the depth of the texture array and layer
		 *     comes from the component indicated in the tables below."
		 *
		 * The rounding is a workaround for an issue where the layer
		 * is taken from a helper invocation which happens to fall on
		 * a different layer due to extrapolation.
		 *
		 * VI and earlier attempt to implement this in hardware by
		 * clamping the value of coords[2] = (8 * layer) + face.
		 * Unfortunately, this means that we end up with the wrong
		 * face when clamping occurs.
		 *
		 * Clamp the layer earlier to work around the issue.
		 */
		if (ctx->chip_class <= VI) {
			LLVMValueRef ge0;
			ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
			tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
		}

		coords_arg[3] = tmp;
	}

	build_cube_intrinsic(ctx, coords_arg, &selcoords);

	invma = ac_build_intrinsic(ctx, "llvm.fabs.f32",
				   ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
	invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);

	for (int i = 0; i < 2; ++i)
		coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");

	coords[2] = selcoords.id;

	if (is_deriv && derivs_arg) {
		LLVMValueRef derivs[4];
		int axis;

		/* Convert cube derivatives to 2D derivatives. */
		for (axis = 0; axis < 2; axis++) {
			LLVMValueRef deriv_st[2];
			LLVMValueRef deriv_ma;

			/* Transform the derivative alongside the texture
			 * coordinate. Mathematically, the correct formula is
			 * as follows. Assume we're projecting onto the +Z face
			 * and denote by dx/dh the derivative of the (original)
			 * X texture coordinate with respect to horizontal
			 * window coordinates. The projection onto the +Z face
			 * plane is:
			 *
			 *   f(x,z) = x/z
			 *
			 * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
			 *            = 1/z * dx/dh - x/z * 1/z * dz/dh.
			 *
			 * This motivates the implementation below.
			 *
			 * Whether this actually gives the expected results for
			 * apps that might feed in derivatives obtained via
			 * finite differences is anyone's guess. The OpenGL spec
			 * seems awfully quiet about how textureGrad for cube
			 * maps should be handled.
			 */
			build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3],
					  deriv_st, &deriv_ma);

			deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");

			for (int i = 0; i < 2; ++i)
				derivs[axis * 2 + i] =
					LLVMBuildFSub(builder,
						      LLVMBuildFMul(builder, deriv_st[i], invma, ""),
						      LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
		}

		memcpy(derivs_arg, derivs, sizeof(derivs));
	}

	/* Shift the texture coordinate. This must be applied after the
	 * derivative calculation.
	 */
	for (int i = 0; i < 2; ++i)
		coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");

	if (is_array) {
		/* for cube arrays coord.z = coord.w(array_index) * 8 + face */
		/* coords_arg.w component - array_index for cube arrays */
		LLVMValueRef tmp = LLVMBuildFMul(ctx->builder, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), "");
		coords[2] = LLVMBuildFAdd(ctx->builder, tmp, coords[2], "");
	}

	memcpy(coords_arg, coords, sizeof(coords));
}


LLVMValueRef
ac_build_fs_interp(struct ac_llvm_context *ctx,
		   LLVMValueRef llvm_chan,
		   LLVMValueRef attr_number,
		   LLVMValueRef params,
		   LLVMValueRef i,
		   LLVMValueRef j)
{
	LLVMValueRef args[5];
	LLVMValueRef p1;

	if (HAVE_LLVM < 0x0400) {
		LLVMValueRef ij[2];
		ij[0] = LLVMBuildBitCast(ctx->builder, i, ctx->i32, "");
		ij[1] = LLVMBuildBitCast(ctx->builder, j, ctx->i32, "");

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = ac_build_gather_values(ctx, ij, 2);
		return ac_build_intrinsic(ctx, "llvm.SI.fs.interp",
					  ctx->f32, args, 4,
					  AC_FUNC_ATTR_READNONE);
	}

	args[0] = i;
	args[1] = llvm_chan;
	args[2] = attr_number;
	args[3] = params;

	p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
				ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);

	args[0] = p1;
	args[1] = j;
	args[2] = llvm_chan;
	args[3] = attr_number;
	args[4] = params;

	return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
				  ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
}
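
/* The two interp instructions above compute, per attribute channel (with
 * (i, j) the barycentric coordinates and P0/P10/P20 the attribute deltas
 * set up by the SPI):
 *
 *    p1     = P0 + i * P10      (v_interp_p1_f32)
 *    result = p1 + j * P20      (v_interp_p2_f32)
 */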

LLVMValueRef
ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
		       LLVMValueRef parameter,
		       LLVMValueRef llvm_chan,
		       LLVMValueRef attr_number,
		       LLVMValueRef params)
{
	LLVMValueRef args[4];
	if (HAVE_LLVM < 0x0400) {
		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;

		return ac_build_intrinsic(ctx,
					  "llvm.SI.fs.constant",
					  ctx->f32, args, 3,
					  AC_FUNC_ATTR_READNONE);
	}

	args[0] = parameter;
	args[1] = llvm_chan;
	args[2] = attr_number;
	args[3] = params;

	return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov",
				  ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
}

LLVMValueRef
ac_build_gep0(struct ac_llvm_context *ctx,
	      LLVMValueRef base_ptr,
	      LLVMValueRef index)
{
	LLVMValueRef indices[2] = {
		LLVMConstInt(ctx->i32, 0, 0),
		index,
	};
	return LLVMBuildGEP(ctx->builder, base_ptr,
			    indices, 2, "");
}
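
/* The leading zero index steps through the pointer itself, so for a
 * pointer-to-array base this computes &(*base_ptr)[index], i.e. the
 * address of element \p index.
 */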

void
ac_build_indexed_store(struct ac_llvm_context *ctx,
		       LLVMValueRef base_ptr, LLVMValueRef index,
		       LLVMValueRef value)
{
	LLVMBuildStore(ctx->builder, value,
		       ac_build_gep0(ctx, base_ptr, index));
}

/**
 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
 * It's equivalent to doing a load from &base_ptr[index].
 *
 * \param base_ptr  Where the array starts.
 * \param index     The element index into the array.
 * \param uniform   Whether the base_ptr and index can be assumed to be
 *                  dynamically uniform (i.e. load to an SGPR)
 * \param invariant Whether the load is invariant (no other opcodes affect it)
 */
static LLVMValueRef
ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
		     LLVMValueRef index, bool uniform, bool invariant)
{
	LLVMValueRef pointer, result;

	pointer = ac_build_gep0(ctx, base_ptr, index);
	if (uniform)
		LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
	result = LLVMBuildLoad(ctx->builder, pointer, "");
	if (invariant)
		LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
	return result;
}

LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
			   LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, false, false);
}

LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx,
				     LLVMValueRef base_ptr, LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, false, true);
}

LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx,
				   LLVMValueRef base_ptr, LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, true, true);
}
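
/* Summary of the three wrappers above:
 *
 *    ac_build_load:           uniform=false, invariant=false
 *    ac_build_load_invariant: uniform=false, invariant=true
 *    ac_build_load_to_sgpr:   uniform=true,  invariant=true
 */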

/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4).
 */
void
ac_build_buffer_store_dword(struct ac_llvm_context *ctx,
			    LLVMValueRef rsrc,
			    LLVMValueRef vdata,
			    unsigned num_channels,
			    LLVMValueRef voffset,
			    LLVMValueRef soffset,
			    unsigned inst_offset,
			    bool glc,
			    bool slc,
			    bool writeonly_memory,
			    bool swizzle_enable_hint)
{
	/* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
	 * (voffset is swizzled, but soffset isn't swizzled).
	 * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
	 */
	if (!swizzle_enable_hint) {
		/* Split 3-channel stores, because LLVM doesn't support
		 * 3-channel intrinsics. */
		if (num_channels == 3) {
			LLVMValueRef v[3], v01;

			for (int i = 0; i < 3; i++) {
				v[i] = LLVMBuildExtractElement(ctx->builder, vdata,
						LLVMConstInt(ctx->i32, i, 0), "");
			}
			v01 = ac_build_gather_values(ctx, v, 2);

			ac_build_buffer_store_dword(ctx, rsrc, v01, 2, voffset,
						    soffset, inst_offset, glc, slc,
						    writeonly_memory, swizzle_enable_hint);
			ac_build_buffer_store_dword(ctx, rsrc, v[2], 1, voffset,
						    soffset, inst_offset + 8,
						    glc, slc,
						    writeonly_memory, swizzle_enable_hint);
			return;
		}

		unsigned func = CLAMP(num_channels, 1, 3) - 1;
		static const char *types[] = {"f32", "v2f32", "v4f32"};
		char name[256];
		LLVMValueRef offset = soffset;

		if (inst_offset)
			offset = LLVMBuildAdd(ctx->builder, offset,
					      LLVMConstInt(ctx->i32, inst_offset, 0), "");
		if (voffset)
			offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");

		LLVMValueRef args[] = {
			ac_to_float(ctx, vdata),
			LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
			LLVMConstInt(ctx->i32, 0, 0),
			offset,
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0),
		};

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s",
			 types[func]);

		ac_build_intrinsic(ctx, name, ctx->voidt,
				   args, ARRAY_SIZE(args),
				   writeonly_memory ?
					   AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
					   AC_FUNC_ATTR_WRITEONLY);
		return;
	}

	static unsigned dfmt[] = {
		V_008F0C_BUF_DATA_FORMAT_32,
		V_008F0C_BUF_DATA_FORMAT_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32_32
	};
	assert(num_channels >= 1 && num_channels <= 4);

	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(ctx->i32, num_channels, 0),
		voffset ? voffset : LLVMGetUndef(ctx->i32),
		soffset,
		LLVMConstInt(ctx->i32, inst_offset, 0),
		LLVMConstInt(ctx->i32, dfmt[num_channels - 1], 0),
		LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, 0),
		LLVMConstInt(ctx->i32, voffset != NULL, 0),
		LLVMConstInt(ctx->i32, 0, 0), /* idxen */
		LLVMConstInt(ctx->i32, glc, 0),
		LLVMConstInt(ctx->i32, slc, 0),
		LLVMConstInt(ctx->i32, 0, 0), /* tfe */
	};

	/* The instruction offset field has 12 bits */
	assert(voffset || inst_offset < (1 << 12));

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	ac_build_intrinsic(ctx, name, ctx->voidt,
			   args, ARRAY_SIZE(args),
			   AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef
ac_build_buffer_load(struct ac_llvm_context *ctx,
		     LLVMValueRef rsrc,
		     int num_channels,
		     LLVMValueRef vindex,
		     LLVMValueRef voffset,
		     LLVMValueRef soffset,
		     unsigned inst_offset,
		     unsigned glc,
		     unsigned slc,
		     bool can_speculate,
		     bool allow_smem)
{
	LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
	if (voffset)
		offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
	if (soffset)
		offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");

	/* TODO: VI and later generations can use SMEM with GLC=1. */
	if (allow_smem && !glc && !slc) {
		assert(vindex == NULL);

		LLVMValueRef result[4];

		for (int i = 0; i < num_channels; i++) {
			if (i) {
				offset = LLVMBuildAdd(ctx->builder, offset,
						      LLVMConstInt(ctx->i32, 4, 0), "");
			}
			LLVMValueRef args[2] = {rsrc, offset};
			result[i] = ac_build_intrinsic(ctx, "llvm.SI.load.const.v4i32",
						       ctx->f32, args, 2,
						       AC_FUNC_ATTR_READNONE |
						       AC_FUNC_ATTR_LEGACY);
		}
		if (num_channels == 1)
			return result[0];

		if (num_channels == 3)
			result[num_channels++] = LLVMGetUndef(ctx->f32);
		return ac_build_gather_values(ctx, result, num_channels);
	}

	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	LLVMValueRef args[] = {
		LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
		vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
		offset,
		LLVMConstInt(ctx->i1, glc, 0),
		LLVMConstInt(ctx->i1, slc, 0)
	};

	LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
			       ctx->v4f32};
	const char *type_names[] = {"f32", "v2f32", "v4f32"};
	char name[256];

	snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
		 type_names[func]);

	return ac_build_intrinsic(ctx, name, types[func], args,
				  ARRAY_SIZE(args),
				  /* READNONE means writes can't affect it, while
				   * READONLY means that writes can affect it. */
				  can_speculate && HAVE_LLVM >= 0x0400 ?
					  AC_FUNC_ATTR_READNONE :
					  AC_FUNC_ATTR_READONLY);
}

LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
					 LLVMValueRef rsrc,
					 LLVMValueRef vindex,
					 LLVMValueRef voffset,
					 bool can_speculate)
{
	LLVMValueRef args[] = {
		LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
		vindex,
		voffset,
		ctx->i1false, /* glc */
		ctx->i1false, /* slc */
	};

	return ac_build_intrinsic(ctx,
				  "llvm.amdgcn.buffer.load.format.v4f32",
				  ctx->v4f32, args, ARRAY_SIZE(args),
				  /* READNONE means writes can't affect it, while
				   * READONLY means that writes can affect it. */
				  can_speculate && HAVE_LLVM >= 0x0400 ?
					  AC_FUNC_ATTR_READNONE :
					  AC_FUNC_ATTR_READONLY);
}

/**
 * Set range metadata on an instruction. This can only be used on load and
 * call instructions. If you know an instruction can only produce the values
 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
 * \p lo is the minimum value inclusive.
 * \p hi is the maximum value exclusive.
 */
static void set_range_metadata(struct ac_llvm_context *ctx,
			       LLVMValueRef value, unsigned lo, unsigned hi)
{
	LLVMValueRef range_md, md_args[2];
	LLVMTypeRef type = LLVMTypeOf(value);
	LLVMContextRef context = LLVMGetTypeContext(type);

	md_args[0] = LLVMConstInt(type, lo, false);
	md_args[1] = LLVMConstInt(type, hi, false);
	range_md = LLVMMDNodeInContext(context, md_args, 2);
	LLVMSetMetadata(value, ctx->range_md_kind, range_md);
}

LLVMValueRef
ac_get_thread_id(struct ac_llvm_context *ctx)
{
	LLVMValueRef tid;

	LLVMValueRef tid_args[2];
	tid_args[0] = LLVMConstInt(ctx->i32, 0xffffffff, false);
	tid_args[1] = LLVMConstInt(ctx->i32, 0, false);
	tid_args[1] = ac_build_intrinsic(ctx,
					 "llvm.amdgcn.mbcnt.lo", ctx->i32,
					 tid_args, 2, AC_FUNC_ATTR_READNONE);

	tid = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi",
				 ctx->i32, tid_args,
				 2, AC_FUNC_ATTR_READNONE);
	set_range_metadata(ctx, tid, 0, 64);
	return tid;
}
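
/* mbcnt.lo/hi count the set bits of the given 64-bit mask (here ~0) in
 * lanes below the current one, so the result is the lane index within the
 * wave, hence the [0, 64) range metadata above.
 */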

/*
 * SI implements derivatives using the local data store (LDS).
 * All writes to the LDS happen in all executing threads at
 * the same time. TID is the Thread ID for the current
 * thread and is a value between 0 and 63, representing
 * the thread's position in the wavefront.
 *
 * For the pixel shader, threads are grouped into quads of four pixels.
 * The TIDs of the pixels of a quad are:
 *
 *   +------+------+
 *   |4n + 0|4n + 1|
 *   +------+------+
 *   |4n + 2|4n + 3|
 *   +------+------+
 *
 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
 * the current pixel's column, and masking with 0xfffffffe yields the TID
 * of the left pixel of the current pixel's row.
 *
 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
 * adding 2 yields the TID of the pixel below the top pixel.
 */
LLVMValueRef
ac_build_ddxy(struct ac_llvm_context *ctx,
	      uint32_t mask,
	      int idx,
	      LLVMValueRef val)
{
	LLVMValueRef tl, trbl, args[2];
	LLVMValueRef result;

	if (ctx->chip_class >= VI) {
		LLVMValueRef thread_id, tl_tid, trbl_tid;
		thread_id = ac_get_thread_id(ctx);

		tl_tid = LLVMBuildAnd(ctx->builder, thread_id,
				      LLVMConstInt(ctx->i32, mask, false), "");

		trbl_tid = LLVMBuildAdd(ctx->builder, tl_tid,
					LLVMConstInt(ctx->i32, idx, false), "");

		args[0] = LLVMBuildMul(ctx->builder, tl_tid,
				       LLVMConstInt(ctx->i32, 4, false), "");
		args[1] = val;
		tl = ac_build_intrinsic(ctx,
					"llvm.amdgcn.ds.bpermute", ctx->i32,
					args, 2,
					AC_FUNC_ATTR_READNONE |
					AC_FUNC_ATTR_CONVERGENT);

		args[0] = LLVMBuildMul(ctx->builder, trbl_tid,
				       LLVMConstInt(ctx->i32, 4, false), "");
		trbl = ac_build_intrinsic(ctx,
					  "llvm.amdgcn.ds.bpermute", ctx->i32,
					  args, 2,
					  AC_FUNC_ATTR_READNONE |
					  AC_FUNC_ATTR_CONVERGENT);
	} else {
		uint32_t masks[2] = {};

		switch (mask) {
		case AC_TID_MASK_TOP_LEFT:
			masks[0] = 0x8000;
			if (idx == 1)
				masks[1] = 0x8055;
			else
				masks[1] = 0x80aa;

			break;
		case AC_TID_MASK_TOP:
			masks[0] = 0x8044;
			masks[1] = 0x80ee;
			break;
		case AC_TID_MASK_LEFT:
			masks[0] = 0x80a0;
			masks[1] = 0x80f5;
			break;
		default:
			assert(0);
		}

		args[0] = val;
		args[1] = LLVMConstInt(ctx->i32, masks[0], false);

		tl = ac_build_intrinsic(ctx,
					"llvm.amdgcn.ds.swizzle", ctx->i32,
					args, 2,
					AC_FUNC_ATTR_READNONE |
					AC_FUNC_ATTR_CONVERGENT);

		args[1] = LLVMConstInt(ctx->i32, masks[1], false);
		trbl = ac_build_intrinsic(ctx,
					  "llvm.amdgcn.ds.swizzle", ctx->i32,
					  args, 2,
					  AC_FUNC_ATTR_READNONE |
					  AC_FUNC_ATTR_CONVERGENT);
	}

	tl = LLVMBuildBitCast(ctx->builder, tl, ctx->f32, "");
	trbl = LLVMBuildBitCast(ctx->builder, trbl, ctx->f32, "");
	result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
	return result;
}
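
/* Example: a coarse ddx of \p val uses mask = AC_TID_MASK_TOP_LEFT and
 * idx = 1, which computes val(tl + 1) - val(tl): the pixel to the right of
 * the quad's top-left pixel minus the top-left pixel, per the TID layout
 * described above.
 */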

void
ac_build_sendmsg(struct ac_llvm_context *ctx,
		 uint32_t msg,
		 LLVMValueRef wave_id)
{
	LLVMValueRef args[2];
	const char *intr_name = (HAVE_LLVM < 0x0400) ? "llvm.SI.sendmsg" : "llvm.amdgcn.s.sendmsg";
	args[0] = LLVMConstInt(ctx->i32, msg, false);
	args[1] = wave_id;
	ac_build_intrinsic(ctx, intr_name, ctx->voidt, args, 2, 0);
}

LLVMValueRef
ac_build_imsb(struct ac_llvm_context *ctx,
	      LLVMValueRef arg,
	      LLVMTypeRef dst_type)
{
	const char *intr_name = (HAVE_LLVM < 0x0400) ? "llvm.AMDGPU.flbit.i32" :
						       "llvm.amdgcn.sffbh.i32";
	LLVMValueRef msb = ac_build_intrinsic(ctx, intr_name,
					      dst_type, &arg, 1,
					      AC_FUNC_ATTR_READNONE);

	/* The HW returns the last bit index from MSB, but NIR/TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
			   msb, "");

	LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
	LLVMValueRef cond = LLVMBuildOr(ctx->builder,
					LLVMBuildICmp(ctx->builder, LLVMIntEQ,
						      arg, LLVMConstInt(ctx->i32, 0, 0), ""),
					LLVMBuildICmp(ctx->builder, LLVMIntEQ,
						      arg, all_ones, ""), "");

	return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
}
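
/* Examples: ac_build_imsb of 0x00000010 yields 4, while inputs 0 and -1
 * (where no bit differs from the sign bit) both yield -1, matching GLSL
 * findMSB() semantics.
 */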

LLVMValueRef
ac_build_umsb(struct ac_llvm_context *ctx,
	      LLVMValueRef arg,
	      LLVMTypeRef dst_type)
{
	LLVMValueRef args[2] = {
		arg,
		ctx->i1true,
	};
	LLVMValueRef msb = ac_build_intrinsic(ctx, "llvm.ctlz.i32",
					      dst_type, args, ARRAY_SIZE(args),
					      AC_FUNC_ATTR_READNONE);

	/* The HW returns the last bit index from MSB, but TGSI/NIR wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
			   msb, "");

	/* check for zero */
	return LLVMBuildSelect(ctx->builder,
			       LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg,
					     LLVMConstInt(ctx->i32, 0, 0), ""),
			       LLVMConstInt(ctx->i32, -1, true), msb, "");
}

LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a,
			   LLVMValueRef b)
{
	LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
	return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}

LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	if (HAVE_LLVM >= 0x0500) {
		LLVMValueRef max[2] = {
			value,
			LLVMConstReal(ctx->f32, 0),
		};
		LLVMValueRef min[2] = {
			LLVMConstReal(ctx->f32, 1),
		};

		min[1] = ac_build_intrinsic(ctx, "llvm.maxnum.f32",
					    ctx->f32, max, 2,
					    AC_FUNC_ATTR_READNONE);
		return ac_build_intrinsic(ctx, "llvm.minnum.f32",
					  ctx->f32, min, 2,
					  AC_FUNC_ATTR_READNONE);
	}

	LLVMValueRef args[3] = {
		value,
		LLVMConstReal(ctx->f32, 0),
		LLVMConstReal(ctx->f32, 1),
	};

	return ac_build_intrinsic(ctx, "llvm.AMDGPU.clamp.", ctx->f32, args, 3,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}
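
/* On LLVM >= 5.0 this computes minnum(maxnum(value, 0.0), 1.0); since
 * maxnum/minnum return the non-NaN operand, a NaN input is clamped to 0.
 */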

void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
{
	LLVMValueRef args[9];

	if (HAVE_LLVM >= 0x0500) {
		args[0] = LLVMConstInt(ctx->i32, a->target, 0);
		args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);

		if (a->compr) {
			LLVMTypeRef i16 = LLVMInt16TypeInContext(ctx->context);
			LLVMTypeRef v2i16 = LLVMVectorType(i16, 2);

			args[2] = LLVMBuildBitCast(ctx->builder, a->out[0],
						   v2i16, "");
			args[3] = LLVMBuildBitCast(ctx->builder, a->out[1],
						   v2i16, "");
			args[4] = LLVMConstInt(ctx->i1, a->done, 0);
			args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);

			ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16",
					   ctx->voidt, args, 6, 0);
		} else {
			args[2] = a->out[0];
			args[3] = a->out[1];
			args[4] = a->out[2];
			args[5] = a->out[3];
			args[6] = LLVMConstInt(ctx->i1, a->done, 0);
			args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);

			ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32",
					   ctx->voidt, args, 8, 0);
		}
		return;
	}

	args[0] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
	args[1] = LLVMConstInt(ctx->i32, a->valid_mask, 0);
	args[2] = LLVMConstInt(ctx->i32, a->done, 0);
	args[3] = LLVMConstInt(ctx->i32, a->target, 0);
	args[4] = LLVMConstInt(ctx->i32, a->compr, 0);
	memcpy(args + 5, a->out, sizeof(a->out[0]) * 4);

	ac_build_intrinsic(ctx, "llvm.SI.export", ctx->voidt, args, 9,
			   AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx,
				   struct ac_image_args *a)
{
	LLVMTypeRef dst_type;
	LLVMValueRef args[11];
	unsigned num_args = 0;
	const char *name = NULL;
	char intr_name[128], type[64];

	if (HAVE_LLVM >= 0x0400) {
		bool sample = a->opcode == ac_image_sample ||
			      a->opcode == ac_image_gather4 ||
			      a->opcode == ac_image_get_lod;

		if (sample)
			args[num_args++] = ac_to_float(ctx, a->addr);
		else
			args[num_args++] = a->addr;

		args[num_args++] = a->resource;
		if (sample)
			args[num_args++] = a->sampler;
		args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, 0);
		if (sample)
			args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, 0);
		args[num_args++] = ctx->i1false; /* glc */
		args[num_args++] = ctx->i1false; /* slc */
		args[num_args++] = ctx->i1false; /* lwe */
		args[num_args++] = LLVMConstInt(ctx->i1, a->da, 0);

		switch (a->opcode) {
		case ac_image_sample:
			name = "llvm.amdgcn.image.sample";
			break;
		case ac_image_gather4:
			name = "llvm.amdgcn.image.gather4";
			break;
		case ac_image_load:
			name = "llvm.amdgcn.image.load";
			break;
		case ac_image_load_mip:
			name = "llvm.amdgcn.image.load.mip";
			break;
		case ac_image_get_lod:
			name = "llvm.amdgcn.image.getlod";
			break;
		case ac_image_get_resinfo:
			name = "llvm.amdgcn.image.getresinfo";
			break;
		default:
			unreachable("invalid image opcode");
		}

		ac_build_type_name_for_intr(LLVMTypeOf(args[0]), type,
					    sizeof(type));

		snprintf(intr_name, sizeof(intr_name), "%s%s%s%s.v4f32.%s.v8i32",
			 name,
			 a->compare ? ".c" : "",
			 a->bias ? ".b" :
			 a->lod ? ".l" :
			 a->deriv ? ".d" :
			 a->level_zero ? ".lz" : "",
			 a->offset ? ".o" : "",
			 type);

		LLVMValueRef result =
			ac_build_intrinsic(ctx, intr_name,
					   ctx->v4f32, args, num_args,
					   AC_FUNC_ATTR_READNONE);
		if (!sample) {
			result = LLVMBuildBitCast(ctx->builder, result,
						  ctx->v4i32, "");
		}
		return result;
	}

	args[num_args++] = a->addr;
	args[num_args++] = a->resource;

	if (a->opcode == ac_image_load ||
	    a->opcode == ac_image_load_mip ||
	    a->opcode == ac_image_get_resinfo) {
		dst_type = ctx->v4i32;
	} else {
		dst_type = ctx->v4f32;
		args[num_args++] = a->sampler;
	}

	args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, a->unorm, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* r128 */
	args[num_args++] = LLVMConstInt(ctx->i32, a->da, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* glc */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* slc */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* tfe */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* lwe */

	switch (a->opcode) {
	case ac_image_sample:
		name = "llvm.SI.image.sample";
		break;
	case ac_image_gather4:
		name = "llvm.SI.gather4";
		break;
	case ac_image_load:
		name = "llvm.SI.image.load";
		break;
	case ac_image_load_mip:
		name = "llvm.SI.image.load.mip";
		break;
	case ac_image_get_lod:
		name = "llvm.SI.getlod";
		break;
	case ac_image_get_resinfo:
		name = "llvm.SI.getresinfo";
		break;
	}

	ac_build_type_name_for_intr(LLVMTypeOf(a->addr), type, sizeof(type));
	snprintf(intr_name, sizeof(intr_name), "%s%s%s%s.%s",
		 name,
		 a->compare ? ".c" : "",
		 a->bias ? ".b" :
		 a->lod ? ".l" :
		 a->deriv ? ".d" :
		 a->level_zero ? ".lz" : "",
		 a->offset ? ".o" : "",
		 type);

	return ac_build_intrinsic(ctx, intr_name,
				  dst_type, args, num_args,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx,
				    LLVMValueRef args[2])
{
	if (HAVE_LLVM >= 0x0500) {
		LLVMTypeRef v2f16 =
			LLVMVectorType(LLVMHalfTypeInContext(ctx->context), 2);
		LLVMValueRef res =
			ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz",
					   v2f16, args, 2,
					   AC_FUNC_ATTR_READNONE);
		return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
	}

	return ac_build_intrinsic(ctx, "llvm.SI.packf16", ctx->i32, args, 2,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
	assert(HAVE_LLVM >= 0x0600);
	return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1,
				  &i1, 1, AC_FUNC_ATTR_READNONE);
}

void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
	if (HAVE_LLVM >= 0x0600) {
		ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt,
				   &i1, 1, 0);
		return;
	}

	LLVMValueRef value = LLVMBuildSelect(ctx->builder, i1,
					     LLVMConstReal(ctx->f32, 1),
					     LLVMConstReal(ctx->f32, -1), "");
	ac_build_intrinsic(ctx, "llvm.AMDGPU.kill", ctx->voidt,
			   &value, 1, AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input,
			  LLVMValueRef offset, LLVMValueRef width,
			  bool is_signed)
{
	LLVMValueRef args[] = {
		input,
		offset,
		width,
	};

	if (HAVE_LLVM >= 0x0500) {
		return ac_build_intrinsic(ctx,
					  is_signed ? "llvm.amdgcn.sbfe.i32" :
						      "llvm.amdgcn.ubfe.i32",
					  ctx->i32, args, 3,
					  AC_FUNC_ATTR_READNONE);
	}

	return ac_build_intrinsic(ctx,
				  is_signed ? "llvm.AMDGPU.bfe.i32" :
					      "llvm.AMDGPU.bfe.u32",
				  ctx->i32, args, 3,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

void ac_get_image_intr_name(const char *base_name,
			    LLVMTypeRef data_type,
			    LLVMTypeRef coords_type,
			    LLVMTypeRef rsrc_type,
			    char *out_name, unsigned out_len)
{
	char coords_type_name[8];

	ac_build_type_name_for_intr(coords_type, coords_type_name,
				    sizeof(coords_type_name));

	if (HAVE_LLVM <= 0x0309) {
		snprintf(out_name, out_len, "%s.%s", base_name, coords_type_name);
	} else {
		char data_type_name[8];
		char rsrc_type_name[8];

		ac_build_type_name_for_intr(data_type, data_type_name,
					    sizeof(data_type_name));
		ac_build_type_name_for_intr(rsrc_type, rsrc_type_name,
					    sizeof(rsrc_type_name));
		snprintf(out_name, out_len, "%s.%s.%s.%s", base_name,
			 data_type_name, coords_type_name, rsrc_type_name);
	}
}

#define AC_EXP_TARGET (HAVE_LLVM >= 0x0500 ? 0 : 3)
#define AC_EXP_OUT0 (HAVE_LLVM >= 0x0500 ? 2 : 5)
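
/* Operand indices of the export target and the first data channel. They
 * differ because llvm.amdgcn.exp.f32 (LLVM >= 5.0) takes
 * (target, en, src0..src3, done, vm) while the legacy llvm.SI.export takes
 * (en, vm, done, target, compr, src0..src3); see ac_build_export above.
 */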

enum ac_ir_type {
	AC_IR_UNDEF,
	AC_IR_CONST,
	AC_IR_VALUE,
};

struct ac_vs_exp_chan
{
	LLVMValueRef value;
	float const_float;
	enum ac_ir_type type;
};

struct ac_vs_exp_inst {
	unsigned offset;
	LLVMValueRef inst;
	struct ac_vs_exp_chan chan[4];
};

struct ac_vs_exports {
	unsigned num;
	struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
};

/* Return true if the PARAM export has been eliminated. */
static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset,
				      uint32_t num_outputs,
				      struct ac_vs_exp_inst *exp)
{
	unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
	bool is_zero[4] = {}, is_one[4] = {};

	for (i = 0; i < 4; i++) {
		/* It's a constant expression. Undef outputs are eliminated too. */
		if (exp->chan[i].type == AC_IR_UNDEF) {
			is_zero[i] = true;
			is_one[i] = true;
		} else if (exp->chan[i].type == AC_IR_CONST) {
			if (exp->chan[i].const_float == 0)
				is_zero[i] = true;
			else if (exp->chan[i].const_float == 1)
				is_one[i] = true;
			else
				return false; /* other constant */
		} else
			return false;
	}

	/* Only certain combinations of 0 and 1 can be eliminated. */
	if (is_zero[0] && is_zero[1] && is_zero[2])
		default_val = is_zero[3] ? 0 : 1;
	else if (is_one[0] && is_one[1] && is_one[2])
		default_val = is_zero[3] ? 2 : 3;
	else
		return false;

	/* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
	LLVMInstructionEraseFromParent(exp->inst);

	/* Change OFFSET to DEFAULT_VAL. */
	for (i = 0; i < num_outputs; i++) {
		if (vs_output_param_offset[i] == exp->offset) {
			vs_output_param_offset[i] =
				AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
			break;
		}
	}
	return true;
}

static bool ac_eliminate_duplicated_output(uint8_t *vs_output_param_offset,
					   uint32_t num_outputs,
					   struct ac_vs_exports *processed,
					   struct ac_vs_exp_inst *exp)
{
	unsigned p, copy_back_channels = 0;

	/* See if the output is already in the list of processed outputs.
	 * The LLVMValueRef comparison relies on SSA.
	 */
	for (p = 0; p < processed->num; p++) {
		bool different = false;

		for (unsigned j = 0; j < 4; j++) {
			struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
			struct ac_vs_exp_chan *c2 = &exp->chan[j];

			/* Treat undef as a match. */
			if (c2->type == AC_IR_UNDEF)
				continue;

			/* If c1 is undef but c2 isn't, we can copy c2 to c1
			 * and consider the instruction duplicated.
			 */
			if (c1->type == AC_IR_UNDEF) {
				copy_back_channels |= 1 << j;
				continue;
			}

			/* Test whether the channels are not equal. */
			if (c1->type != c2->type ||
			    (c1->type == AC_IR_CONST &&
			     c1->const_float != c2->const_float) ||
			    (c1->type == AC_IR_VALUE &&
			     c1->value != c2->value)) {
				different = true;
				break;
			}
		}
		if (!different)
			break;

		copy_back_channels = 0;
	}
	if (p == processed->num)
		return false;

	/* If a match was found, but the matching export has undef where the new
	 * one has a normal value, copy the normal value to the undef channel.
	 */
	struct ac_vs_exp_inst *match = &processed->exp[p];

	while (copy_back_channels) {
		unsigned chan = u_bit_scan(&copy_back_channels);

		assert(match->chan[chan].type == AC_IR_UNDEF);
		LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan,
			       exp->chan[chan].value);
		match->chan[chan] = exp->chan[chan];
	}

	/* The PARAM export is duplicated. Kill it. */
	LLVMInstructionEraseFromParent(exp->inst);

	/* Change OFFSET to the matching export. */
	for (unsigned i = 0; i < num_outputs; i++) {
		if (vs_output_param_offset[i] == exp->offset) {
			vs_output_param_offset[i] = match->offset;
			break;
		}
	}
	return true;
}

void ac_optimize_vs_outputs(struct ac_llvm_context *ctx,
			    LLVMValueRef main_fn,
			    uint8_t *vs_output_param_offset,
			    uint32_t num_outputs,
			    uint8_t *num_param_exports)
{
	LLVMBasicBlockRef bb;
	bool removed_any = false;
	struct ac_vs_exports exports;

	exports.num = 0;

	/* Process all LLVM instructions. */
	bb = LLVMGetFirstBasicBlock(main_fn);
	while (bb) {
		LLVMValueRef inst = LLVMGetFirstInstruction(bb);

		while (inst) {
			LLVMValueRef cur = inst;
			inst = LLVMGetNextInstruction(inst);
			struct ac_vs_exp_inst exp;

			if (LLVMGetInstructionOpcode(cur) != LLVMCall)
				continue;

			LLVMValueRef callee = ac_llvm_get_called_value(cur);

			if (!ac_llvm_is_function(callee))
				continue;

			const char *name = LLVMGetValueName(callee);
			unsigned num_args = LLVMCountParams(callee);

			/* Check if this is an export instruction. */
			if ((num_args != 9 && num_args != 8) ||
			    (strcmp(name, "llvm.SI.export") &&
			     strcmp(name, "llvm.amdgcn.exp.f32")))
				continue;

			LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
			unsigned target = LLVMConstIntGetZExtValue(arg);

			if (target < V_008DFC_SQ_EXP_PARAM)
				continue;

			target -= V_008DFC_SQ_EXP_PARAM;

			/* Parse the instruction. */
			memset(&exp, 0, sizeof(exp));
			exp.offset = target;
			exp.inst = cur;

			for (unsigned i = 0; i < 4; i++) {
				LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);

				exp.chan[i].value = v;

				if (LLVMIsUndef(v)) {
					exp.chan[i].type = AC_IR_UNDEF;
				} else if (LLVMIsAConstantFP(v)) {
					LLVMBool loses_info;
					exp.chan[i].type = AC_IR_CONST;
					exp.chan[i].const_float =
						LLVMConstRealGetDouble(v, &loses_info);
				} else {
					exp.chan[i].type = AC_IR_VALUE;
				}
			}

			/* Eliminate constant and duplicated PARAM exports. */
			if (ac_eliminate_const_output(vs_output_param_offset,
						      num_outputs, &exp) ||
			    ac_eliminate_duplicated_output(vs_output_param_offset,
							   num_outputs, &exports,
							   &exp)) {
				removed_any = true;
			} else {
				exports.exp[exports.num++] = exp;
			}
		}
		bb = LLVMGetNextBasicBlock(bb);
	}

	/* Remove holes in export memory due to removed PARAM exports.
	 * This is done by renumbering all PARAM exports.
	 */
	if (removed_any) {
		uint8_t old_offset[VARYING_SLOT_MAX];
		unsigned out, i;

		/* Make a copy of the offsets. We need the old version while
		 * we are modifying some of them. */
		memcpy(old_offset, vs_output_param_offset,
		       sizeof(old_offset));

		for (i = 0; i < exports.num; i++) {
			unsigned offset = exports.exp[i].offset;

			/* Update vs_output_param_offset. Multiple outputs can
			 * have the same offset.
			 */
			for (out = 0; out < num_outputs; out++) {
				if (old_offset[out] == offset)
					vs_output_param_offset[out] = i;
			}

			/* Change the PARAM offset in the instruction. */
			LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
				       LLVMConstInt(ctx->i32,
						    V_008DFC_SQ_EXP_PARAM + i, 0));
		}
		*num_param_exports = exports.num;
	}
}

void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
{
	LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
	ac_build_intrinsic(ctx,
			   "llvm.amdgcn.init.exec", ctx->voidt,
			   &full_mask, 1, AC_FUNC_ATTR_CONVERGENT);
}

void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
{
	unsigned lds_size = ctx->chip_class >= CIK ? 65536 : 32768;
	ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32_0,
		LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_LOCAL_ADDR_SPACE),
		"lds");
}

LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx,
			 LLVMValueRef dw_addr)
{
	return ac_build_load(ctx, ctx->lds, dw_addr);
}

void ac_lds_store(struct ac_llvm_context *ctx,
		  LLVMValueRef dw_addr,
		  LLVMValueRef value)
{
	value = ac_to_integer(ctx, value);
	ac_build_indexed_store(ctx, ctx->lds,
			       dw_addr, value);
}

LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx,
			 LLVMTypeRef dst_type,
			 LLVMValueRef src0)
{
	LLVMValueRef params[2] = {
		src0,

		/* The value of 1 means that ffs(x=0) = undef, so LLVM won't
		 * add special code to check for x=0. The reason is that
		 * the LLVM behavior for x=0 is different from what we
		 * need here. However, LLVM also assumes that ffs(x) is
		 * in [0, 31], but GLSL expects that ffs(0) = -1, so
		 * a conditional assignment to handle 0 is still required.
		 *
		 * The hardware already implements the correct behavior.
		 */
		LLVMConstInt(ctx->i1, 1, false),
	};

	LLVMValueRef lsb = ac_build_intrinsic(ctx, "llvm.cttz.i32", ctx->i32,
					      params, 2,
					      AC_FUNC_ATTR_READNONE);

	/* TODO: We need an intrinsic to skip this conditional. */
	/* Check for zero: */
	return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
							   LLVMIntEQ, src0,
							   ctx->i32_0, ""),
			       LLVMConstInt(ctx->i32, -1, 0), lsb, "");
}