ac/llvm: consolidate find lsb function.
src/amd/common/ac_llvm_build.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/* based on pieces from si_pipe.c and radeon_llvm_emit.c */
#include "ac_llvm_build.h"

#include <llvm-c/Core.h>

#include "c11/threads.h"

#include <assert.h>
#include <stdio.h>

#include "ac_llvm_util.h"
#include "ac_exp_param.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "sid.h"

#include "shader_enums.h"

/* Initialize module-independent parts of the context.
 *
 * The caller is responsible for initializing ctx::module and ctx::builder.
 */
void
ac_llvm_context_init(struct ac_llvm_context *ctx, LLVMContextRef context,
		     enum chip_class chip_class)
{
	LLVMValueRef args[1];

	ctx->chip_class = chip_class;

	ctx->context = context;
	ctx->module = NULL;
	ctx->builder = NULL;

	ctx->voidt = LLVMVoidTypeInContext(ctx->context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->context);
	ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
	ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
	ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
	ctx->f16 = LLVMHalfTypeInContext(ctx->context);
	ctx->f32 = LLVMFloatTypeInContext(ctx->context);
	ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);

	ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
	ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
	ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
	ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);

	ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
	ctx->i1true = LLVMConstInt(ctx->i1, 1, false);

	ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context,
						      "range", 5);

	ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context,
							       "invariant.load", 14);

	ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->context, "fpmath", 6);

	args[0] = LLVMConstReal(ctx->f32, 2.5);
	ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->context, args, 1);

	ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context,
							"amdgpu.uniform", 14);

	ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
}

unsigned
ac_get_type_size(LLVMTypeRef type)
{
	LLVMTypeKind kind = LLVMGetTypeKind(type);

	switch (kind) {
	case LLVMIntegerTypeKind:
		return LLVMGetIntTypeWidth(type) / 8;
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
	case LLVMPointerTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		return LLVMGetVectorSize(type) *
		       ac_get_type_size(LLVMGetElementType(type));
	case LLVMArrayTypeKind:
		return LLVMGetArrayLength(type) *
		       ac_get_type_size(LLVMGetElementType(type));
	default:
		assert(0);
		return 0;
	}
}
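
/* Illustrative examples (these follow directly from the cases above):
 * ac_get_type_size(ctx->i32) == 4, ac_get_type_size(ctx->v4f32) == 16,
 * and an [8 x i32] array type yields 32. Pointers are assumed to be
 * 64-bit here.
 */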

static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (t == ctx->f16 || t == ctx->i16)
		return ctx->i16;
	else if (t == ctx->f32 || t == ctx->i32)
		return ctx->i32;
	else if (t == ctx->f64 || t == ctx->i64)
		return ctx->i64;
	else
		unreachable("Unhandled integer size");
}

LLVMTypeRef
ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
		LLVMTypeRef elem_type = LLVMGetElementType(t);
		return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
				      LLVMGetVectorSize(t));
	}
	return to_integer_type_scalar(ctx, t);
}

LLVMValueRef
ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
{
	LLVMTypeRef type = LLVMTypeOf(v);
	return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
}

static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (t == ctx->i16 || t == ctx->f16)
		return ctx->f16;
	else if (t == ctx->i32 || t == ctx->f32)
		return ctx->f32;
	else if (t == ctx->i64 || t == ctx->f64)
		return ctx->f64;
	else
		unreachable("Unhandled float size");
}

LLVMTypeRef
ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
	if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
		LLVMTypeRef elem_type = LLVMGetElementType(t);
		return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
				      LLVMGetVectorSize(t));
	}
	return to_float_type_scalar(ctx, t);
}

LLVMValueRef
ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
{
	LLVMTypeRef type = LLVMTypeOf(v);
	return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
}

LLVMValueRef
ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
		   LLVMTypeRef return_type, LLVMValueRef *params,
		   unsigned param_count, unsigned attrib_mask)
{
	LLVMValueRef function, call;
	bool set_callsite_attrs = HAVE_LLVM >= 0x0400 &&
				  !(attrib_mask & AC_FUNC_ATTR_LEGACY);

	function = LLVMGetNamedFunction(ctx->module, name);
	if (!function) {
		LLVMTypeRef param_types[32], function_type;
		unsigned i;

		assert(param_count <= 32);

		for (i = 0; i < param_count; ++i) {
			assert(params[i]);
			param_types[i] = LLVMTypeOf(params[i]);
		}
		function_type =
			LLVMFunctionType(return_type, param_types, param_count, 0);
		function = LLVMAddFunction(ctx->module, name, function_type);

		LLVMSetFunctionCallConv(function, LLVMCCallConv);
		LLVMSetLinkage(function, LLVMExternalLinkage);

		if (!set_callsite_attrs)
			ac_add_func_attributes(ctx->context, function, attrib_mask);
	}

	call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
	if (set_callsite_attrs)
		ac_add_func_attributes(ctx->context, call, attrib_mask);
	return call;
}
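
/* Usage sketch (illustrative only; "val" is assumed to be an existing f32
 * value): emitting a call to an LLVM intrinsic through the helper above.
 *
 *    LLVMValueRef r = ac_build_intrinsic(ctx, "llvm.fabs.f32", ctx->f32,
 *                                        &val, 1, AC_FUNC_ATTR_READNONE);
 *
 * The declaration is created on first use and reused afterwards; attributes
 * go on the call site for LLVM >= 4.0, except for AC_FUNC_ATTR_LEGACY
 * intrinsics, which keep function-level attributes.
 */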

/**
 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
 * intrinsic names).
 */
void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
{
	LLVMTypeRef elem_type = type;

	assert(bufsize >= 8);

	if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
		int ret = snprintf(buf, bufsize, "v%u",
				   LLVMGetVectorSize(type));
		if (ret < 0) {
			char *type_name = LLVMPrintTypeToString(type);
			fprintf(stderr, "Error building type name for: %s\n",
				type_name);
			return;
		}
		elem_type = LLVMGetElementType(type);
		buf += ret;
		bufsize -= ret;
	}
	switch (LLVMGetTypeKind(elem_type)) {
	default: break;
	case LLVMIntegerTypeKind:
		snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
		break;
	case LLVMFloatTypeKind:
		snprintf(buf, bufsize, "f32");
		break;
	case LLVMDoubleTypeKind:
		snprintf(buf, bufsize, "f64");
		break;
	}
}
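
/* Example outputs (follow directly from the code above): i32 -> "i32",
 * f32 -> "f32", <4 x float> -> "v4f32", <2 x i32> -> "v2i32". These are the
 * type suffixes used on overloaded intrinsic names such as
 * llvm.amdgcn.buffer.load.v4f32.
 */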

/**
 * Helper function that builds an LLVM IR PHI node and immediately adds
 * incoming edges.
 */
LLVMValueRef
ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type,
	     unsigned count_incoming, LLVMValueRef *values,
	     LLVMBasicBlockRef *blocks)
{
	LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
	LLVMAddIncoming(phi, values, blocks, count_incoming);
	return phi;
}

/* Prevent optimizations (at least of memory accesses) across the current
 * point in the program by emitting empty inline assembly that is marked as
 * having side effects.
 *
 * Optionally, a value can be passed through the inline assembly to prevent
 * LLVM from hoisting calls to ReadNone functions.
 */
void
ac_build_optimization_barrier(struct ac_llvm_context *ctx,
			      LLVMValueRef *pvgpr)
{
	static int counter = 0;

	LLVMBuilderRef builder = ctx->builder;
	char code[16];

	snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));

	if (!pvgpr) {
		LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
		LLVMBuildCall(builder, inlineasm, NULL, 0, "");
	} else {
		LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
		LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
		LLVMValueRef vgpr = *pvgpr;
		LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
		unsigned vgpr_size = ac_get_type_size(vgpr_type);
		LLVMValueRef vgpr0;

		assert(vgpr_size % 4 == 0);

		vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
		vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
		vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
		vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
		vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");

		*pvgpr = vgpr;
	}
}

LLVMValueRef
ac_build_ballot(struct ac_llvm_context *ctx,
		LLVMValueRef value)
{
	LLVMValueRef args[3] = {
		value,
		ctx->i32_0,
		LLVMConstInt(ctx->i32, LLVMIntNE, 0)
	};

	/* We currently have no other way to prevent LLVM from lifting the icmp
	 * calls to a dominating basic block.
	 */
	ac_build_optimization_barrier(ctx, &args[0]);

	if (LLVMTypeOf(args[0]) != ctx->i32)
		args[0] = LLVMBuildBitCast(ctx->builder, args[0], ctx->i32, "");

	return ac_build_intrinsic(ctx,
				  "llvm.amdgcn.icmp.i32",
				  ctx->i64, args, 3,
				  AC_FUNC_ATTR_NOUNWIND |
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_CONVERGENT);
}
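
/* Semantics sketch: the returned i64 has bit N set iff lane N is active and
 * "value" is non-zero in that lane. The vote helpers below compare this
 * bitmask against a ballot of an all-ones value (i.e. the set of active
 * lanes): all lanes agree on "true" when the two masks are equal, and on
 * "false" when the ballot is zero.
 */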

LLVMValueRef
ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);
	return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
}

LLVMValueRef
ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);
	return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set,
			     LLVMConstInt(ctx->i64, 0, 0), "");
}

LLVMValueRef
ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
	LLVMValueRef vote_set = ac_build_ballot(ctx, value);

	LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
					 vote_set, active_set, "");
	LLVMValueRef none = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
					  vote_set,
					  LLVMConstInt(ctx->i64, 0, 0), "");
	return LLVMBuildOr(ctx->builder, all, none, "");
}

LLVMValueRef
ac_build_gather_values_extended(struct ac_llvm_context *ctx,
				LLVMValueRef *values,
				unsigned value_count,
				unsigned value_stride,
				bool load,
				bool always_vector)
{
	LLVMBuilderRef builder = ctx->builder;
	LLVMValueRef vec = NULL;
	unsigned i;

	if (value_count == 1 && !always_vector) {
		if (load)
			return LLVMBuildLoad(builder, values[0], "");
		return values[0];
	} else if (!value_count)
		unreachable("value_count is 0");

	for (i = 0; i < value_count; i++) {
		LLVMValueRef value = values[i * value_stride];
		if (load)
			value = LLVMBuildLoad(builder, value, "");

		if (!i)
			vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
		LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
		vec = LLVMBuildInsertElement(builder, vec, value, index, "");
	}
	return vec;
}

LLVMValueRef
ac_build_gather_values(struct ac_llvm_context *ctx,
		       LLVMValueRef *values,
		       unsigned value_count)
{
	return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
}

LLVMValueRef
ac_build_fdiv(struct ac_llvm_context *ctx,
	      LLVMValueRef num,
	      LLVMValueRef den)
{
	LLVMValueRef ret = LLVMBuildFDiv(ctx->builder, num, den, "");

	if (!LLVMIsConstant(ret))
		LLVMSetMetadata(ret, ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
	return ret;
}

/* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
 * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
 * already multiplied by two. id is the cube face number.
 */
struct cube_selection_coords {
	LLVMValueRef stc[2];
	LLVMValueRef ma;
	LLVMValueRef id;
};

static void
build_cube_intrinsic(struct ac_llvm_context *ctx,
		     LLVMValueRef in[3],
		     struct cube_selection_coords *out)
{
	LLVMTypeRef f32 = ctx->f32;

	out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc",
					 f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc",
					 f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema",
				     f32, in, 3, AC_FUNC_ATTR_READNONE);
	out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid",
				     f32, in, 3, AC_FUNC_ATTR_READNONE);
}

/**
 * Build a manual selection sequence for cube face sc/tc coordinates and
 * major axis vector (multiplied by 2 for consistency) for the given
 * vec3 \p coords, for the face implied by \p selcoords.
 *
 * For the major axis, we always adjust the sign to be in the direction of
 * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
 * the selcoords major axis.
 */
static void build_cube_select(struct ac_llvm_context *ctx,
			      const struct cube_selection_coords *selcoords,
			      const LLVMValueRef *coords,
			      LLVMValueRef *out_st,
			      LLVMValueRef *out_ma)
{
	LLVMBuilderRef builder = ctx->builder;
	LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
	LLVMValueRef is_ma_positive;
	LLVMValueRef sgn_ma;
	LLVMValueRef is_ma_z, is_not_ma_z;
	LLVMValueRef is_ma_y;
	LLVMValueRef is_ma_x;
	LLVMValueRef sgn;
	LLVMValueRef tmp;

	is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE,
				       selcoords->ma, LLVMConstReal(f32, 0.0), "");
	sgn_ma = LLVMBuildSelect(builder, is_ma_positive,
				 LLVMConstReal(f32, 1.0), LLVMConstReal(f32, -1.0), "");

	is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
	is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
	is_ma_y = LLVMBuildAnd(builder, is_not_ma_z,
			       LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
	is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");

	/* Select sc */
	tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
	sgn = LLVMBuildSelect(builder, is_ma_y, LLVMConstReal(f32, 1.0),
			      LLVMBuildSelect(builder, is_ma_z, sgn_ma,
					      LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
	out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");

	/* Select tc */
	tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
	sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma,
			      LLVMConstReal(f32, -1.0), "");
	out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");

	/* Select ma */
	tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
			      LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
	tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32",
				 ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
	*out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
}

void
ac_prepare_cube_coords(struct ac_llvm_context *ctx,
		       bool is_deriv, bool is_array, bool is_lod,
		       LLVMValueRef *coords_arg,
		       LLVMValueRef *derivs_arg)
{
	LLVMBuilderRef builder = ctx->builder;
	struct cube_selection_coords selcoords;
	LLVMValueRef coords[3];
	LLVMValueRef invma;

	if (is_array && !is_lod) {
		LLVMValueRef tmp = coords_arg[3];
		tmp = ac_build_intrinsic(ctx, "llvm.rint.f32", ctx->f32, &tmp, 1, 0);

		/* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
		 *
		 *    "For Array forms, the array layer used will be
		 *
		 *       max(0, min(d−1, floor(layer+0.5)))
		 *
		 *     where d is the depth of the texture array and layer
		 *     comes from the component indicated in the tables below."
		 *
		 * The rounding is a workaround for an issue where the layer
		 * is taken from a helper invocation which happens to fall on
		 * a different layer due to extrapolation.
		 *
		 * VI and earlier attempt to implement this in hardware by
		 * clamping the value of coords[2] = (8 * layer) + face.
		 * Unfortunately, this means that we end up with the wrong
		 * face when clamping occurs.
		 *
		 * Clamp the layer earlier to work around the issue.
		 */
		if (ctx->chip_class <= VI) {
			LLVMValueRef ge0;
			ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
			tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
		}

		coords_arg[3] = tmp;
	}

	build_cube_intrinsic(ctx, coords_arg, &selcoords);

	invma = ac_build_intrinsic(ctx, "llvm.fabs.f32",
				   ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
	invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);

	for (int i = 0; i < 2; ++i)
		coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");

	coords[2] = selcoords.id;

	if (is_deriv && derivs_arg) {
		LLVMValueRef derivs[4];
		int axis;

		/* Convert cube derivatives to 2D derivatives. */
		for (axis = 0; axis < 2; axis++) {
			LLVMValueRef deriv_st[2];
			LLVMValueRef deriv_ma;

			/* Transform the derivative alongside the texture
			 * coordinate. Mathematically, the correct formula is
			 * as follows. Assume we're projecting onto the +Z face
			 * and denote by dx/dh the derivative of the (original)
			 * X texture coordinate with respect to horizontal
			 * window coordinates. The projection onto the +Z face
			 * plane is:
			 *
			 *   f(x,z) = x/z
			 *
			 * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
			 *            = 1/z * dx/dh - x/z * 1/z * dz/dh.
			 *
			 * This motivates the implementation below.
			 *
			 * Whether this actually gives the expected results for
			 * apps that might feed in derivatives obtained via
			 * finite differences is anyone's guess. The OpenGL spec
			 * seems awfully quiet about how textureGrad for cube
			 * maps should be handled.
			 */
			build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3],
					  deriv_st, &deriv_ma);

			deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");

			for (int i = 0; i < 2; ++i)
				derivs[axis * 2 + i] =
					LLVMBuildFSub(builder,
						      LLVMBuildFMul(builder, deriv_st[i], invma, ""),
						      LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
		}

		memcpy(derivs_arg, derivs, sizeof(derivs));
	}

	/* Shift the texture coordinate. This must be applied after the
	 * derivative calculation.
	 */
	for (int i = 0; i < 2; ++i)
		coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");

	if (is_array) {
		/* For cube arrays, coords[2] = array_index * 8 + face, where
		 * the array index comes from the coords_arg[3] (w) component.
		 */
		LLVMValueRef tmp = LLVMBuildFMul(ctx->builder, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), "");
		coords[2] = LLVMBuildFAdd(ctx->builder, tmp, coords[2], "");
	}

	memcpy(coords_arg, coords, sizeof(coords));
}

LLVMValueRef
ac_build_fs_interp(struct ac_llvm_context *ctx,
		   LLVMValueRef llvm_chan,
		   LLVMValueRef attr_number,
		   LLVMValueRef params,
		   LLVMValueRef i,
		   LLVMValueRef j)
{
	LLVMValueRef args[5];
	LLVMValueRef p1;

	if (HAVE_LLVM < 0x0400) {
		LLVMValueRef ij[2];
		ij[0] = LLVMBuildBitCast(ctx->builder, i, ctx->i32, "");
		ij[1] = LLVMBuildBitCast(ctx->builder, j, ctx->i32, "");

		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		args[3] = ac_build_gather_values(ctx, ij, 2);
		return ac_build_intrinsic(ctx, "llvm.SI.fs.interp",
					  ctx->f32, args, 4,
					  AC_FUNC_ATTR_READNONE);
	}

	args[0] = i;
	args[1] = llvm_chan;
	args[2] = attr_number;
	args[3] = params;

	p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
				ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);

	args[0] = p1;
	args[1] = j;
	args[2] = llvm_chan;
	args[3] = attr_number;
	args[4] = params;

	return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
				  ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
}

LLVMValueRef
ac_build_fs_interp_mov(struct ac_llvm_context *ctx,
		       LLVMValueRef parameter,
		       LLVMValueRef llvm_chan,
		       LLVMValueRef attr_number,
		       LLVMValueRef params)
{
	LLVMValueRef args[4];
	if (HAVE_LLVM < 0x0400) {
		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;

		return ac_build_intrinsic(ctx,
					  "llvm.SI.fs.constant",
					  ctx->f32, args, 3,
					  AC_FUNC_ATTR_READNONE);
	}

	args[0] = parameter;
	args[1] = llvm_chan;
	args[2] = attr_number;
	args[3] = params;

	return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov",
				  ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
}

LLVMValueRef
ac_build_gep0(struct ac_llvm_context *ctx,
	      LLVMValueRef base_ptr,
	      LLVMValueRef index)
{
	LLVMValueRef indices[2] = {
		LLVMConstInt(ctx->i32, 0, 0),
		index,
	};
	return LLVMBuildGEP(ctx->builder, base_ptr,
			    indices, 2, "");
}
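
/* Usage sketch (illustrative): ac_build_gep0(ctx, ptr, idx) computes the
 * address of element "idx" when "ptr" points to an array, i.e. &(*ptr)[idx].
 * The store/load helpers below wrap it:
 *
 *    ac_build_indexed_store(ctx, lds_ptr, dw_addr, value);
 *    LLVMValueRef v = ac_build_load(ctx, lds_ptr, dw_addr);
 */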

void
ac_build_indexed_store(struct ac_llvm_context *ctx,
		       LLVMValueRef base_ptr, LLVMValueRef index,
		       LLVMValueRef value)
{
	LLVMBuildStore(ctx->builder, value,
		       ac_build_gep0(ctx, base_ptr, index));
}

/**
 * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
 * It's equivalent to doing a load from &base_ptr[index].
 *
 * \param base_ptr Where the array starts.
 * \param index The element index into the array.
 * \param uniform Whether the base_ptr and index can be assumed to be
 *                dynamically uniform (i.e. load to an SGPR)
 * \param invariant Whether the load is invariant (no other opcodes affect it)
 */
static LLVMValueRef
ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
		     LLVMValueRef index, bool uniform, bool invariant)
{
	LLVMValueRef pointer, result;

	pointer = ac_build_gep0(ctx, base_ptr, index);
	if (uniform)
		LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
	result = LLVMBuildLoad(ctx->builder, pointer, "");
	if (invariant)
		LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
	return result;
}

LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
			   LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, false, false);
}

LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx,
				     LLVMValueRef base_ptr, LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, false, true);
}

LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx,
				   LLVMValueRef base_ptr, LLVMValueRef index)
{
	return ac_build_load_custom(ctx, base_ptr, index, true, true);
}

/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
 * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
 * or v4i32 (num_channels=3,4).
 */
void
ac_build_buffer_store_dword(struct ac_llvm_context *ctx,
			    LLVMValueRef rsrc,
			    LLVMValueRef vdata,
			    unsigned num_channels,
			    LLVMValueRef voffset,
			    LLVMValueRef soffset,
			    unsigned inst_offset,
			    bool glc,
			    bool slc,
			    bool writeonly_memory,
			    bool swizzle_enable_hint)
{
	/* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
	 * (voffset is swizzled, but soffset isn't swizzled).
	 * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
	 */
	if (!swizzle_enable_hint) {
		/* Split 3-channel stores, because LLVM doesn't support
		 * 3-channel intrinsics.
		 */
		if (num_channels == 3) {
			LLVMValueRef v[3], v01;

			for (int i = 0; i < 3; i++) {
				v[i] = LLVMBuildExtractElement(ctx->builder, vdata,
							       LLVMConstInt(ctx->i32, i, 0), "");
			}
			v01 = ac_build_gather_values(ctx, v, 2);

			ac_build_buffer_store_dword(ctx, rsrc, v01, 2, voffset,
						    soffset, inst_offset, glc, slc,
						    writeonly_memory, swizzle_enable_hint);
			ac_build_buffer_store_dword(ctx, rsrc, v[2], 1, voffset,
						    soffset, inst_offset + 8,
						    glc, slc,
						    writeonly_memory, swizzle_enable_hint);
			return;
		}

		unsigned func = CLAMP(num_channels, 1, 3) - 1;
		static const char *types[] = {"f32", "v2f32", "v4f32"};
		char name[256];
		LLVMValueRef offset = soffset;

		if (inst_offset)
			offset = LLVMBuildAdd(ctx->builder, offset,
					      LLVMConstInt(ctx->i32, inst_offset, 0), "");
		if (voffset)
			offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");

		LLVMValueRef args[] = {
			ac_to_float(ctx, vdata),
			LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
			LLVMConstInt(ctx->i32, 0, 0),
			offset,
			LLVMConstInt(ctx->i1, glc, 0),
			LLVMConstInt(ctx->i1, slc, 0),
		};

		snprintf(name, sizeof(name), "llvm.amdgcn.buffer.store.%s",
			 types[func]);

		ac_build_intrinsic(ctx, name, ctx->voidt,
				   args, ARRAY_SIZE(args),
				   writeonly_memory ?
					   AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY :
					   AC_FUNC_ATTR_WRITEONLY);
		return;
	}

	static unsigned dfmt[] = {
		V_008F0C_BUF_DATA_FORMAT_32,
		V_008F0C_BUF_DATA_FORMAT_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32,
		V_008F0C_BUF_DATA_FORMAT_32_32_32_32
	};
	assert(num_channels >= 1 && num_channels <= 4);

	LLVMValueRef args[] = {
		rsrc,
		vdata,
		LLVMConstInt(ctx->i32, num_channels, 0),
		voffset ? voffset : LLVMGetUndef(ctx->i32),
		soffset,
		LLVMConstInt(ctx->i32, inst_offset, 0),
		LLVMConstInt(ctx->i32, dfmt[num_channels - 1], 0),
		LLVMConstInt(ctx->i32, V_008F0C_BUF_NUM_FORMAT_UINT, 0),
		LLVMConstInt(ctx->i32, voffset != NULL, 0),
		LLVMConstInt(ctx->i32, 0, 0), /* idxen */
		LLVMConstInt(ctx->i32, glc, 0),
		LLVMConstInt(ctx->i32, slc, 0),
		LLVMConstInt(ctx->i32, 0, 0), /* tfe */
	};

	/* The instruction offset field has 12 bits */
	assert(voffset || inst_offset < (1 << 12));

	/* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
	unsigned func = CLAMP(num_channels, 1, 3) - 1;
	const char *types[] = {"i32", "v2i32", "v4i32"};
	char name[256];
	snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);

	ac_build_intrinsic(ctx, name, ctx->voidt,
			   args, ARRAY_SIZE(args),
			   AC_FUNC_ATTR_LEGACY);
}
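
/* Usage sketch (illustrative; "rsrc" is assumed to be a valid buffer
 * descriptor and "data" a v2i32 value): store two dwords at byte offset 16,
 * taking the non-swizzled llvm.amdgcn.buffer.store path.
 *
 *    ac_build_buffer_store_dword(ctx, rsrc, data, 2, NULL, ctx->i32_0,
 *                                16, false, false, false, false);
 */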

LLVMValueRef
ac_build_buffer_load(struct ac_llvm_context *ctx,
		     LLVMValueRef rsrc,
		     int num_channels,
		     LLVMValueRef vindex,
		     LLVMValueRef voffset,
		     LLVMValueRef soffset,
		     unsigned inst_offset,
		     unsigned glc,
		     unsigned slc,
		     bool can_speculate,
		     bool allow_smem)
{
	LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
	if (voffset)
		offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
	if (soffset)
		offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");

	/* TODO: VI and later generations can use SMEM with GLC=1. */
	if (allow_smem && !glc && !slc) {
		assert(vindex == NULL);

		LLVMValueRef result[4];

		for (int i = 0; i < num_channels; i++) {
			if (i) {
				offset = LLVMBuildAdd(ctx->builder, offset,
						      LLVMConstInt(ctx->i32, 4, 0), "");
			}
			LLVMValueRef args[2] = {rsrc, offset};
			result[i] = ac_build_intrinsic(ctx, "llvm.SI.load.const.v4i32",
						       ctx->f32, args, 2,
						       AC_FUNC_ATTR_READNONE |
						       AC_FUNC_ATTR_LEGACY);
		}
		if (num_channels == 1)
			return result[0];

		if (num_channels == 3)
			result[num_channels++] = LLVMGetUndef(ctx->f32);
		return ac_build_gather_values(ctx, result, num_channels);
	}

	unsigned func = CLAMP(num_channels, 1, 3) - 1;

	LLVMValueRef args[] = {
		LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
		vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
		offset,
		LLVMConstInt(ctx->i1, glc, 0),
		LLVMConstInt(ctx->i1, slc, 0)
	};

	LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
			       ctx->v4f32};
	const char *type_names[] = {"f32", "v2f32", "v4f32"};
	char name[256];

	snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
		 type_names[func]);

	return ac_build_intrinsic(ctx, name, types[func], args,
				  ARRAY_SIZE(args),
				  /* READNONE means writes can't affect it, while
				   * READONLY means that writes can affect it. */
				  can_speculate && HAVE_LLVM >= 0x0400 ?
					  AC_FUNC_ATTR_READNONE :
					  AC_FUNC_ATTR_READONLY);
}

LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx,
					 LLVMValueRef rsrc,
					 LLVMValueRef vindex,
					 LLVMValueRef voffset,
					 bool can_speculate)
{
	LLVMValueRef args[] = {
		LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, ""),
		vindex,
		voffset,
		ctx->i1false, /* glc */
		ctx->i1false, /* slc */
	};

	return ac_build_intrinsic(ctx,
				  "llvm.amdgcn.buffer.load.format.v4f32",
				  ctx->v4f32, args, ARRAY_SIZE(args),
				  /* READNONE means writes can't affect it, while
				   * READONLY means that writes can affect it. */
				  can_speculate && HAVE_LLVM >= 0x0400 ?
					  AC_FUNC_ATTR_READNONE :
					  AC_FUNC_ATTR_READONLY);
}

/**
 * Set range metadata on an instruction. This can only be used on load and
 * call instructions. If you know an instruction can only produce the values
 * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
 * \p lo is the minimum value inclusive.
 * \p hi is the maximum value exclusive.
 */
static void set_range_metadata(struct ac_llvm_context *ctx,
			       LLVMValueRef value, unsigned lo, unsigned hi)
{
	LLVMValueRef range_md, md_args[2];
	LLVMTypeRef type = LLVMTypeOf(value);
	LLVMContextRef context = LLVMGetTypeContext(type);

	md_args[0] = LLVMConstInt(type, lo, false);
	md_args[1] = LLVMConstInt(type, hi, false);
	range_md = LLVMMDNodeInContext(context, md_args, 2);
	LLVMSetMetadata(value, ctx->range_md_kind, range_md);
}

LLVMValueRef
ac_get_thread_id(struct ac_llvm_context *ctx)
{
	LLVMValueRef tid;

	LLVMValueRef tid_args[2];
	tid_args[0] = LLVMConstInt(ctx->i32, 0xffffffff, false);
	tid_args[1] = LLVMConstInt(ctx->i32, 0, false);
	tid_args[1] = ac_build_intrinsic(ctx,
					 "llvm.amdgcn.mbcnt.lo", ctx->i32,
					 tid_args, 2, AC_FUNC_ATTR_READNONE);

	tid = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi",
				 ctx->i32, tid_args,
				 2, AC_FUNC_ATTR_READNONE);
	set_range_metadata(ctx, tid, 0, 64);
	return tid;
}

/*
 * SI implements derivatives using the local data store (LDS).
 * All writes to the LDS happen in all executing threads at
 * the same time. TID is the Thread ID for the current
 * thread and is a value between 0 and 63, representing
 * the thread's position in the wavefront.
 *
 * For the pixel shader, threads are grouped into quads of four pixels.
 * The TIDs of the pixels of a quad are:
 *
 *   +------+------+
 *   |4n + 0|4n + 1|
 *   +------+------+
 *   |4n + 2|4n + 3|
 *   +------+------+
 *
 * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
 * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
 * the current pixel's column, and masking with 0xfffffffe yields the TID
 * of the left pixel of the current pixel's row.
 *
 * Adding 1 yields the TID of the pixel to the right of the left pixel, and
 * adding 2 yields the TID of the pixel below the top pixel.
 */
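/* Worked example (follows from the masks above): for TID = 7, the
 * bottom-right pixel of the second quad, 7 & 0xfffffffc = 4 is the
 * top-left pixel, 7 & 0xfffffffd = 5 is the top pixel of the same column,
 * and 7 & 0xfffffffe = 6 is the left pixel of the same row. A horizontal
 * derivative is then value(left + 1) - value(left), and a vertical one
 * value(top + 2) - value(top), which is exactly the trbl - tl subtraction
 * built below.
 */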
LLVMValueRef
ac_build_ddxy(struct ac_llvm_context *ctx,
	      uint32_t mask,
	      int idx,
	      LLVMValueRef val)
{
	LLVMValueRef tl, trbl, args[2];
	LLVMValueRef result;

	if (ctx->chip_class >= VI) {
		LLVMValueRef thread_id, tl_tid, trbl_tid;
		thread_id = ac_get_thread_id(ctx);

		tl_tid = LLVMBuildAnd(ctx->builder, thread_id,
				      LLVMConstInt(ctx->i32, mask, false), "");

		trbl_tid = LLVMBuildAdd(ctx->builder, tl_tid,
					LLVMConstInt(ctx->i32, idx, false), "");

		args[0] = LLVMBuildMul(ctx->builder, tl_tid,
				       LLVMConstInt(ctx->i32, 4, false), "");
		args[1] = val;
		tl = ac_build_intrinsic(ctx,
					"llvm.amdgcn.ds.bpermute", ctx->i32,
					args, 2,
					AC_FUNC_ATTR_READNONE |
					AC_FUNC_ATTR_CONVERGENT);

		args[0] = LLVMBuildMul(ctx->builder, trbl_tid,
				       LLVMConstInt(ctx->i32, 4, false), "");
		trbl = ac_build_intrinsic(ctx,
					  "llvm.amdgcn.ds.bpermute", ctx->i32,
					  args, 2,
					  AC_FUNC_ATTR_READNONE |
					  AC_FUNC_ATTR_CONVERGENT);
	} else {
		uint32_t masks[2] = {};

		switch (mask) {
		case AC_TID_MASK_TOP_LEFT:
			masks[0] = 0x8000;
			if (idx == 1)
				masks[1] = 0x8055;
			else
				masks[1] = 0x80aa;

			break;
		case AC_TID_MASK_TOP:
			masks[0] = 0x8044;
			masks[1] = 0x80ee;
			break;
		case AC_TID_MASK_LEFT:
			masks[0] = 0x80a0;
			masks[1] = 0x80f5;
			break;
		default:
			assert(0);
		}

		args[0] = val;
		args[1] = LLVMConstInt(ctx->i32, masks[0], false);

		tl = ac_build_intrinsic(ctx,
					"llvm.amdgcn.ds.swizzle", ctx->i32,
					args, 2,
					AC_FUNC_ATTR_READNONE |
					AC_FUNC_ATTR_CONVERGENT);

		args[1] = LLVMConstInt(ctx->i32, masks[1], false);
		trbl = ac_build_intrinsic(ctx,
					  "llvm.amdgcn.ds.swizzle", ctx->i32,
					  args, 2,
					  AC_FUNC_ATTR_READNONE |
					  AC_FUNC_ATTR_CONVERGENT);
	}

	tl = LLVMBuildBitCast(ctx->builder, tl, ctx->f32, "");
	trbl = LLVMBuildBitCast(ctx->builder, trbl, ctx->f32, "");
	result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
	return result;
}

void
ac_build_sendmsg(struct ac_llvm_context *ctx,
		 uint32_t msg,
		 LLVMValueRef wave_id)
{
	LLVMValueRef args[2];
	const char *intr_name = (HAVE_LLVM < 0x0400) ? "llvm.SI.sendmsg" : "llvm.amdgcn.s.sendmsg";
	args[0] = LLVMConstInt(ctx->i32, msg, false);
	args[1] = wave_id;
	ac_build_intrinsic(ctx, intr_name, ctx->voidt, args, 2, 0);
}

LLVMValueRef
ac_build_imsb(struct ac_llvm_context *ctx,
	      LLVMValueRef arg,
	      LLVMTypeRef dst_type)
{
	const char *intr_name = (HAVE_LLVM < 0x0400) ? "llvm.AMDGPU.flbit.i32" :
						       "llvm.amdgcn.sffbh.i32";
	LLVMValueRef msb = ac_build_intrinsic(ctx, intr_name,
					      dst_type, &arg, 1,
					      AC_FUNC_ATTR_READNONE);

	/* The HW returns the last bit index from MSB, but NIR/TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
			   msb, "");

	LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
	LLVMValueRef cond = LLVMBuildOr(ctx->builder,
					LLVMBuildICmp(ctx->builder, LLVMIntEQ,
						      arg, LLVMConstInt(ctx->i32, 0, 0), ""),
					LLVMBuildICmp(ctx->builder, LLVMIntEQ,
						      arg, all_ones, ""), "");

	return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
}
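
/* Example values (signed find-MSB semantics, as in GLSL findMSB): an input
 * of 0x00000010 yields 4, 0xfffffff0 yields 3 (the highest bit that differs
 * from the sign bit), and both 0 and -1 yield -1, which is what the select
 * on "cond" above implements.
 */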

LLVMValueRef
ac_build_umsb(struct ac_llvm_context *ctx,
	      LLVMValueRef arg,
	      LLVMTypeRef dst_type)
{
	LLVMValueRef args[2] = {
		arg,
		ctx->i1true,
	};
	LLVMValueRef msb = ac_build_intrinsic(ctx, "llvm.ctlz.i32",
					      dst_type, args, ARRAY_SIZE(args),
					      AC_FUNC_ATTR_READNONE);

	/* The HW returns the last bit index from MSB, but TGSI/NIR wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false),
			   msb, "");

	/* check for zero */
	return LLVMBuildSelect(ctx->builder,
			       LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg,
					     LLVMConstInt(ctx->i32, 0, 0), ""),
			       LLVMConstInt(ctx->i32, -1, true), msb, "");
}

LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a,
			   LLVMValueRef b)
{
	LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
	return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}

LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
{
	if (HAVE_LLVM >= 0x0500) {
		LLVMValueRef max[2] = {
			value,
			LLVMConstReal(ctx->f32, 0),
		};
		LLVMValueRef min[2] = {
			LLVMConstReal(ctx->f32, 1),
		};

		min[1] = ac_build_intrinsic(ctx, "llvm.maxnum.f32",
					    ctx->f32, max, 2,
					    AC_FUNC_ATTR_READNONE);
		return ac_build_intrinsic(ctx, "llvm.minnum.f32",
					  ctx->f32, min, 2,
					  AC_FUNC_ATTR_READNONE);
	}

	LLVMValueRef args[3] = {
		value,
		LLVMConstReal(ctx->f32, 0),
		LLVMConstReal(ctx->f32, 1),
	};

	return ac_build_intrinsic(ctx, "llvm.AMDGPU.clamp.", ctx->f32, args, 3,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
{
	LLVMValueRef args[9];

	if (HAVE_LLVM >= 0x0500) {
		args[0] = LLVMConstInt(ctx->i32, a->target, 0);
		args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);

		if (a->compr) {
			LLVMTypeRef i16 = LLVMInt16TypeInContext(ctx->context);
			LLVMTypeRef v2i16 = LLVMVectorType(i16, 2);

			args[2] = LLVMBuildBitCast(ctx->builder, a->out[0],
						   v2i16, "");
			args[3] = LLVMBuildBitCast(ctx->builder, a->out[1],
						   v2i16, "");
			args[4] = LLVMConstInt(ctx->i1, a->done, 0);
			args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);

			ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16",
					   ctx->voidt, args, 6, 0);
		} else {
			args[2] = a->out[0];
			args[3] = a->out[1];
			args[4] = a->out[2];
			args[5] = a->out[3];
			args[6] = LLVMConstInt(ctx->i1, a->done, 0);
			args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);

			ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32",
					   ctx->voidt, args, 8, 0);
		}
		return;
	}

	args[0] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
	args[1] = LLVMConstInt(ctx->i32, a->valid_mask, 0);
	args[2] = LLVMConstInt(ctx->i32, a->done, 0);
	args[3] = LLVMConstInt(ctx->i32, a->target, 0);
	args[4] = LLVMConstInt(ctx->i32, a->compr, 0);
	memcpy(args + 5, a->out, sizeof(a->out[0]) * 4);

	ac_build_intrinsic(ctx, "llvm.SI.export", ctx->voidt, args, 9,
			   AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx,
				   struct ac_image_args *a)
{
	LLVMTypeRef dst_type;
	LLVMValueRef args[11];
	unsigned num_args = 0;
	const char *name = NULL;
	char intr_name[128], type[64];

	if (HAVE_LLVM >= 0x0400) {
		bool sample = a->opcode == ac_image_sample ||
			      a->opcode == ac_image_gather4 ||
			      a->opcode == ac_image_get_lod;

		if (sample)
			args[num_args++] = ac_to_float(ctx, a->addr);
		else
			args[num_args++] = a->addr;

		args[num_args++] = a->resource;
		if (sample)
			args[num_args++] = a->sampler;
		args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, 0);
		if (sample)
			args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, 0);
		args[num_args++] = ctx->i1false; /* glc */
		args[num_args++] = ctx->i1false; /* slc */
		args[num_args++] = ctx->i1false; /* lwe */
		args[num_args++] = LLVMConstInt(ctx->i1, a->da, 0);

		switch (a->opcode) {
		case ac_image_sample:
			name = "llvm.amdgcn.image.sample";
			break;
		case ac_image_gather4:
			name = "llvm.amdgcn.image.gather4";
			break;
		case ac_image_load:
			name = "llvm.amdgcn.image.load";
			break;
		case ac_image_load_mip:
			name = "llvm.amdgcn.image.load.mip";
			break;
		case ac_image_get_lod:
			name = "llvm.amdgcn.image.getlod";
			break;
		case ac_image_get_resinfo:
			name = "llvm.amdgcn.image.getresinfo";
			break;
		default:
			unreachable("invalid image opcode");
		}

		ac_build_type_name_for_intr(LLVMTypeOf(args[0]), type,
					    sizeof(type));

		snprintf(intr_name, sizeof(intr_name), "%s%s%s%s.v4f32.%s.v8i32",
			 name,
			 a->compare ? ".c" : "",
			 a->bias ? ".b" :
			 a->lod ? ".l" :
			 a->deriv ? ".d" :
			 a->level_zero ? ".lz" : "",
			 a->offset ? ".o" : "",
			 type);

		LLVMValueRef result =
			ac_build_intrinsic(ctx, intr_name,
					   ctx->v4f32, args, num_args,
					   AC_FUNC_ATTR_READNONE);
		if (!sample) {
			result = LLVMBuildBitCast(ctx->builder, result,
						  ctx->v4i32, "");
		}
		return result;
	}

	args[num_args++] = a->addr;
	args[num_args++] = a->resource;

	if (a->opcode == ac_image_load ||
	    a->opcode == ac_image_load_mip ||
	    a->opcode == ac_image_get_resinfo) {
		dst_type = ctx->v4i32;
	} else {
		dst_type = ctx->v4f32;
		args[num_args++] = a->sampler;
	}

	args[num_args++] = LLVMConstInt(ctx->i32, a->dmask, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, a->unorm, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* r128 */
	args[num_args++] = LLVMConstInt(ctx->i32, a->da, 0);
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* glc */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* slc */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* tfe */
	args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* lwe */

	switch (a->opcode) {
	case ac_image_sample:
		name = "llvm.SI.image.sample";
		break;
	case ac_image_gather4:
		name = "llvm.SI.gather4";
		break;
	case ac_image_load:
		name = "llvm.SI.image.load";
		break;
	case ac_image_load_mip:
		name = "llvm.SI.image.load.mip";
		break;
	case ac_image_get_lod:
		name = "llvm.SI.getlod";
		break;
	case ac_image_get_resinfo:
		name = "llvm.SI.getresinfo";
		break;
	}

	ac_build_type_name_for_intr(LLVMTypeOf(a->addr), type, sizeof(type));
	snprintf(intr_name, sizeof(intr_name), "%s%s%s%s.%s",
		 name,
		 a->compare ? ".c" : "",
		 a->bias ? ".b" :
		 a->lod ? ".l" :
		 a->deriv ? ".d" :
		 a->level_zero ? ".lz" : "",
		 a->offset ? ".o" : "",
		 type);

	return ac_build_intrinsic(ctx, intr_name,
				  dst_type, args, num_args,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx,
				    LLVMValueRef args[2])
{
	if (HAVE_LLVM >= 0x0500) {
		LLVMTypeRef v2f16 =
			LLVMVectorType(LLVMHalfTypeInContext(ctx->context), 2);
		LLVMValueRef res =
			ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz",
					   v2f16, args, 2,
					   AC_FUNC_ATTR_READNONE);
		return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
	}

	return ac_build_intrinsic(ctx, "llvm.SI.packf16", ctx->i32, args, 2,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
	assert(HAVE_LLVM >= 0x0600);
	return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1,
				  &i1, 1, AC_FUNC_ATTR_READNONE);
}

void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
	if (HAVE_LLVM >= 0x0600) {
		ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt,
				   &i1, 1, 0);
		return;
	}

	LLVMValueRef value = LLVMBuildSelect(ctx->builder, i1,
					     LLVMConstReal(ctx->f32, 1),
					     LLVMConstReal(ctx->f32, -1), "");
	ac_build_intrinsic(ctx, "llvm.AMDGPU.kill", ctx->voidt,
			   &value, 1, AC_FUNC_ATTR_LEGACY);
}

LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input,
			  LLVMValueRef offset, LLVMValueRef width,
			  bool is_signed)
{
	LLVMValueRef args[] = {
		input,
		offset,
		width,
	};

	if (HAVE_LLVM >= 0x0500) {
		return ac_build_intrinsic(ctx,
					  is_signed ? "llvm.amdgcn.sbfe.i32" :
						      "llvm.amdgcn.ubfe.i32",
					  ctx->i32, args, 3,
					  AC_FUNC_ATTR_READNONE);
	}

	return ac_build_intrinsic(ctx,
				  is_signed ? "llvm.AMDGPU.bfe.i32" :
					      "llvm.AMDGPU.bfe.u32",
				  ctx->i32, args, 3,
				  AC_FUNC_ATTR_READNONE |
				  AC_FUNC_ATTR_LEGACY);
}
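
/* Example (bitfield-extract semantics): with input = 0xABCD1234, offset = 8
 * and width = 8, the unsigned variant returns (input >> 8) & 0xff = 0x12;
 * the signed variant additionally sign-extends the extracted field from its
 * top bit.
 */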

void ac_get_image_intr_name(const char *base_name,
			    LLVMTypeRef data_type,
			    LLVMTypeRef coords_type,
			    LLVMTypeRef rsrc_type,
			    char *out_name, unsigned out_len)
{
	char coords_type_name[8];

	ac_build_type_name_for_intr(coords_type, coords_type_name,
				    sizeof(coords_type_name));

	if (HAVE_LLVM <= 0x0309) {
		snprintf(out_name, out_len, "%s.%s", base_name, coords_type_name);
	} else {
		char data_type_name[8];
		char rsrc_type_name[8];

		ac_build_type_name_for_intr(data_type, data_type_name,
					    sizeof(data_type_name));
		ac_build_type_name_for_intr(rsrc_type, rsrc_type_name,
					    sizeof(rsrc_type_name));
		snprintf(out_name, out_len, "%s.%s.%s.%s", base_name,
			 data_type_name, coords_type_name, rsrc_type_name);
	}
}

#define AC_EXP_TARGET (HAVE_LLVM >= 0x0500 ? 0 : 3)
#define AC_EXP_OUT0 (HAVE_LLVM >= 0x0500 ? 2 : 5)

enum ac_ir_type {
	AC_IR_UNDEF,
	AC_IR_CONST,
	AC_IR_VALUE,
};

struct ac_vs_exp_chan {
	LLVMValueRef value;
	float const_float;
	enum ac_ir_type type;
};

struct ac_vs_exp_inst {
	unsigned offset;
	LLVMValueRef inst;
	struct ac_vs_exp_chan chan[4];
};

struct ac_vs_exports {
	unsigned num;
	struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
};

/* Return true if the PARAM export has been eliminated. */
static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset,
				      uint32_t num_outputs,
				      struct ac_vs_exp_inst *exp)
{
	unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
	bool is_zero[4] = {}, is_one[4] = {};

	for (i = 0; i < 4; i++) {
		/* It's a constant expression. Undef outputs are eliminated too. */
		if (exp->chan[i].type == AC_IR_UNDEF) {
			is_zero[i] = true;
			is_one[i] = true;
		} else if (exp->chan[i].type == AC_IR_CONST) {
			if (exp->chan[i].const_float == 0)
				is_zero[i] = true;
			else if (exp->chan[i].const_float == 1)
				is_one[i] = true;
			else
				return false; /* other constant */
		} else
			return false;
	}

	/* Only certain combinations of 0 and 1 can be eliminated. */
	if (is_zero[0] && is_zero[1] && is_zero[2])
		default_val = is_zero[3] ? 0 : 1;
	else if (is_one[0] && is_one[1] && is_one[2])
		default_val = is_zero[3] ? 2 : 3;
	else
		return false;

	/* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
	LLVMInstructionEraseFromParent(exp->inst);

	/* Change OFFSET to DEFAULT_VAL. */
	for (i = 0; i < num_outputs; i++) {
		if (vs_output_param_offset[i] == exp->offset) {
			vs_output_param_offset[i] =
				AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
			break;
		}
	}
	return true;
}
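
/* For reference (this mapping is implied by how default_val is computed
 * above): DEFAULT_VAL 0..3 correspond to the constant vectors (0,0,0,0),
 * (0,0,0,1), (1,1,1,0) and (1,1,1,1), so e.g. an export of (0,0,0,1) is
 * erased and the PS input is told to use DEFAULT_VAL 1 instead. Undef
 * channels count as both 0 and 1, which lets them match either pattern.
 */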

static bool ac_eliminate_duplicated_output(uint8_t *vs_output_param_offset,
					   uint32_t num_outputs,
					   struct ac_vs_exports *processed,
					   struct ac_vs_exp_inst *exp)
{
	unsigned p, copy_back_channels = 0;

	/* See if the output is already in the list of processed outputs.
	 * The LLVMValueRef comparison relies on SSA.
	 */
	for (p = 0; p < processed->num; p++) {
		bool different = false;

		for (unsigned j = 0; j < 4; j++) {
			struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
			struct ac_vs_exp_chan *c2 = &exp->chan[j];

			/* Treat undef as a match. */
			if (c2->type == AC_IR_UNDEF)
				continue;

			/* If c1 is undef but c2 isn't, we can copy c2 to c1
			 * and consider the instruction duplicated.
			 */
			if (c1->type == AC_IR_UNDEF) {
				copy_back_channels |= 1 << j;
				continue;
			}

			/* Test whether the channels are not equal. */
			if (c1->type != c2->type ||
			    (c1->type == AC_IR_CONST &&
			     c1->const_float != c2->const_float) ||
			    (c1->type == AC_IR_VALUE &&
			     c1->value != c2->value)) {
				different = true;
				break;
			}
		}
		if (!different)
			break;

		copy_back_channels = 0;
	}
	if (p == processed->num)
		return false;

	/* If a match was found, but the matching export has undef where the new
	 * one has a normal value, copy the normal value to the undef channel.
	 */
	struct ac_vs_exp_inst *match = &processed->exp[p];

	while (copy_back_channels) {
		unsigned chan = u_bit_scan(&copy_back_channels);

		assert(match->chan[chan].type == AC_IR_UNDEF);
		LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan,
			       exp->chan[chan].value);
		match->chan[chan] = exp->chan[chan];
	}

	/* The PARAM export is duplicated. Kill it. */
	LLVMInstructionEraseFromParent(exp->inst);

	/* Change OFFSET to the matching export. */
	for (unsigned i = 0; i < num_outputs; i++) {
		if (vs_output_param_offset[i] == exp->offset) {
			vs_output_param_offset[i] = match->offset;
			break;
		}
	}
	return true;
}

void ac_optimize_vs_outputs(struct ac_llvm_context *ctx,
			    LLVMValueRef main_fn,
			    uint8_t *vs_output_param_offset,
			    uint32_t num_outputs,
			    uint8_t *num_param_exports)
{
	LLVMBasicBlockRef bb;
	bool removed_any = false;
	struct ac_vs_exports exports;

	exports.num = 0;

	/* Process all LLVM instructions. */
	bb = LLVMGetFirstBasicBlock(main_fn);
	while (bb) {
		LLVMValueRef inst = LLVMGetFirstInstruction(bb);

		while (inst) {
			LLVMValueRef cur = inst;
			inst = LLVMGetNextInstruction(inst);
			struct ac_vs_exp_inst exp;

			if (LLVMGetInstructionOpcode(cur) != LLVMCall)
				continue;

			LLVMValueRef callee = ac_llvm_get_called_value(cur);

			if (!ac_llvm_is_function(callee))
				continue;

			const char *name = LLVMGetValueName(callee);
			unsigned num_args = LLVMCountParams(callee);

			/* Check if this is an export instruction. */
			if ((num_args != 9 && num_args != 8) ||
			    (strcmp(name, "llvm.SI.export") &&
			     strcmp(name, "llvm.amdgcn.exp.f32")))
				continue;

			LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
			unsigned target = LLVMConstIntGetZExtValue(arg);

			if (target < V_008DFC_SQ_EXP_PARAM)
				continue;

			target -= V_008DFC_SQ_EXP_PARAM;

			/* Parse the instruction. */
			memset(&exp, 0, sizeof(exp));
			exp.offset = target;
			exp.inst = cur;

			for (unsigned i = 0; i < 4; i++) {
				LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);

				exp.chan[i].value = v;

				if (LLVMIsUndef(v)) {
					exp.chan[i].type = AC_IR_UNDEF;
				} else if (LLVMIsAConstantFP(v)) {
					LLVMBool loses_info;
					exp.chan[i].type = AC_IR_CONST;
					exp.chan[i].const_float =
						LLVMConstRealGetDouble(v, &loses_info);
				} else {
					exp.chan[i].type = AC_IR_VALUE;
				}
			}

			/* Eliminate constant and duplicated PARAM exports. */
			if (ac_eliminate_const_output(vs_output_param_offset,
						      num_outputs, &exp) ||
			    ac_eliminate_duplicated_output(vs_output_param_offset,
							   num_outputs, &exports,
							   &exp)) {
				removed_any = true;
			} else {
				exports.exp[exports.num++] = exp;
			}
		}
		bb = LLVMGetNextBasicBlock(bb);
	}

	/* Remove holes in export memory due to removed PARAM exports.
	 * This is done by renumbering all PARAM exports.
	 */
	if (removed_any) {
		uint8_t old_offset[VARYING_SLOT_MAX];
		unsigned out, i;

		/* Make a copy of the offsets. We need the old version while
		 * we are modifying some of them. */
		memcpy(old_offset, vs_output_param_offset,
		       sizeof(old_offset));

		for (i = 0; i < exports.num; i++) {
			unsigned offset = exports.exp[i].offset;

			/* Update vs_output_param_offset. Multiple outputs can
			 * have the same offset.
			 */
			for (out = 0; out < num_outputs; out++) {
				if (old_offset[out] == offset)
					vs_output_param_offset[out] = i;
			}

			/* Change the PARAM offset in the instruction. */
			LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
				       LLVMConstInt(ctx->i32,
						    V_008DFC_SQ_EXP_PARAM + i, 0));
		}
		*num_param_exports = exports.num;
	}
}

void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
{
	LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
	ac_build_intrinsic(ctx,
			   "llvm.amdgcn.init.exec", ctx->voidt,
			   &full_mask, 1, AC_FUNC_ATTR_CONVERGENT);
}

void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
{
	unsigned lds_size = ctx->chip_class >= CIK ? 65536 : 32768;
	ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32_0,
				     LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_LOCAL_ADDR_SPACE),
				     "lds");
}

LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx,
			 LLVMValueRef dw_addr)
{
	return ac_build_load(ctx, ctx->lds, dw_addr);
}

void ac_lds_store(struct ac_llvm_context *ctx,
		  LLVMValueRef dw_addr,
		  LLVMValueRef value)
{
	value = ac_to_integer(ctx, value);
	ac_build_indexed_store(ctx, ctx->lds,
			       dw_addr, value);
}

LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx,
			 LLVMTypeRef dst_type,
			 LLVMValueRef src0)
{
	LLVMValueRef params[2] = {
		src0,

		/* The value of 1 means that ffs(x=0) = undef, so LLVM won't
		 * add special code to check for x=0. The reason is that
		 * the LLVM behavior for x=0 is different from what we
		 * need here. However, LLVM also assumes that ffs(x) is
		 * in [0, 31], but GLSL expects that ffs(0) = -1, so
		 * a conditional assignment to handle 0 is still required.
		 *
		 * The hardware already implements the correct behavior.
		 */
		LLVMConstInt(ctx->i1, 1, false),
	};

	LLVMValueRef lsb = ac_build_intrinsic(ctx, "llvm.cttz.i32", ctx->i32,
					      params, 2,
					      AC_FUNC_ATTR_READNONE);

	/* TODO: We need an intrinsic to skip this conditional. */
	/* Check for zero: */
	return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
							   LLVMIntEQ, src0,
							   ctx->i32_0, ""),
			       LLVMConstInt(ctx->i32, -1, 0), lsb, "");
}