gallivm/nir: fix const compact
src/gallium/auxiliary/gallivm/lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

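/*
 * Bitcast a (vector) value to the gallivm vector type matching the given
 * NIR alu type and bit size.  gallivm keeps everything in typed LLVM
 * vectors, so moving between float and int views of the same bits is a
 * plain bitcast; alu types this function does not handle are returned
 * unchanged.
 */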
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}


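/*
 * Pick the float build context for an op's bit size: the double context
 * for 64-bit operations, otherwise the base 32-bit float context.
 */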
static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

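/*
 * Map a GLSL sampler dimension (plus arrayness) onto the corresponding
 * pipe texture target; multisample surfaces map to the 2D targets.
 */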
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

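/*
 * Fetch the LLVM value for a NIR source: SSA defs come straight out of
 * the ssa_defs table, while NIR registers go through the per-register
 * storage tracked in bld_base->regs (with an optional indirect index).
 */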
static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   assign_ssa(bld_base, ssa->index,
              ssa->num_components == 1 ? vals[0] :
              lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

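/*
 * Float comparison producing a 32-bit boolean mask per channel.  All
 * functions except NOTEQUAL go through the ordered variant (false when
 * either operand is NaN); NOTEQUAL stays on the default compare path so
 * NaN != x can yield true, as GLSL requires.  64-bit compares produce
 * 64-bit masks that are truncated back to the 32-bit boolean width.
 */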
static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value, value, value, value, value, value, value, value, value, value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

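/*
 * NIR 32-bit booleans arrive as all-ones/all-zeros masks, so b2f just
 * ANDs the mask with the bit pattern of 1.0f and bitcasts the result to
 * float, extending to double for 64-bit destinations.
 */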
static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
                                 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

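/*
 * Extract the low (hi == false) or high (hi == true) 32-bit halves of a
 * 64-bit vector: bitcast to a twice-as-long i32 vector, then shuffle out
 * every other element, honouring host endianness.
 */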
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

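/*
 * Inverse of split_64bit: interleave two i32 vectors (the low and high
 * halves) into one vector of 64-bit lane pairs, again endian-aware.  The
 * caller bitcasts the result to whichever 64-bit type it wants.
 */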
static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

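/*
 * Integer division that never traps: lanes with a zero divisor get the
 * divisor patched to a non-zero value before the divide (0x7fffffff
 * rather than ~0 in the signed case, so an INT_MIN numerator cannot hit
 * the INT_MIN / -1 overflow trap), and those result lanes are fixed up
 * afterwards, to ~0 for unsigned division as d3d10 requires.
 */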
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   if (!is_unsigned) {
      /* INT_MIN (0x80000000) / -1 (0xffffffff) causes SIGFPE, seen with Blender. */
      div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
   }
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
       * may as well do the same for idiv. */
      return LLVMBuildOr(builder, div_mask, result, "");
}

static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

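/*
 * fquantize2f16: round-trip the value through half precision (FPTrunc
 * then FPExt) so it carries exactly the precision and range of a 16-bit
 * float.
 */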
static LLVMValueRef
do_quantize_to_f16(struct lp_build_nir_context *bld_base,
                   LLVMValueRef src)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   result = LLVMBuildFPTrunc(builder, src, LLVMVectorType(LLVMHalfTypeInContext(gallivm->context), bld_base->base.type.length), "");
   result = LLVMBuildFPExt(builder, result, bld_base->base.vec_type, "");
   return result;
}

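/*
 * Emit one (already scalarized, per-channel) NIR ALU op.  Sources have
 * been cast to the input types the op expects; the appropriate int or
 * float build context is picked from the source bit size in each case.
 */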
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS], LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_fquantize2f16:
      result = do_quantize_to_f16(bld_base, src[0]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

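/*
 * Translate one NIR ALU instruction: fetch and swizzle the sources, then
 * scalarize into per-channel calls to do_alu_action (the vecN ops are the
 * exception, they just gather their casted inputs), and finally cast the
 * results to the op's output type and store them to the destination.
 */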
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->def.bit_size == 32 ? instr->value[i].u32 : instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

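/*
 * Walk a deref chain and split it into a constant slot offset plus an
 * optional indirect (per-lane) offset, optionally peeling off a leading
 * vertex index for per-vertex IO.  Compact variables (e.g. gl_ClipDistance
 * lowered to a float array) are the case this change fixes: with a
 * constant array index the index itself is the offset, and the usual
 * per-slot walk must be skipped.
 */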
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact && nir_src_is_const(instr->arr.index)) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

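/*
 * Lower a load_deref: work out the variable, the vertex index (for the
 * per-vertex IO of GS/TCS/TES) and the constant/indirect offsets, then
 * hand off to the backend's load_var callback.
 */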
static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef indir_vertex_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_in;
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
         var->data.mode == nir_var_shader_in && !var->data.patch;

      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index, indir_vertex_index = NULL;
   if (var) {
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}

static void visit_load_push_constant(struct lp_build_nir_context *bld_base,
                                     nir_intrinsic_instr *instr,
                                     LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);

   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}


static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

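/*
 * Image load: build lp_img_params from the deref (pipe target, binding
 * plus constant/indirect image index, per-lane coordinates, optional MS
 * sample index).  NIR supplies the layer of a 1D array in the second
 * coordinate, so it is copied into the slot the image code expects for
 * array targets.
 */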
static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;

   bld_base->image_op(bld_base, &params);
}

static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;

   bld_base->image_op(bld_base, &params);
}


static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   unsigned const_index;
   LLVMValueRef indir_index;
   const struct glsl_type *type = glsl_without_array(var->type);
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);
   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
   params.texture_unit_offset = indir_index;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

static void visit_image_samples(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   unsigned const_index;
   LLVMValueRef indir_index;
   const struct glsl_type *type = glsl_without_array(var->type);
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
   params.texture_unit_offset = indir_index;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   params.sizes_out = result;
   params.samples_only = true;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_load_global(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                         nir_src_bit_size(instr->src[0]),
                         addr, result);
}

static void visit_store_global(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
}

static void visit_global_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
}

static void visit_interp(struct lp_build_nir_context *bld_base,
                         nir_intrinsic_instr *instr,
                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   unsigned num_components = nir_dest_num_components(instr->dest);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef offsets[2] = { NULL, NULL };
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);
   bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
   bool sample = false;
   if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
      for (unsigned i = 0; i < 2; i++) {
         offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
         offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
      }
   } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
      offsets[0] = get_src(bld_base, instr->src[1]);
      offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
      sample = true;
   }
   bld_base->interp_at(bld_base, num_components, var, centroid, sample, const_index, indir_index, offsets, result);
}

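/*
 * Central intrinsic dispatcher: loads/stores/atomics route to the helpers
 * above, system values and a few one-liners go straight to bld_base
 * callbacks, and the pure memory-barrier intrinsics fall through as
 * no-ops (presumably because CPU-side execution already provides the
 * required memory coherency).
 */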
static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_push_constant:
      visit_load_push_constant(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_work_dim:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_load_helper_invocation:
      bld_base->helper_invocation(bld_base, &result[0]);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_samples:
      visit_image_samples(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_control_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_shared:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   case nir_intrinsic_load_global:
      visit_load_global(bld_base, instr, result);
      break;
   case nir_intrinsic_store_global:
      visit_store_global(bld_base, instr);
      break;
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
      visit_global_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq:
      bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
      break;
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
      visit_interp(bld_base, instr, result);
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

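/*
 * Size-style texture queries (txs, query_levels, texture_samples) all go
 * through the tex_size callback.  The level count comes back in the .w
 * component of the size query result, which is why query_levels assigns
 * from sizes_out[3].
 */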
static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params = { 0 };
   LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
   LLVMValueRef explicit_lod = NULL;
   LLVMValueRef texture_unit_offset = NULL;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;
   params.samples_only = (instr->op == nir_texop_texture_samples);
   params.texture_unit_offset = texture_unit_offset;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

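/*
 * Work out how the LOD varies across the SIMD vector: scalar when the
 * NIR source is dynamically uniform, otherwise per-quad in fragment
 * shaders (unless quad LOD is disabled via GALLIVM_PERF_NO_QUAD_LOD)
 * and per-element in all other stages.
 */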
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   } else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

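/*
 * Translate a NIR texture instruction into a gallivm sample operation:
 * collect coords, derivatives, offsets, comparator and lod/bias from the
 * instruction sources, encode the operation variant in sample_key, fill
 * in lp_sampler_params and invoke the backend tex hook.  Size-style
 * queries are rerouted to visit_txs() above.
 */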
static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, projector = NULL, ms_index = NULL;
   struct lp_sampler_params params;
   struct lp_derivatives derivs;
   unsigned sample_key = 0;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texture_unit_offset = NULL;
   LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
   unsigned lod_src = 0;
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);

   memset(&params, 0, sizeof(params));
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;

   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
      visit_txs(bld_base, instr);
      return;
   }
   if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_tg4) {
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
      sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
   } else if (instr->op == nir_texop_lod)
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (instr->coord_components == 1)
            coords[0] = coord;
         else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = instr->coord_components; chan < 5; chan++)
            coords[chan] = coord_undef;

         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_projector:
         projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
         break;
      case nir_tex_src_comparator:
         sample_key |= LP_SAMPLER_SHADOW;
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         sample_key |= LP_SAMPLER_OFFSETS;
         if (offset_cnt == 1)
            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
            }
         }
         break;
      }
      case nir_tex_src_ms_index:
         sample_key |= LP_SAMPLER_FETCH_MS;
         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;

      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_offset:
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);

   if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
       instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
   else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   if (projector) {
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
      if (sample_key & LP_SAMPLER_SHADOW)
         coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
   }

   uint32_t samp_base_index = 0, tex_base_index = 0;
   if (!sampler_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         samp_base_index = instr->sampler_index;
      }
   }
   if (!texture_deref_instr) {
      int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
      if (tex_src_index == -1) {
         tex_base_index = instr->texture_index;
      }
   }

   if (instr->op == nir_texop_txd) {
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      params.derivs = &derivs;
      if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
   params.sample_key = sample_key;
   params.offsets = offsets;
   params.texture_index = tex_base_index;
   params.texture_index_offset = texture_unit_offset;
   params.sampler_index = samp_base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   params.ms_index = ms_index;
   bld_base->tex(bld_base, &params);
   assign_dest(bld_base, &instr->dest, texel);
}

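/* An SSA undef simply becomes LLVM undef vectors of the right bit size. */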
static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *undef_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(undef_bld->vec_type);
   assign_ssa_dest(bld_base, &instr->def, undef);
}

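/* break/continue map directly onto the backend's masked control-flow helpers. */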
static void visit_jump(struct lp_build_nir_context *bld_base,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}

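/*
 * Only shared- and global-memory derefs need to be materialized as
 * values here; derefs of other variable modes are consumed directly by
 * the load/store intrinsic handlers.
 */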
static void visit_deref(struct lp_build_nir_context *bld_base,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;
   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->dest.ssa.index, result);
}

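/*
 * Emit all instructions of a block.  Phis cannot occur here because the
 * shader was taken out of SSA form in lp_build_nir_llvm(), hence the
 * assert.
 */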
static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         assert(0);
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

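/*
 * Structured control flow maps onto the mask-aware if/else/endif and
 * loop helpers supplied by the backend.
 */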
static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bld_base->if_cond(bld_base, cond);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bld_base->else_stmt(bld_base);
      visit_cf_list(bld_base, &if_stmt->else_list);
   }
   bld_base->endif_stmt(bld_base);
}

static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;

      default:
         assert(0);
      }
   }
}

static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}

/* Vector registers are stored as arrays on the LLVM side, so we can use
 * GEP on them; exec-mask stores have to operate on one component at a
 * time.  The arrays are laid out as:
 *   0.x, 1.x, 2.x, 3.x
 *   0.y, 1.y, 2.y, 3.y
 *   ....
 */
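/* As a sketch (assuming a SIMD width of 4): reading component y of a
 * vec4 register means loading the second <4 x iN> row of the array,
 * and an exec-mask store only scatters lane values into that one row.
 */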
static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
                                     nir_register *reg)
{
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (reg->num_array_elems)
      type = LLVMArrayType(type, reg->num_array_elems);
   if (reg->num_components > 1)
      type = LLVMArrayType(type, reg->num_components);

   return type;
}


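/*
 * Main entry point: lower the shader out of SSA (gallivm consumes
 * registers rather than phis), give every NIR register a matching LLVM
 * alloca, then translate the body of the single function implementation.
 */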
bool lp_build_nir_llvm(
   struct lp_build_nir_context *bld_base,
   struct nir_shader *nir)
{
   struct nir_function *func;

   nir_convert_from_ssa(nir, true);
   nir_lower_locals_to_regs(nir);
   nir_remove_dead_derefs(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp, NULL);

   nir_foreach_shader_out_variable(variable, nir)
      handle_shader_output_decl(bld_base, nir, variable);

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_foreach_register(reg, &func->impl->registers) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
                                                     type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(func->impl);
   bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &func->impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   return true;
}

/* Run a few basic optimization passes to clean up constructs we don't
 * want to have to handle during translation. */
void lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;

   static const struct nir_lower_tex_options lower_tex_options = {
      .lower_tg4_offsets = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);

   /* Iterate until the passes stop making progress; NIR_PASS (unlike
    * NIR_PASS_V) updates the progress flag.
    */
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
      NIR_PASS(progress, nir, nir_lower_tex, &options);
   } while (progress);
   nir_lower_bool_to_int32(nir);
}