src/gallium/auxiliary/gallivm/lp_bld_nir.c
1 /**************************************************************************
2 *
3 * Copyright 2019 Red Hat.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **************************************************************************/
25
26 #include "lp_bld_nir.h"
27 #include "lp_bld_arit.h"
28 #include "lp_bld_bitarit.h"
29 #include "lp_bld_const.h"
30 #include "lp_bld_gather.h"
31 #include "lp_bld_logic.h"
32 #include "lp_bld_quad.h"
33 #include "lp_bld_flow.h"
34 #include "lp_bld_struct.h"
35 #include "lp_bld_debug.h"
36 #include "lp_bld_printf.h"
37 #include "nir_deref.h"
38
39 static void visit_cf_list(struct lp_build_nir_context *bld_base,
40 struct exec_list *list);
41
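/*
 * Bitcast a value to the vector type matching the given NIR ALU type and
 * bit size.  Values are kept as per-channel SoA vectors, so only the
 * element type changes; unhandled ALU types are returned unmodified.
 */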
42 static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
43 nir_alu_type alu_type, unsigned bit_size)
44 {
45 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
46 switch (alu_type) {
47 case nir_type_float:
48 switch (bit_size) {
49 case 32:
50 return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
51 case 64:
52 return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
53 default:
54 assert(0);
55 break;
56 }
57 break;
58 case nir_type_int:
59 switch (bit_size) {
60 case 8:
61 return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
62 case 16:
63 return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
64 case 32:
65 return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
66 case 64:
67 return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
68 default:
69 assert(0);
70 break;
71 }
72 break;
73 case nir_type_uint:
74 switch (bit_size) {
75 case 8:
76 return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
77 case 16:
78 return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
79 case 32:
80 return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
81 case 64:
82 return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
83 default:
84 assert(0);
85 break;
86 }
87 break;
88 case nir_type_uint32:
89 return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
90 default:
91 return val;
92 }
93 return NULL;
94 }
95
96
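/* Pick the float build context for a bit size: the double context for
 * 64-bit, otherwise the 32-bit base context.
 */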
97 static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
98 unsigned op_bit_size)
99 {
100 if (op_bit_size == 64)
101 return &bld_base->dbl_bld;
102 else
103 return &bld_base->base;
104 }
105
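/* Translate a GLSL sampler dimension (plus array flag) into the matching
 * pipe texture target.
 */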
106 static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
107 {
108 unsigned pipe_target = PIPE_BUFFER;
109 switch (sampler_dim) {
110 case GLSL_SAMPLER_DIM_1D:
111 pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
112 break;
113 case GLSL_SAMPLER_DIM_2D:
114 pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
115 break;
116 case GLSL_SAMPLER_DIM_3D:
117 pipe_target = PIPE_TEXTURE_3D;
118 break;
119 case GLSL_SAMPLER_DIM_MS:
120 pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
121 break;
122 case GLSL_SAMPLER_DIM_CUBE:
123 pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
124 break;
125 case GLSL_SAMPLER_DIM_RECT:
126 pipe_target = PIPE_TEXTURE_RECT;
127 break;
128 case GLSL_SAMPLER_DIM_BUF:
129 pipe_target = PIPE_BUFFER;
130 break;
131 default:
132 break;
133 }
134 return pipe_target;
135 }
136
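/*
 * Source fetch helpers: SSA values are read straight from ssa_defs, while
 * register sources go through the backend's load_reg callback with an
 * optional indirect index.
 */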
137 static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
138 {
139 return bld_base->ssa_defs[ssa->index];
140 }
141
142 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);
143
144 static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
145 {
146 struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
147 LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
148 struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
149 LLVMValueRef indir_src = NULL;
150 if (src.indirect)
151 indir_src = get_src(bld_base, *src.indirect);
152 return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
153 }
154
155 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
156 {
157 if (src.is_ssa)
158 return get_ssa_src(bld_base, src.ssa);
159 else
160 return get_reg_src(bld_base, src.reg);
161 }
162
163 static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
164 {
165 bld_base->ssa_defs[idx] = ptr;
166 }
167
168 static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
169 LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
170 {
171 assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] : lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
172 }
173
174 static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
175 unsigned write_mask,
176 LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
177 {
178 struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
179 LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
180 struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
181 LLVMValueRef indir_src = NULL;
182 if (reg->indirect)
183 indir_src = get_src(bld_base, *reg->indirect);
184 bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
185 }
186
187 static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
188 {
189 if (dest->is_ssa)
190 assign_ssa_dest(bld_base, &dest->ssa, vals);
191 else
192 assign_reg(bld_base, &dest->reg, 0, vals);
193 }
194
195 static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
196 {
197 if (dest->dest.is_ssa)
198 assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
199 else
200 assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
201 }
202
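/*
 * Comparison/boolean helpers.  Results are 32-bit per-channel masks (~0 for
 * true, 0 for false); 64-bit comparison results are truncated to 32 bits and
 * narrower integer results are sign-extended.
 */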
203 static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
204 uint32_t src_bit_size,
205 bool is_unsigned,
206 LLVMValueRef val)
207 {
208 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
209 struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
210 LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
211 if (src_bit_size == 64)
212 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
213 return result;
214 }
215
216 static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
217 uint32_t src_bit_size,
218 LLVMValueRef val)
219 {
220 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
221 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
222 LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
223 if (src_bit_size == 64)
224 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
225 return result;
226 }
227
228 static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
229 enum pipe_compare_func compare,
230 uint32_t src_bit_size,
231 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
232 {
233 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
234 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
235 LLVMValueRef result;
236
237 if (compare != PIPE_FUNC_NOTEQUAL)
238 result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
239 else
240 result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
241 if (src_bit_size == 64)
242 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
243 return result;
244 }
245
246 static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
247 enum pipe_compare_func compare,
248 bool is_unsigned,
249 uint32_t src_bit_size,
250 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
251 {
252 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
253 struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
254 LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
255 if (src_bit_size < 32)
256 result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
257 else if (src_bit_size == 64)
258 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
259 return result;
260 }
261
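/*
 * Fetch an ALU source and apply its swizzle: extract a single component,
 * broadcast a scalar to the requested width, or gather a reswizzled
 * aggregate.  negate/abs modifiers are expected to have been lowered.
 */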
262 static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
263 nir_alu_src src,
264 unsigned num_components)
265 {
266 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
267 struct gallivm_state *gallivm = bld_base->base.gallivm;
268 LLVMValueRef value = get_src(bld_base, src.src);
269 bool need_swizzle = false;
270
271 assert(value);
272 unsigned src_components = nir_src_num_components(src.src);
273 for (unsigned i = 0; i < num_components; ++i) {
274 assert(src.swizzle[i] < src_components);
275 if (src.swizzle[i] != i)
276 need_swizzle = true;
277 }
278
279 if (need_swizzle || num_components != src_components) {
280 if (src_components > 1 && num_components == 1) {
281 value = LLVMBuildExtractValue(gallivm->builder, value,
282 src.swizzle[0], "");
283 } else if (src_components == 1 && num_components > 1) {
284 LLVMValueRef values[] = {value, value, value, value, value, value, value, value, value, value, value, value, value, value, value, value};
285 value = lp_nir_array_build_gather_values(builder, values, num_components);
286 } else {
287 LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
288 for (unsigned i = 0; i < num_components; i++)
289 arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
290 value = arr;
291 }
292 }
293 assert(!src.negate);
294 assert(!src.abs);
295 return value;
296 }
297
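/*
 * Boolean-to-float/integer conversions: AND the 32-bit mask with the bit
 * pattern of 1.0 (or the integer 1) so true lanes become 1.0/1 and false
 * lanes become 0, widening to 64 bits when requested.
 */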
298 static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
299 LLVMValueRef src0,
300 unsigned bitsize)
301 {
302 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
303 LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
304 LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
305 1.0), bld_base->int_bld.vec_type, ""),
306 "");
307 result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
308 switch (bitsize) {
309 case 32:
310 break;
311 case 64:
312 result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
313 break;
314 default:
315 unreachable("unsupported bit size.");
316 }
317 return result;
318 }
319
320 static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
321 LLVMValueRef src0,
322 unsigned bitsize)
323 {
324 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
325 LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
326 lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
327 switch (bitsize) {
328 case 32:
329 return result;
330 case 64:
331 return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
332 default:
333 unreachable("unsupported bit size.");
334 }
335 }
336
337 static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
338 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
339 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
340 {
341 LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
342 LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
343 struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
344 return lp_build_select(bld, v, src[1], src[2]);
345 }
346
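/*
 * split_64bit extracts the low or high 32-bit halves of a 64-bit vector via
 * a shuffle; merge_64bit interleaves two 32-bit vectors back into packed
 * 64-bit values.  Lane order depends on endianness.
 */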
347 static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
348 LLVMValueRef src,
349 bool hi)
350 {
351 struct gallivm_state *gallivm = bld_base->base.gallivm;
352 LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
353 LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
354 int len = bld_base->base.type.length * 2;
355 for (unsigned i = 0; i < bld_base->base.type.length; i++) {
356 #if UTIL_ARCH_LITTLE_ENDIAN
357 shuffles[i] = lp_build_const_int32(gallivm, i * 2);
358 shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
359 #else
360 shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
361 shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
362 #endif
363 }
364
365 src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
366 return LLVMBuildShuffleVector(gallivm->builder, src,
367 LLVMGetUndef(LLVMTypeOf(src)),
368 LLVMConstVector(hi ? shuffles2 : shuffles,
369 bld_base->base.type.length),
370 "");
371 }
372
373 static LLVMValueRef
374 merge_64bit(struct lp_build_nir_context *bld_base,
375 LLVMValueRef input,
376 LLVMValueRef input2)
377 {
378 struct gallivm_state *gallivm = bld_base->base.gallivm;
379 LLVMBuilderRef builder = gallivm->builder;
380 int i;
381 LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
382 int len = bld_base->base.type.length * 2;
383 assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));
384
385 for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
386 #if UTIL_ARCH_LITTLE_ENDIAN
387 shuffles[i] = lp_build_const_int32(gallivm, i / 2);
388 shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
389 #else
390 shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
391 shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
392 #endif
393 }
394 return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
395 }
396
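/*
 * Integer division with divide-by-zero protection: zero divisors are patched
 * to all-ones before dividing, then the result is forced to 0 for signed and
 * ~0 for unsigned division by zero (matching d3d10).  do_int_mod applies the
 * same patching and returns ~0 for those lanes.
 */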
397 static LLVMValueRef
398 do_int_divide(struct lp_build_nir_context *bld_base,
399 bool is_unsigned, unsigned src_bit_size,
400 LLVMValueRef src, LLVMValueRef src2)
401 {
402 struct gallivm_state *gallivm = bld_base->base.gallivm;
403 LLVMBuilderRef builder = gallivm->builder;
404 struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
405 struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
406 LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
407 mask_bld->zero);
408
409 if (!is_unsigned) {
410       /* INT_MIN (0x80000000) / -1 (0xffffffff) causes SIGFPE; seen with Blender. */
411 div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
412 }
413 LLVMValueRef divisor = LLVMBuildOr(builder,
414 div_mask,
415 src2, "");
416 LLVMValueRef result = lp_build_div(int_bld, src, divisor);
417
418 if (!is_unsigned) {
419 LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
420 return LLVMBuildAnd(builder, not_div_mask, result, "");
421 } else
422       /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
423        * we may as well do the same for idiv. */
424 return LLVMBuildOr(builder, div_mask, result, "");
425 }
426
427 static LLVMValueRef
428 do_int_mod(struct lp_build_nir_context *bld_base,
429 bool is_unsigned, unsigned src_bit_size,
430 LLVMValueRef src, LLVMValueRef src2)
431 {
432 struct gallivm_state *gallivm = bld_base->base.gallivm;
433 LLVMBuilderRef builder = gallivm->builder;
434 struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
435 LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
436 int_bld->zero);
437 LLVMValueRef divisor = LLVMBuildOr(builder,
438 div_mask,
439 src2, "");
440 LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
441 return LLVMBuildOr(builder, div_mask, result, "");
442 }
443
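/* Emit the LLVM IR for one NIR ALU opcode on already-extracted per-channel
 * sources.
 */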
444 static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
445 nir_op op, unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS], LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
446 {
447 struct gallivm_state *gallivm = bld_base->base.gallivm;
448 LLVMBuilderRef builder = gallivm->builder;
449 LLVMValueRef result;
450 switch (op) {
451 case nir_op_b2f32:
452 result = emit_b2f(bld_base, src[0], 32);
453 break;
454 case nir_op_b2f64:
455 result = emit_b2f(bld_base, src[0], 64);
456 break;
457 case nir_op_b2i32:
458 result = emit_b2i(bld_base, src[0], 32);
459 break;
460 case nir_op_b2i64:
461 result = emit_b2i(bld_base, src[0], 64);
462 break;
463 case nir_op_b32csel:
464 result = emit_b32csel(bld_base, src_bit_size, src);
465 break;
466 case nir_op_bit_count:
467 result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
468 break;
469 case nir_op_bitfield_select:
470 result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
471 break;
472 case nir_op_bitfield_reverse:
473 result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
474 break;
475 case nir_op_f2b32:
476 result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
477 break;
478 case nir_op_f2f32:
479 result = LLVMBuildFPTrunc(builder, src[0],
480 bld_base->base.vec_type, "");
481 break;
482 case nir_op_f2f64:
483 result = LLVMBuildFPExt(builder, src[0],
484 bld_base->dbl_bld.vec_type, "");
485 break;
486 case nir_op_f2i32:
487 result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
488 break;
489 case nir_op_f2u32:
490 result = LLVMBuildFPToUI(builder,
491 src[0],
492 bld_base->base.int_vec_type, "");
493 break;
494 case nir_op_f2i64:
495 result = LLVMBuildFPToSI(builder,
496 src[0],
497 bld_base->int64_bld.vec_type, "");
498 break;
499 case nir_op_f2u64:
500 result = LLVMBuildFPToUI(builder,
501 src[0],
502 bld_base->uint64_bld.vec_type, "");
503 break;
504 case nir_op_fabs:
505 result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
506 break;
507 case nir_op_fadd:
508 result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
509 src[0], src[1]);
510 break;
511 case nir_op_fceil:
512 result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
513 break;
514 case nir_op_fcos:
515 result = lp_build_cos(&bld_base->base, src[0]);
516 break;
517 case nir_op_fddx:
518 case nir_op_fddx_coarse:
519 case nir_op_fddx_fine:
520 result = lp_build_ddx(&bld_base->base, src[0]);
521 break;
522 case nir_op_fddy:
523 case nir_op_fddy_coarse:
524 case nir_op_fddy_fine:
525 result = lp_build_ddy(&bld_base->base, src[0]);
526 break;
527 case nir_op_fdiv:
528 result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
529 src[0], src[1]);
530 break;
531 case nir_op_feq32:
532 result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
533 break;
534 case nir_op_fexp2:
535 result = lp_build_exp2(&bld_base->base, src[0]);
536 break;
537 case nir_op_ffloor:
538 result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
539 break;
540 case nir_op_ffma:
541 result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
542 break;
543 case nir_op_ffract: {
544 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
545 LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
546 result = lp_build_sub(flt_bld, src[0], tmp);
547 break;
548 }
549 case nir_op_fge32:
550 result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
551 break;
552 case nir_op_find_lsb:
553 result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
554 break;
555 case nir_op_flog2:
556 result = lp_build_log2_safe(&bld_base->base, src[0]);
557 break;
558 case nir_op_flt32:
559 result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
560 break;
561 case nir_op_fmin:
562 result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
563 break;
564 case nir_op_fmod: {
565 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
566 result = lp_build_div(flt_bld, src[0], src[1]);
567 result = lp_build_floor(flt_bld, result);
568 result = lp_build_mul(flt_bld, src[1], result);
569 result = lp_build_sub(flt_bld, src[0], result);
570 break;
571 }
572 case nir_op_fmul:
573 result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
574 src[0], src[1]);
575 break;
576 case nir_op_fmax:
577 result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
578 break;
579 case nir_op_fne32:
580 result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
581 break;
582 case nir_op_fneg:
583 result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
584 break;
585 case nir_op_fpow:
586 result = lp_build_pow(&bld_base->base, src[0], src[1]);
587 break;
588 case nir_op_frcp:
589 result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
590 break;
591 case nir_op_fround_even:
592 result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
593 break;
594 case nir_op_frsq:
595 result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
596 break;
597 case nir_op_fsat:
598 result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
599 break;
600 case nir_op_fsign:
601 result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
602 break;
603 case nir_op_fsin:
604 result = lp_build_sin(&bld_base->base, src[0]);
605 break;
606 case nir_op_fsqrt:
607 result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
608 break;
609 case nir_op_ftrunc:
610 result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
611 break;
612 case nir_op_i2b32:
613 result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
614 break;
615 case nir_op_i2f32:
616 result = lp_build_int_to_float(&bld_base->base, src[0]);
617 break;
618 case nir_op_i2f64:
619 result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
620 break;
621 case nir_op_i2i8:
622 result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
623 break;
624 case nir_op_i2i16:
625 if (src_bit_size[0] < 16)
626 result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
627 else
628 result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
629 break;
630 case nir_op_i2i32:
631 if (src_bit_size[0] < 32)
632 result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
633 else
634 result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
635 break;
636 case nir_op_i2i64:
637 result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
638 break;
639 case nir_op_iabs:
640 result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
641 break;
642 case nir_op_iadd:
643 result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
644 src[0], src[1]);
645 break;
646 case nir_op_iand:
647 result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
648 src[0], src[1]);
649 break;
650 case nir_op_idiv:
651 result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
652 break;
653 case nir_op_ieq32:
654 result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
655 break;
656 case nir_op_ige32:
657 result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
658 break;
659 case nir_op_ilt32:
660 result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
661 break;
662 case nir_op_imax:
663 result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
664 break;
665 case nir_op_imin:
666 result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
667 break;
668 case nir_op_imul:
669 case nir_op_imul24:
670 result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
671 src[0], src[1]);
672 break;
673 case nir_op_imul_high: {
674 LLVMValueRef hi_bits;
675 lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
676 result = hi_bits;
677 break;
678 }
679 case nir_op_ine32:
680 result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
681 break;
682 case nir_op_ineg:
683 result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
684 break;
685 case nir_op_inot:
686 result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
687 break;
688 case nir_op_ior:
689 result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
690 src[0], src[1]);
691 break;
692 case nir_op_imod:
693 case nir_op_irem:
694 result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
695 break;
696 case nir_op_ishl: {
697 struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
698 struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
699 if (src_bit_size[0] == 64)
700 src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
701 if (src_bit_size[0] < 32)
702 src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
703 src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
704 result = lp_build_shl(int_bld, src[0], src[1]);
705 break;
706 }
707 case nir_op_ishr: {
708 struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
709 struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
710 if (src_bit_size[0] == 64)
711 src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
712 if (src_bit_size[0] < 32)
713 src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
714 src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
715 result = lp_build_shr(int_bld, src[0], src[1]);
716 break;
717 }
718 case nir_op_isign:
719 result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
720 break;
721 case nir_op_isub:
722 result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
723 src[0], src[1]);
724 break;
725 case nir_op_ixor:
726 result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
727 src[0], src[1]);
728 break;
729 case nir_op_mov:
730 result = src[0];
731 break;
732 case nir_op_unpack_64_2x32_split_x:
733 result = split_64bit(bld_base, src[0], false);
734 break;
735 case nir_op_unpack_64_2x32_split_y:
736 result = split_64bit(bld_base, src[0], true);
737 break;
738
739 case nir_op_pack_64_2x32_split: {
740 LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
741 result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
742 break;
743 }
744 case nir_op_u2f32:
745 result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
746 break;
747 case nir_op_u2f64:
748 result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
749 break;
750 case nir_op_u2u8:
751 result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
752 break;
753 case nir_op_u2u16:
754 if (src_bit_size[0] < 16)
755 result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
756 else
757 result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
758 break;
759 case nir_op_u2u32:
760 if (src_bit_size[0] < 32)
761 result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
762 else
763 result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
764 break;
765 case nir_op_u2u64:
766 result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
767 break;
768 case nir_op_udiv:
769 result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
770 break;
771 case nir_op_ufind_msb: {
772 struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
773 result = lp_build_ctlz(uint_bld, src[0]);
774 result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
775 break;
776 }
777 case nir_op_uge32:
778 result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
779 break;
780 case nir_op_ult32:
781 result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
782 break;
783 case nir_op_umax:
784 result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
785 break;
786 case nir_op_umin:
787 result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
788 break;
789 case nir_op_umod:
790 result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
791 break;
792 case nir_op_umul_high: {
793 LLVMValueRef hi_bits;
794 lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
795 result = hi_bits;
796 break;
797 }
798 case nir_op_ushr: {
799 struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
800 if (src_bit_size[0] == 64)
801 src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
802 if (src_bit_size[0] < 32)
803 src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
804 src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
805 result = lp_build_shr(uint_bld, src[0], src[1]);
806 break;
807 }
808 default:
809 assert(0);
810 break;
811 }
812 return result;
813 }
814
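/*
 * Handle a NIR ALU instruction: fetch and cast the sources, run
 * do_alu_action once per destination component (the vecN ops just collect
 * their casted inputs), then write the destination.
 */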
815 static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
816 {
817 struct gallivm_state *gallivm = bld_base->base.gallivm;
818 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
819 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
820 unsigned num_components = nir_dest_num_components(instr->dest.dest);
821 unsigned src_components;
822 switch (instr->op) {
823 case nir_op_vec2:
824 case nir_op_vec3:
825 case nir_op_vec4:
826 case nir_op_vec8:
827 case nir_op_vec16:
828 src_components = 1;
829 break;
830 case nir_op_pack_half_2x16:
831 src_components = 2;
832 break;
833 case nir_op_unpack_half_2x16:
834 src_components = 1;
835 break;
836 case nir_op_cube_face_coord:
837 case nir_op_cube_face_index:
838 src_components = 3;
839 break;
840 default:
841 src_components = num_components;
842 break;
843 }
844 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
845 src[i] = get_alu_src(bld_base, instr->src[i], src_components);
846 src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
847 }
848
849 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
850 if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
851 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
852 result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
853 }
854 } else {
855 for (unsigned c = 0; c < num_components; c++) {
856 LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];
857
858 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
859 if (num_components > 1) {
860 src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
861 src[i], c, "");
862 } else
863 src_chan[i] = src[i];
864 src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
865 }
866 result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
867 result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
868 }
869 }
870 assign_alu_dest(bld_base, &instr->dest, result);
871 }
872
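/* Load-const instructions become constant integer vectors splatted across
 * all SIMD lanes.
 */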
873 static void visit_load_const(struct lp_build_nir_context *bld_base,
874 const nir_load_const_instr *instr)
875 {
876 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
877 struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
878 for (unsigned i = 0; i < instr->def.num_components; i++)
879 result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->def.bit_size == 32 ? instr->value[i].u32 : instr->value[i].u64);
880 assign_ssa_dest(bld_base, &instr->def, result);
881 }
882
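/*
 * Walk a variable deref chain and split it into a constant slot offset plus
 * an optional per-lane indirect offset; for per-vertex I/O the leading array
 * index is returned separately as a (constant or indirect) vertex index.
 */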
883 static void
884 get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
885 bool vs_in, unsigned *vertex_index_out,
886 LLVMValueRef *vertex_index_ref,
887 unsigned *const_out, LLVMValueRef *indir_out)
888 {
889 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
890 nir_variable *var = nir_deref_instr_get_variable(instr);
891 nir_deref_path path;
892 unsigned idx_lvl = 1;
893
894 nir_deref_path_init(&path, instr, NULL);
895
896 if (vertex_index_out != NULL || vertex_index_ref != NULL) {
897 if (vertex_index_ref) {
898 *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
899 if (vertex_index_out)
900 *vertex_index_out = 0;
901 } else {
902 *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
903 }
904 ++idx_lvl;
905 }
906
907 uint32_t const_offset = 0;
908 LLVMValueRef offset = NULL;
909
910 if (var->data.compact) {
911 assert(instr->deref_type == nir_deref_type_array);
912 const_offset = nir_src_as_uint(instr->arr.index);
913 goto out;
914 }
915
916 for (; path.path[idx_lvl]; ++idx_lvl) {
917 const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
918 if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
919 unsigned index = path.path[idx_lvl]->strct.index;
920
921 for (unsigned i = 0; i < index; i++) {
922 const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
923 const_offset += glsl_count_attribute_slots(ft, vs_in);
924 }
925 } else if(path.path[idx_lvl]->deref_type == nir_deref_type_array) {
926 unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
927 if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
928 const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
929 } else {
930 LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
931 idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
932 LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
933 idx_src);
934 if (offset)
935 offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
936 else
937 offset = array_off;
938 }
939 } else
940          unreachable("Unhandled deref type in get_deref_offset");
941 }
942
943 out:
944 nir_deref_path_finish(&path);
945
946 if (const_offset && offset)
947 offset = LLVMBuildAdd(builder, offset,
948 lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
949 "");
950 *const_out = const_offset;
951 *indir_out = offset;
952 }
953
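/*
 * Variable access: compute the deref offset and defer to the backend's
 * load_var/store_var callbacks, which know how inputs, outputs and
 * temporaries are laid out for the current shader stage.
 */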
954 static void visit_load_var(struct lp_build_nir_context *bld_base,
955 nir_intrinsic_instr *instr,
956 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
957 {
958 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
959 nir_variable *var = nir_deref_instr_get_variable(deref);
960 nir_variable_mode mode = deref->mode;
961 unsigned const_index;
962 LLVMValueRef indir_index;
963 LLVMValueRef indir_vertex_index = NULL;
964 unsigned vertex_index = 0;
965 unsigned nc = nir_dest_num_components(instr->dest);
966 unsigned bit_size = nir_dest_bit_size(instr->dest);
967 if (var) {
968 bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
969 var->data.mode == nir_var_shader_in;
970 bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
971 var->data.mode == nir_var_shader_in;
972 bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
973 var->data.mode == nir_var_shader_in;
974 bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
975 var->data.mode == nir_var_shader_out && !var->data.patch;
976 bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
977 var->data.mode == nir_var_shader_in && !var->data.patch;
978
979 mode = var->data.mode;
980
981 get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
982 &const_index, &indir_index);
983 }
984 bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
985 }
986
987 static void
988 visit_store_var(struct lp_build_nir_context *bld_base,
989 nir_intrinsic_instr *instr)
990 {
991 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
992 nir_variable *var = nir_deref_instr_get_variable(deref);
993 nir_variable_mode mode = deref->mode;
994 int writemask = instr->const_index[0];
995 unsigned bit_size = nir_src_bit_size(instr->src[1]);
996 LLVMValueRef src = get_src(bld_base, instr->src[1]);
997 unsigned const_index = 0;
998 LLVMValueRef indir_index, indir_vertex_index = NULL;
999 if (var) {
1000 bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1001 var->data.mode == nir_var_shader_out && !var->data.patch;
1002 get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
1003 &const_index, &indir_index);
1004 }
1005 bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
1006 }
1007
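/* UBO loads pass a scalar buffer index and a byte offset to the backend;
 * push constants are treated as UBO index 0.
 */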
1008 static void visit_load_ubo(struct lp_build_nir_context *bld_base,
1009 nir_intrinsic_instr *instr,
1010 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1011 {
1012 struct gallivm_state *gallivm = bld_base->base.gallivm;
1013 LLVMBuilderRef builder = gallivm->builder;
1014 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1015 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1016
1017 bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
1018 idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
1019 bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1020 offset_is_uniform, idx, offset, result);
1021 }
1022
1023 static void visit_load_push_constant(struct lp_build_nir_context *bld_base,
1024 nir_intrinsic_instr *instr,
1025 LLVMValueRef result[4])
1026 {
1027 struct gallivm_state *gallivm = bld_base->base.gallivm;
1028 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1029 LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
1030 bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
1031
1032 bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1033 offset_is_uniform, idx, offset, result);
1034 }
1035
1036
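/* SSBO loads and stores forward the buffer index, byte offset and write mask
 * to the backend memory hooks.
 */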
1037 static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
1038 nir_intrinsic_instr *instr,
1039 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1040 {
1041 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1042 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1043 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1044 idx, offset, result);
1045 }
1046
1047 static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
1048 nir_intrinsic_instr *instr)
1049 {
1050 LLVMValueRef val = get_src(bld_base, instr->src[0]);
1051 LLVMValueRef idx = get_src(bld_base, instr->src[1]);
1052 LLVMValueRef offset = get_src(bld_base, instr->src[2]);
1053 int writemask = instr->const_index[0];
1054 int nc = nir_src_num_components(instr->src[0]);
1055 int bitsize = nir_src_bit_size(instr->src[0]);
1056 bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
1057 }
1058
1059 static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
1060 nir_intrinsic_instr *instr,
1061 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1062 {
1063 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1064 result[0] = bld_base->get_buffer_size(bld_base, idx);
1065 }
1066
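/* SSBO atomics pass the buffer index, offset and operand(s) to atomic_mem;
 * comp_swap carries a second data operand.
 */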
1067 static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
1068 nir_intrinsic_instr *instr,
1069 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1070 {
1071 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
1072 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1073 LLVMValueRef val = get_src(bld_base, instr->src[2]);
1074 LLVMValueRef val2 = NULL;
1075 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
1076 val2 = get_src(bld_base, instr->src[3]);
1077
1078 bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
1079
1080 }
1081
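/*
 * Image load/store: build lp_img_params from the deref (pipe target,
 * coordinates, optional sample index, binding plus any indirect index) and
 * invoke the backend image_op hook.  1D arrays carry their layer in
 * coords[2].
 */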
1082 static void visit_load_image(struct lp_build_nir_context *bld_base,
1083 nir_intrinsic_instr *instr,
1084 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1085 {
1086 struct gallivm_state *gallivm = bld_base->base.gallivm;
1087 LLVMBuilderRef builder = gallivm->builder;
1088 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1089 nir_variable *var = nir_deref_instr_get_variable(deref);
1090 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1091 LLVMValueRef coords[5];
1092 struct lp_img_params params;
1093 const struct glsl_type *type = glsl_without_array(var->type);
1094 unsigned const_index;
1095 LLVMValueRef indir_index;
1096 get_deref_offset(bld_base, deref, false, NULL, NULL,
1097 &const_index, &indir_index);
1098
1099 memset(&params, 0, sizeof(params));
1100 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1101 for (unsigned i = 0; i < 4; i++)
1102 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1103 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1104 coords[2] = coords[1];
1105
1106 params.coords = coords;
1107 params.outdata = result;
1108 params.img_op = LP_IMG_LOAD;
1109 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
1110 params.ms_index = get_src(bld_base, instr->src[2]);
1111 params.image_index = var->data.binding + (indir_index ? 0 : const_index);
1112 params.image_index_offset = indir_index;
1113 bld_base->image_op(bld_base, &params);
1114 }
1115
1116 static void visit_store_image(struct lp_build_nir_context *bld_base,
1117 nir_intrinsic_instr *instr)
1118 {
1119 struct gallivm_state *gallivm = bld_base->base.gallivm;
1120 LLVMBuilderRef builder = gallivm->builder;
1121 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1122 nir_variable *var = nir_deref_instr_get_variable(deref);
1123 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1124 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1125 LLVMValueRef coords[5];
1126 struct lp_img_params params;
1127 const struct glsl_type *type = glsl_without_array(var->type);
1128 unsigned const_index;
1129 LLVMValueRef indir_index;
1130 get_deref_offset(bld_base, deref, false, NULL, NULL,
1131 &const_index, &indir_index);
1132
1133 memset(&params, 0, sizeof(params));
1134 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1135 for (unsigned i = 0; i < 4; i++)
1136 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1137 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1138 coords[2] = coords[1];
1139 params.coords = coords;
1140
1141 for (unsigned i = 0; i < 4; i++) {
1142 params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
1143 params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
1144 }
1145 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
1146 params.ms_index = get_src(bld_base, instr->src[2]);
1147 params.img_op = LP_IMG_STORE;
1148 params.image_index = var->data.binding + (indir_index ? 0 : const_index);
1149 params.image_index_offset = indir_index;
1150
1153 bld_base->image_op(bld_base, &params);
1154 }
1155
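/* Image atomics map the NIR intrinsic onto the matching LLVMAtomicRMWBinOp;
 * comp_swap is emitted as a compare-and-swap (LP_IMG_ATOMIC_CAS) with a
 * second data operand.
 */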
1156 static void visit_atomic_image(struct lp_build_nir_context *bld_base,
1157 nir_intrinsic_instr *instr,
1158 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1159 {
1160 struct gallivm_state *gallivm = bld_base->base.gallivm;
1161 LLVMBuilderRef builder = gallivm->builder;
1162 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1163 nir_variable *var = nir_deref_instr_get_variable(deref);
1164 struct lp_img_params params;
1165 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1166 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1167 LLVMValueRef coords[5];
1168 const struct glsl_type *type = glsl_without_array(var->type);
1169 unsigned const_index;
1170 LLVMValueRef indir_index;
1171 get_deref_offset(bld_base, deref, false, NULL, NULL,
1172 &const_index, &indir_index);
1173
1174 memset(&params, 0, sizeof(params));
1175
1176 switch (instr->intrinsic) {
1177 case nir_intrinsic_image_deref_atomic_add:
1178 params.op = LLVMAtomicRMWBinOpAdd;
1179 break;
1180 case nir_intrinsic_image_deref_atomic_exchange:
1181 params.op = LLVMAtomicRMWBinOpXchg;
1182 break;
1183 case nir_intrinsic_image_deref_atomic_and:
1184 params.op = LLVMAtomicRMWBinOpAnd;
1185 break;
1186 case nir_intrinsic_image_deref_atomic_or:
1187 params.op = LLVMAtomicRMWBinOpOr;
1188 break;
1189 case nir_intrinsic_image_deref_atomic_xor:
1190 params.op = LLVMAtomicRMWBinOpXor;
1191 break;
1192 case nir_intrinsic_image_deref_atomic_umin:
1193 params.op = LLVMAtomicRMWBinOpUMin;
1194 break;
1195 case nir_intrinsic_image_deref_atomic_umax:
1196 params.op = LLVMAtomicRMWBinOpUMax;
1197 break;
1198 case nir_intrinsic_image_deref_atomic_imin:
1199 params.op = LLVMAtomicRMWBinOpMin;
1200 break;
1201 case nir_intrinsic_image_deref_atomic_imax:
1202 params.op = LLVMAtomicRMWBinOpMax;
1203 break;
1204 default:
1205 break;
1206 }
1207
1208 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1209 for (unsigned i = 0; i < 4; i++)
1210 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1211 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1212 coords[2] = coords[1];
1213 params.coords = coords;
1214 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
1215 params.ms_index = get_src(bld_base, instr->src[2]);
1216 if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
1217 LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1218 params.indata[0] = in_val;
1219 params.indata2[0] = cas_val;
1220 } else
1221 params.indata[0] = in_val;
1222
1223 params.outdata = result;
1224 params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
1225 params.image_index = var->data.binding + (indir_index ? 0 : const_index);
1226 params.image_index_offset = indir_index;
1227
1228 bld_base->image_op(bld_base, &params);
1229 }
1230
1231
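/* Image size and sample-count queries go through the backend's image_size
 * hook via lp_sampler_size_query_params.
 */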
1232 static void visit_image_size(struct lp_build_nir_context *bld_base,
1233 nir_intrinsic_instr *instr,
1234 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1235 {
1236 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1237 nir_variable *var = nir_deref_instr_get_variable(deref);
1238 struct lp_sampler_size_query_params params = { 0 };
1239 unsigned const_index;
1240 LLVMValueRef indir_index;
1241 const struct glsl_type *type = glsl_without_array(var->type);
1242 get_deref_offset(bld_base, deref, false, NULL, NULL,
1243 &const_index, &indir_index);
1244 params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
1245 params.texture_unit_offset = indir_index;
1246 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1247 params.sizes_out = result;
1248
1249 bld_base->image_size(bld_base, &params);
1250 }
1251
1252 static void visit_image_samples(struct lp_build_nir_context *bld_base,
1253 nir_intrinsic_instr *instr,
1254 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1255 {
1256 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1257 nir_variable *var = nir_deref_instr_get_variable(deref);
1258 struct lp_sampler_size_query_params params = { 0 };
1259 unsigned const_index;
1260 LLVMValueRef indir_index;
1261 const struct glsl_type *type = glsl_without_array(var->type);
1262 get_deref_offset(bld_base, deref, false, NULL, NULL,
1263 &const_index, &indir_index);
1264
1265 params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
1266 params.texture_unit_offset = indir_index;
1267 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1268 params.sizes_out = result;
1269 params.samples_only = true;
1270
1271 bld_base->image_size(bld_base, &params);
1272 }
1273
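/* Shared (workgroup) memory reuses the SSBO load/store/atomic hooks with a
 * NULL buffer index.
 */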
1274 static void visit_shared_load(struct lp_build_nir_context *bld_base,
1275 nir_intrinsic_instr *instr,
1276 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1277 {
1278 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1279 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1280 NULL, offset, result);
1281 }
1282
1283 static void visit_shared_store(struct lp_build_nir_context *bld_base,
1284 nir_intrinsic_instr *instr)
1285 {
1286 LLVMValueRef val = get_src(bld_base, instr->src[0]);
1287 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1288 int writemask = instr->const_index[1];
1289 int nc = nir_src_num_components(instr->src[0]);
1290 int bitsize = nir_src_bit_size(instr->src[0]);
1291 bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
1292 }
1293
1294 static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
1295 nir_intrinsic_instr *instr,
1296 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1297 {
1298 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1299 LLVMValueRef val = get_src(bld_base, instr->src[1]);
1300 LLVMValueRef val2 = NULL;
1301 if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
1302 val2 = get_src(bld_base, instr->src[2]);
1303
1304 bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
1305
1306 }
1307
1308 static void visit_barrier(struct lp_build_nir_context *bld_base)
1309 {
1310 bld_base->barrier(bld_base);
1311 }
1312
1313 static void visit_discard(struct lp_build_nir_context *bld_base,
1314 nir_intrinsic_instr *instr)
1315 {
1316 LLVMValueRef cond = NULL;
1317 if (instr->intrinsic == nir_intrinsic_discard_if) {
1318 cond = get_src(bld_base, instr->src[0]);
1319 cond = cast_type(bld_base, cond, nir_type_int, 32);
1320 }
1321 bld_base->discard(bld_base, cond);
1322 }
1323
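/* Compute/OpenCL paths: kernel arguments are loaded by byte offset, while
 * the global load/store/atomic intrinsics operate on raw pointer addresses.
 */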
1324 static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
1325 nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1326 {
1327 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1328
1329 bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
1330 bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1331 nir_src_bit_size(instr->src[0]),
1332 offset_is_uniform, offset, result);
1333 }
1334
1335 static void visit_load_global(struct lp_build_nir_context *bld_base,
1336 nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1337 {
1338 LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1339 bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1340 nir_src_bit_size(instr->src[0]),
1341 addr, result);
1342 }
1343
1344 static void visit_store_global(struct lp_build_nir_context *bld_base,
1345 nir_intrinsic_instr *instr)
1346 {
1347 LLVMValueRef val = get_src(bld_base, instr->src[0]);
1348 int nc = nir_src_num_components(instr->src[0]);
1349 int bitsize = nir_src_bit_size(instr->src[0]);
1350 LLVMValueRef addr = get_src(bld_base, instr->src[1]);
1351 int addr_bitsize = nir_src_bit_size(instr->src[1]);
1352 int writemask = instr->const_index[0];
1353 bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
1354 }
1355
1356 static void visit_global_atomic(struct lp_build_nir_context *bld_base,
1357 nir_intrinsic_instr *instr,
1358 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1359 {
1360 LLVMValueRef addr = get_src(bld_base, instr->src[0]);
1361 LLVMValueRef val = get_src(bld_base, instr->src[1]);
1362 LLVMValueRef val2 = NULL;
1363 int addr_bitsize = nir_src_bit_size(instr->src[0]);
1364 if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
1365 val2 = get_src(bld_base, instr->src[2]);
1366
1367 bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
1368 }
1369
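/* interpolateAt* intrinsics: at_offset supplies two float offsets, at_sample
 * an integer sample index, at_centroid neither; the backend's interp_at hook
 * performs the interpolation.
 */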
1370 static void visit_interp(struct lp_build_nir_context *bld_base,
1371 nir_intrinsic_instr *instr,
1372 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1373 {
1374 struct gallivm_state *gallivm = bld_base->base.gallivm;
1375 LLVMBuilderRef builder = gallivm->builder;
1376 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1377 unsigned num_components = nir_dest_num_components(instr->dest);
1378 nir_variable *var = nir_deref_instr_get_variable(deref);
1379 unsigned const_index;
1380 LLVMValueRef indir_index;
1381 LLVMValueRef offsets[2] = { NULL, NULL };
1382 get_deref_offset(bld_base, deref, false, NULL, NULL,
1383 &const_index, &indir_index);
1384 bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
1385 bool sample = false;
1386 if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
1387 for (unsigned i = 0; i < 2; i++) {
1388 offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
1389 offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
1390 }
1391 } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
1392 offsets[0] = get_src(bld_base, instr->src[1]);
1393 offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
1394 sample = true;
1395 }
1396 bld_base->interp_at(bld_base, num_components, var, centroid, sample, const_index, indir_index, offsets, result);
1397 }
1398
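/* Dispatch NIR intrinsics to the handlers above; any handler that fills in
 * result[] has the value stored to the instruction's destination at the end.
 */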
1399 static void visit_intrinsic(struct lp_build_nir_context *bld_base,
1400 nir_intrinsic_instr *instr)
1401 {
1402 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
1403 switch (instr->intrinsic) {
1404 case nir_intrinsic_load_deref:
1405 visit_load_var(bld_base, instr, result);
1406 break;
1407 case nir_intrinsic_store_deref:
1408 visit_store_var(bld_base, instr);
1409 break;
1410 case nir_intrinsic_load_ubo:
1411 visit_load_ubo(bld_base, instr, result);
1412 break;
1413 case nir_intrinsic_load_push_constant:
1414 visit_load_push_constant(bld_base, instr, result);
1415 break;
1416 case nir_intrinsic_load_ssbo:
1417 visit_load_ssbo(bld_base, instr, result);
1418 break;
1419 case nir_intrinsic_store_ssbo:
1420 visit_store_ssbo(bld_base, instr);
1421 break;
1422 case nir_intrinsic_get_buffer_size:
1423 visit_get_buffer_size(bld_base, instr, result);
1424 break;
1425 case nir_intrinsic_load_vertex_id:
1426 case nir_intrinsic_load_primitive_id:
1427 case nir_intrinsic_load_instance_id:
1428 case nir_intrinsic_load_base_instance:
1429 case nir_intrinsic_load_base_vertex:
1430 case nir_intrinsic_load_work_group_id:
1431 case nir_intrinsic_load_local_invocation_id:
1432 case nir_intrinsic_load_num_work_groups:
1433 case nir_intrinsic_load_invocation_id:
1434 case nir_intrinsic_load_front_face:
1435 case nir_intrinsic_load_draw_id:
1436 case nir_intrinsic_load_local_group_size:
1437 case nir_intrinsic_load_work_dim:
1438 case nir_intrinsic_load_tess_coord:
1439 case nir_intrinsic_load_tess_level_outer:
1440 case nir_intrinsic_load_tess_level_inner:
1441 case nir_intrinsic_load_patch_vertices_in:
1442 case nir_intrinsic_load_sample_id:
1443 case nir_intrinsic_load_sample_pos:
1444 case nir_intrinsic_load_sample_mask_in:
1445 bld_base->sysval_intrin(bld_base, instr, result);
1446 break;
1447 case nir_intrinsic_load_helper_invocation:
1448 bld_base->helper_invocation(bld_base, &result[0]);
1449 break;
1450 case nir_intrinsic_discard_if:
1451 case nir_intrinsic_discard:
1452 visit_discard(bld_base, instr);
1453 break;
1454 case nir_intrinsic_emit_vertex:
1455 bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
1456 break;
1457 case nir_intrinsic_end_primitive:
1458 bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
1459 break;
1460 case nir_intrinsic_ssbo_atomic_add:
1461 case nir_intrinsic_ssbo_atomic_imin:
1462 case nir_intrinsic_ssbo_atomic_imax:
1463 case nir_intrinsic_ssbo_atomic_umin:
1464 case nir_intrinsic_ssbo_atomic_umax:
1465 case nir_intrinsic_ssbo_atomic_and:
1466 case nir_intrinsic_ssbo_atomic_or:
1467 case nir_intrinsic_ssbo_atomic_xor:
1468 case nir_intrinsic_ssbo_atomic_exchange:
1469 case nir_intrinsic_ssbo_atomic_comp_swap:
1470 visit_ssbo_atomic(bld_base, instr, result);
1471 break;
1472 case nir_intrinsic_image_deref_load:
1473 visit_load_image(bld_base, instr, result);
1474 break;
1475 case nir_intrinsic_image_deref_store:
1476 visit_store_image(bld_base, instr);
1477 break;
1478 case nir_intrinsic_image_deref_atomic_add:
1479 case nir_intrinsic_image_deref_atomic_imin:
1480 case nir_intrinsic_image_deref_atomic_imax:
1481 case nir_intrinsic_image_deref_atomic_umin:
1482 case nir_intrinsic_image_deref_atomic_umax:
1483 case nir_intrinsic_image_deref_atomic_and:
1484 case nir_intrinsic_image_deref_atomic_or:
1485 case nir_intrinsic_image_deref_atomic_xor:
1486 case nir_intrinsic_image_deref_atomic_exchange:
1487 case nir_intrinsic_image_deref_atomic_comp_swap:
1488 visit_atomic_image(bld_base, instr, result);
1489 break;
1490 case nir_intrinsic_image_deref_size:
1491 visit_image_size(bld_base, instr, result);
1492 break;
1493 case nir_intrinsic_image_deref_samples:
1494 visit_image_samples(bld_base, instr, result);
1495 break;
1496 case nir_intrinsic_load_shared:
1497 visit_shared_load(bld_base, instr, result);
1498 break;
1499 case nir_intrinsic_store_shared:
1500 visit_shared_store(bld_base, instr);
1501 break;
1502 case nir_intrinsic_shared_atomic_add:
1503 case nir_intrinsic_shared_atomic_imin:
1504 case nir_intrinsic_shared_atomic_umin:
1505 case nir_intrinsic_shared_atomic_imax:
1506 case nir_intrinsic_shared_atomic_umax:
1507 case nir_intrinsic_shared_atomic_and:
1508 case nir_intrinsic_shared_atomic_or:
1509 case nir_intrinsic_shared_atomic_xor:
1510 case nir_intrinsic_shared_atomic_exchange:
1511 case nir_intrinsic_shared_atomic_comp_swap:
1512 visit_shared_atomic(bld_base, instr, result);
1513 break;
1514 case nir_intrinsic_control_barrier:
1515 visit_barrier(bld_base);
1516 break;
1517 case nir_intrinsic_group_memory_barrier:
1518 case nir_intrinsic_memory_barrier:
1519 case nir_intrinsic_memory_barrier_shared:
1520 case nir_intrinsic_memory_barrier_buffer:
1521 case nir_intrinsic_memory_barrier_image:
1522 case nir_intrinsic_memory_barrier_tcs_patch:
1523 break;
1524 case nir_intrinsic_load_kernel_input:
1525 visit_load_kernel_input(bld_base, instr, result);
1526 break;
1527 case nir_intrinsic_load_global:
1528 visit_load_global(bld_base, instr, result);
1529 break;
1530 case nir_intrinsic_store_global:
1531 visit_store_global(bld_base, instr);
1532 break;
1533 case nir_intrinsic_global_atomic_add:
1534 case nir_intrinsic_global_atomic_imin:
1535 case nir_intrinsic_global_atomic_umin:
1536 case nir_intrinsic_global_atomic_imax:
1537 case nir_intrinsic_global_atomic_umax:
1538 case nir_intrinsic_global_atomic_and:
1539 case nir_intrinsic_global_atomic_or:
1540 case nir_intrinsic_global_atomic_xor:
1541 case nir_intrinsic_global_atomic_exchange:
1542 case nir_intrinsic_global_atomic_comp_swap:
1543 visit_global_atomic(bld_base, instr, result);
1544 break;
1545 case nir_intrinsic_vote_all:
1546 case nir_intrinsic_vote_any:
1547 case nir_intrinsic_vote_ieq:
1548 bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
1549 break;
1550 case nir_intrinsic_interp_deref_at_offset:
1551 case nir_intrinsic_interp_deref_at_centroid:
1552 case nir_intrinsic_interp_deref_at_sample:
1553 visit_interp(bld_base, instr, result);
1554 break;
1555 default:
1556 assert(0);
1557 break;
1558 }
1559 if (result[0]) {
1560 assign_dest(bld_base, &instr->dest, result);
1561 }
1562 }
1563
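/* txs/query_levels/texture_samples: fill in a size-query params struct
   and let the tex_size callback do the work; query_levels returns the
   level count in component 3 of the sizes. */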
1564 static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1565 {
1566 struct lp_sampler_size_query_params params = { 0 };
1567 LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
1568 LLVMValueRef explicit_lod = NULL;
1569 LLVMValueRef texture_unit_offset = NULL;
1570 for (unsigned i = 0; i < instr->num_srcs; i++) {
1571 switch (instr->src[i].src_type) {
1572 case nir_tex_src_lod:
1573 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1574 break;
1575 case nir_tex_src_texture_offset:
1576 texture_unit_offset = get_src(bld_base, instr->src[i].src);
1577 break;
1578 default:
1579 break;
1580 }
1581 }
1582
1583 params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
1584 params.texture_unit = instr->texture_index;
1585 params.explicit_lod = explicit_lod;
1586 params.is_sviewinfo = TRUE;
1587 params.sizes_out = sizes_out;
1588 params.samples_only = (instr->op == nir_texop_texture_samples);
1589 params.texture_unit_offset = texture_unit_offset;
1590
1591 if (instr->op == nir_texop_query_levels)
1592 params.explicit_lod = bld_base->uint_bld.zero;
1593 bld_base->tex_size(bld_base, &params);
1594 assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
1595 }
1596
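/* Pick how the LOD varies across the SIMD vector: scalar for dynamically
   uniform sources, otherwise per-quad in fragment shaders (unless
   GALLIVM_PERF_NO_QUAD_LOD is set) and per-element elsewhere. */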
1597 static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
1598 nir_src lod_src)
1599 {
1600 enum lp_sampler_lod_property lod_property;
1601
1602 if (nir_src_is_dynamically_uniform(lod_src))
1603 lod_property = LP_SAMPLER_LOD_SCALAR;
1604 else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1605 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1606 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1607 else
1608 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1609 }
1610 else
1611 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1612 return lod_property;
1613 }
1614
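/* Translate a NIR texture instruction: size/level/sample queries are
   forwarded to visit_txs(), everything else collects coords, offsets,
   derivatives, LOD/bias and comparator sources into lp_sampler_params,
   builds the sample_key and calls the tex callback. */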
1615 static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1616 {
1617 struct gallivm_state *gallivm = bld_base->base.gallivm;
1618 LLVMBuilderRef builder = gallivm->builder;
1619 LLVMValueRef coords[5];
1620 LLVMValueRef offsets[3] = { NULL };
1621 LLVMValueRef explicit_lod = NULL, projector = NULL, ms_index = NULL;
1622 struct lp_sampler_params params;
1623 struct lp_derivatives derivs;
1624 unsigned sample_key = 0;
1625 nir_deref_instr *texture_deref_instr = NULL;
1626 nir_deref_instr *sampler_deref_instr = NULL;
1627 LLVMValueRef texture_unit_offset = NULL;
1628 LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
1629 unsigned lod_src = 0;
1630 LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
1631
1632 memset(&params, 0, sizeof(params));
1633 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
1634
1635 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
1636 visit_txs(bld_base, instr);
1637 return;
1638 }
1639 if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1640 sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
1641 else if (instr->op == nir_texop_tg4) {
1642 sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
1643 sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
1644 } else if (instr->op == nir_texop_lod)
1645 sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
1646 for (unsigned i = 0; i < instr->num_srcs; i++) {
1647 switch (instr->src[i].src_type) {
1648 case nir_tex_src_coord: {
1649 LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
1650 if (instr->coord_components == 1)
1651 coords[0] = coord;
1652 else {
1653 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1654 coords[chan] = LLVMBuildExtractValue(builder, coord,
1655 chan, "");
1656 }
1657 for (unsigned chan = instr->coord_components; chan < 5; chan++)
1658 coords[chan] = coord_undef;
1659
1660 break;
1661 }
1662 case nir_tex_src_texture_deref:
1663 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
1664 break;
1665 case nir_tex_src_sampler_deref:
1666 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
1667 break;
1668 case nir_tex_src_projector:
1669 projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
1670 break;
1671 case nir_tex_src_comparator:
1672 sample_key |= LP_SAMPLER_SHADOW;
1673 coords[4] = get_src(bld_base, instr->src[i].src);
1674 coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
1675 break;
1676 case nir_tex_src_bias:
1677 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
1678 lod_src = i;
1679 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1680 break;
1681 case nir_tex_src_lod:
1682 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
1683 lod_src = i;
1684 if (instr->op == nir_texop_txf)
1685 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1686 else
1687 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1688 break;
1689 case nir_tex_src_ddx: {
1690 int deriv_cnt = instr->coord_components;
1691 if (instr->is_array)
1692 deriv_cnt--;
1693 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1694 if (deriv_cnt == 1)
1695 derivs.ddx[0] = deriv_val;
1696 else
1697 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1698 derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
1699 chan, "");
1700 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1701 derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
1702 break;
1703 }
1704 case nir_tex_src_ddy: {
1705 int deriv_cnt = instr->coord_components;
1706 if (instr->is_array)
1707 deriv_cnt--;
1708 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1709 if (deriv_cnt == 1)
1710 derivs.ddy[0] = deriv_val;
1711 else
1712 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1713 derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
1714 chan, "");
1715 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1716 derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
1717 break;
1718 }
1719 case nir_tex_src_offset: {
1720 int offset_cnt = instr->coord_components;
1721 if (instr->is_array)
1722 offset_cnt--;
1723 LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
1724 sample_key |= LP_SAMPLER_OFFSETS;
1725 if (offset_cnt == 1)
1726 offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
1727 else {
1728 for (unsigned chan = 0; chan < offset_cnt; ++chan) {
1729 offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
1730 chan, "");
1731 offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
1732 }
1733 }
1734 break;
1735 }
1736 case nir_tex_src_ms_index:
1737 sample_key |= LP_SAMPLER_FETCH_MS;
1738 ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1739 break;
1740
1741 case nir_tex_src_texture_offset:
1742 texture_unit_offset = get_src(bld_base, instr->src[i].src);
1743 break;
1744 case nir_tex_src_sampler_offset:
1745 break;
1746 default:
1747 assert(0);
1748 break;
1749 }
1750 }
1751 if (!sampler_deref_instr)
1752 sampler_deref_instr = texture_deref_instr;
1753
1754 if (explicit_lod)
1755 lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
1756
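/* Sampling ops take float coordinates, fetches take integers. */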
1757 if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
1758 instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
1759 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1760 coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
1761 else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1762 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1763 coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
1764
1765 if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
1766 /* move layer coord for 1d arrays. */
1767 coords[2] = coords[1];
1768 coords[1] = coord_undef;
1769 }
1770
1771 if (projector) {
1772 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1773 coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
1774 if (sample_key & LP_SAMPLER_SHADOW)
1775 coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
1776 }
1777
1778 uint32_t samp_base_index = 0, tex_base_index = 0;
1779 if (!sampler_deref_instr) {
1780 int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
1781 if (samp_src_index == -1) {
1782 samp_base_index = instr->sampler_index;
1783 }
1784 }
1785 if (!texture_deref_instr) {
1786 int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
1787 if (tex_src_index == -1) {
1788 tex_base_index = instr->texture_index;
1789 }
1790 }
1791
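/* txd: pass the explicit derivatives through and compute the LOD
   per quad (fragment shaders) or per element. */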
1792 if (instr->op == nir_texop_txd) {
1793 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
1794 params.derivs = &derivs;
1795 if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1796 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1797 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1798 else
1799 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1800 } else
1801 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1802 }
1803
1804 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
1805 params.sample_key = sample_key;
1806 params.offsets = offsets;
1807 params.texture_index = tex_base_index;
1808 params.texture_index_offset = texture_unit_offset;
1809 params.sampler_index = samp_base_index;
1810 params.coords = coords;
1811 params.texel = texel;
1812 params.lod = explicit_lod;
1813 params.ms_index = ms_index;
1814 bld_base->tex(bld_base, &params);
1815 assign_dest(bld_base, &instr->dest, texel);
1816 }
1817
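/* An SSA undef becomes an LLVM undef vector of the matching integer
   width for each component. */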
1818 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1819 const nir_ssa_undef_instr *instr)
1820 {
1821 unsigned num_components = instr->def.num_components;
1822 LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
1823 struct lp_build_context *undef_bld = get_int_bld(bld_base, true, instr->def.bit_size);
1824 for (unsigned i = 0; i < num_components; i++)
1825 undef[i] = LLVMGetUndef(undef_bld->vec_type);
1826 assign_ssa_dest(bld_base, &instr->def, undef);
1827 }
1828
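/* break/continue map directly onto the flow-control callbacks. */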
1829 static void visit_jump(struct lp_build_nir_context *bld_base,
1830 const nir_jump_instr *instr)
1831 {
1832 switch (instr->type) {
1833 case nir_jump_break:
1834 bld_base->break_stmt(bld_base);
1835 break;
1836 case nir_jump_continue:
1837 bld_base->continue_stmt(bld_base);
1838 break;
1839 default:
1840 unreachable("Unknown jump instr\n");
1841 }
1842 }
1843
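/* Only shared and global memory derefs are materialised here; other
   variable modes are resolved by the load/store deref handlers. */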
1844 static void visit_deref(struct lp_build_nir_context *bld_base,
1845 nir_deref_instr *instr)
1846 {
1847 if (instr->mode != nir_var_mem_shared &&
1848 instr->mode != nir_var_mem_global)
1849 return;
1850 LLVMValueRef result = NULL;
1851 switch(instr->deref_type) {
1852 case nir_deref_type_var: {
1853 struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
1854 result = entry->data;
1855 break;
1856 }
1857 default:
1858 unreachable("Unhandled deref_instr deref type");
1859 }
1860
1861 assign_ssa(bld_base, instr->dest.ssa.index, result);
1862 }
1863
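/* Walk all instructions in a block and dispatch by instruction type;
   phis are not expected since the shader was taken out of SSA form. */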
1864 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1865 {
1866 nir_foreach_instr(instr, block)
1867 {
1868 switch (instr->type) {
1869 case nir_instr_type_alu:
1870 visit_alu(bld_base, nir_instr_as_alu(instr));
1871 break;
1872 case nir_instr_type_load_const:
1873 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1874 break;
1875 case nir_instr_type_intrinsic:
1876 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1877 break;
1878 case nir_instr_type_tex:
1879 visit_tex(bld_base, nir_instr_as_tex(instr));
1880 break;
1881 case nir_instr_type_phi:
1882 assert(0);
1883 break;
1884 case nir_instr_type_ssa_undef:
1885 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1886 break;
1887 case nir_instr_type_jump:
1888 visit_jump(bld_base, nir_instr_as_jump(instr));
1889 break;
1890 case nir_instr_type_deref:
1891 visit_deref(bld_base, nir_instr_as_deref(instr));
1892 break;
1893 default:
1894 fprintf(stderr, "Unknown NIR instr type: ");
1895 nir_print_instr(instr, stderr);
1896 fprintf(stderr, "\n");
1897 abort();
1898 }
1899 }
1900 }
1901
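/* Emit structured if/else/endif around the two branch CF lists. */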
1902 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1903 {
1904 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1905
1906 bld_base->if_cond(bld_base, cond);
1907 visit_cf_list(bld_base, &if_stmt->then_list);
1908
1909 if (!exec_list_is_empty(&if_stmt->else_list)) {
1910 bld_base->else_stmt(bld_base);
1911 visit_cf_list(bld_base, &if_stmt->else_list);
1912 }
1913 bld_base->endif_stmt(bld_base);
1914 }
1915
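/* Loops become a bgnloop/endloop pair around the body. */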
1916 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1917 {
1918 bld_base->bgnloop(bld_base);
1919 visit_cf_list(bld_base, &loop->body);
1920 bld_base->endloop(bld_base);
1921 }
1922
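/* Recursively visit a control-flow list of blocks, ifs and loops. */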
1923 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1924 struct exec_list *list)
1925 {
1926 foreach_list_typed(nir_cf_node, node, node, list)
1927 {
1928 switch (node->type) {
1929 case nir_cf_node_block:
1930 visit_block(bld_base, nir_cf_node_as_block(node));
1931 break;
1932
1933 case nir_cf_node_if:
1934 visit_if(bld_base, nir_cf_node_as_if(node));
1935 break;
1936
1937 case nir_cf_node_loop:
1938 visit_loop(bld_base, nir_cf_node_as_loop(node));
1939 break;
1940
1941 default:
1942 assert(0);
1943 }
1944 }
1945 }
1946
1947 static void
1948 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
1949 struct nir_shader *nir,
1950 struct nir_variable *variable)
1951 {
1952 bld_base->emit_var_decl(bld_base, variable);
1953 }
1954
1955 /* Vector registers are stored as arrays on the LLVM side so that
1956    we can use GEP on them: exec-mask stores have to operate on a
1957    single component at a time.
1958    The arrays are laid out as:
1959    0.x, 1.x, 2.x, 3.x
1960    0.y, 1.y, 2.y, 3.y
1961    ....
1962 */
1963 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1964 nir_register *reg)
1965 {
1966 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1967
1968 LLVMTypeRef type = int_bld->vec_type;
1969 if (reg->num_array_elems)
1970 type = LLVMArrayType(type, reg->num_array_elems);
1971 if (reg->num_components > 1)
1972 type = LLVMArrayType(type, reg->num_components);
1973
1974 return type;
1975 }
1976
1977
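/* Main entry point: lower the shader out of SSA, declare the shader
   outputs, allocate LLVM storage for the NIR registers, then translate
   the single function's CF list. */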
1978 bool lp_build_nir_llvm(
1979 struct lp_build_nir_context *bld_base,
1980 struct nir_shader *nir)
1981 {
1982 struct nir_function *func;
1983
1984 nir_convert_from_ssa(nir, true);
1985 nir_lower_locals_to_regs(nir);
1986 nir_remove_dead_derefs(nir);
1987 nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
1988
1989 nir_foreach_shader_out_variable(variable, nir)
1990 handle_shader_output_decl(bld_base, nir, variable);
1991
1992 bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1993 _mesa_key_pointer_equal);
1994 bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1995 _mesa_key_pointer_equal);
1996
1997 func = (struct nir_function *)exec_list_get_head(&nir->functions);
1998
1999 nir_foreach_register(reg, &func->impl->registers) {
2000 LLVMTypeRef type = get_register_type(bld_base, reg);
2001 LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
2002 type, "reg");
2003 _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
2004 }
2005 nir_index_ssa_defs(func->impl);
2006 bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
2007 visit_cf_list(bld_base, &func->impl->body);
2008
2009 free(bld_base->ssa_defs);
2010 ralloc_free(bld_base->vars);
2011 ralloc_free(bld_base->regs);
2012 return true;
2013 }
2014
2015 /* Run some basic optimization passes to remove constructs we don't want to handle in the backend. */
2016 void lp_build_opt_nir(struct nir_shader *nir)
2017 {
2018 bool progress;
2019 do {
2020 progress = false;
2021 NIR_PASS_V(nir, nir_opt_constant_folding);
2022 NIR_PASS_V(nir, nir_opt_algebraic);
2023 NIR_PASS_V(nir, nir_lower_pack);
2024
2025 nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
2026 NIR_PASS_V(nir, nir_lower_tex, &options);
2027 } while (progress);
2028 nir_lower_bool_to_int32(nir);
2029 }