gallivm/nir: add subpass sampler type support
src/gallium/auxiliary/gallivm/lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

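/* Bitcast a value to the vector type matching the given NIR ALU type and
 * bit size.  The incoming value is assumed to already have the right total
 * width; only its LLVM type is reinterpreted. */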
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}

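/* Pick the float build context for the operand size: the double context
 * for 64-bit operations, the base (32-bit float) context otherwise. */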
static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

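/* Map a GLSL sampler dimension to the corresponding pipe texture target.
 * Subpass inputs (single-sampled and multisampled) are treated as 2D
 * textures, which is what the subpass sampler type support added by this
 * change relies on. */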
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_SUBPASS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

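/* Look up the LLVM storage tracked for this NIR register in the bld_base->regs
 * hash table and let the backend load from it, applying an indirect index
 * if one is present. */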
static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] :
              lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

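/* Boolean results are 32-bit ~0/0 masks per lane.  For 64-bit sources the
 * comparison result is truncated back to the 32-bit mask width, and for
 * sub-32-bit integer compares the result is sign-extended to 32 bits. */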
static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

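/* Fetch an ALU source and apply its swizzle.  Multi-component values are
 * held as LLVM aggregates, so swizzling extracts and re-inserts elements;
 * a scalar source requested as a vector is simply splatted.  Source
 * modifiers are expected to have been lowered away. */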
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value, value, value, value, value,
                                  value, value, value, value, value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
                                 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

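/* 64-bit values are viewed as vectors of 2*length 32-bit words.
 * split_64bit shuffles out either the low or high words (endian-aware),
 * and merge_64bit interleaves two 32-bit vectors back into that layout. */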
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

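/* Integer division and modulo with divide-by-zero protection: lanes with a
 * zero divisor are redirected to a safe divisor via div_mask, and the same
 * mask is used afterwards to patch the affected lanes of the result, so a
 * faulting vector divide is never emitted. */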
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   if (!is_unsigned) {
      /* INT_MIN (0x80000000) / -1 (0xffffffff) causes sigfpe, seen with blender. */
      div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
   }
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
       * may as well do the same for idiv. */
      return LLVMBuildOr(builder, div_mask, result, "");
}

static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

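/* Implement fquantize2f16 by round-tripping through half precision:
 * truncate to a half vector and extend back to float, which discards
 * precision not representable in fp16. */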
static LLVMValueRef
do_quantize_to_f16(struct lp_build_nir_context *bld_base,
                   LLVMValueRef src)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   result = LLVMBuildFPTrunc(builder, src, LLVMVectorType(LLVMHalfTypeInContext(gallivm->context), bld_base->base.type.length), "");
   result = LLVMBuildFPExt(builder, result, bld_base->base.vec_type, "");
   return result;
}

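/* Emit the LLVM IR for a single (per-channel) NIR ALU op.  Sources have
 * already been cast to the types the op expects; bit sizes select between
 * the 8/16/32/64-bit integer and 32/64-bit float build contexts. */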
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS], LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_fquantize2f16:
      result = do_quantize_to_f16(bld_base, src[0]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

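/* Translate one NIR ALU instruction.  vecN ops just gather their (already
 * cast) sources into an aggregate; everything else is emitted channel by
 * channel through do_alu_action, with the result cast to the op's output
 * type before being stored to the destination. */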
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->def.bit_size == 32 ? instr->value[i].u32 : instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

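/* Walk a deref chain and split it into a constant slot offset plus an
 * optional per-lane indirect offset.  For per-vertex IO the leading array
 * index is returned separately as the vertex index.  Struct members and
 * array elements are counted in attribute slots. */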
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact && nir_src_is_const(instr->arr.index)) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
957 unreachable("Uhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef indir_vertex_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_in;
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
         var->data.mode == nir_var_shader_in && !var->data.patch;

      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index, indir_vertex_index = NULL;
   if (var) {
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}

static void visit_load_push_constant(struct lp_build_nir_context *bld_base,
                                     nir_intrinsic_instr *instr,
                                     LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);

   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}


static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

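/* Image load/store/atomic all follow the same pattern: resolve the image
 * deref to a binding (plus optional indirect offset), map the GLSL sampler
 * dim to a pipe target, fix up 1D-array coordinates, then hand a filled-in
 * lp_img_params to the backend's image_op hook.  Multisampled and
 * multisampled-subpass images additionally pass a sample index. */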
static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS || glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS_MS)
      params.ms_index = cast_type(bld_base, get_src(bld_base, instr->src[2]), nir_type_uint, 32);
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;

   bld_base->image_op(bld_base, &params);
}

static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);
   unsigned const_index;
   LLVMValueRef indir_index;
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;

   bld_base->image_op(bld_base, &params);
}


static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   unsigned const_index;
   LLVMValueRef indir_index;
   const struct glsl_type *type = glsl_without_array(var->type);
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);
   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
   params.texture_unit_offset = indir_index;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

static void visit_image_samples(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   unsigned const_index;
   LLVMValueRef indir_index;
   const struct glsl_type *type = glsl_without_array(var->type);
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);

   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
   params.texture_unit_offset = indir_index;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   params.sizes_out = result;
   params.samples_only = true;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_load_global(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                         nir_src_bit_size(instr->src[0]),
                         addr, result);
}

static void visit_store_global(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
}

static void visit_global_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
}

static void visit_interp(struct lp_build_nir_context *bld_base,
                         nir_intrinsic_instr *instr,
                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   unsigned num_components = nir_dest_num_components(instr->dest);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef offsets[2] = { NULL, NULL };
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);
   bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
   bool sample = false;
   if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
      for (unsigned i = 0; i < 2; i++) {
         offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
         offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
      }
   } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
      offsets[0] = get_src(bld_base, instr->src[1]);
      offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
      sample = true;
   }
   bld_base->interp_at(bld_base, num_components, var, centroid, sample, const_index, indir_index, offsets, result);
}

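/* Dispatch a NIR intrinsic to the matching helper or backend hook.
 * System values are routed through sysval_intrin; memory barrier
 * intrinsics fall through as no-ops here. */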
1416 static void visit_intrinsic(struct lp_build_nir_context *bld_base,
1417 nir_intrinsic_instr *instr)
1418 {
1419 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
1420 switch (instr->intrinsic) {
1421 case nir_intrinsic_load_deref:
1422 visit_load_var(bld_base, instr, result);
1423 break;
1424 case nir_intrinsic_store_deref:
1425 visit_store_var(bld_base, instr);
1426 break;
1427 case nir_intrinsic_load_ubo:
1428 visit_load_ubo(bld_base, instr, result);
1429 break;
1430 case nir_intrinsic_load_push_constant:
1431 visit_load_push_constant(bld_base, instr, result);
1432 break;
1433 case nir_intrinsic_load_ssbo:
1434 visit_load_ssbo(bld_base, instr, result);
1435 break;
1436 case nir_intrinsic_store_ssbo:
1437 visit_store_ssbo(bld_base, instr);
1438 break;
1439 case nir_intrinsic_get_buffer_size:
1440 visit_get_buffer_size(bld_base, instr, result);
1441 break;
1442 case nir_intrinsic_load_vertex_id:
1443 case nir_intrinsic_load_primitive_id:
1444 case nir_intrinsic_load_instance_id:
1445 case nir_intrinsic_load_base_instance:
1446 case nir_intrinsic_load_base_vertex:
1447 case nir_intrinsic_load_work_group_id:
1448 case nir_intrinsic_load_local_invocation_id:
1449 case nir_intrinsic_load_num_work_groups:
1450 case nir_intrinsic_load_invocation_id:
1451 case nir_intrinsic_load_front_face:
1452 case nir_intrinsic_load_draw_id:
1453 case nir_intrinsic_load_local_group_size:
1454 case nir_intrinsic_load_work_dim:
1455 case nir_intrinsic_load_tess_coord:
1456 case nir_intrinsic_load_tess_level_outer:
1457 case nir_intrinsic_load_tess_level_inner:
1458 case nir_intrinsic_load_patch_vertices_in:
1459 case nir_intrinsic_load_sample_id:
1460 case nir_intrinsic_load_sample_pos:
1461 case nir_intrinsic_load_sample_mask_in:
1462 bld_base->sysval_intrin(bld_base, instr, result);
1463 break;
1464 case nir_intrinsic_load_helper_invocation:
1465 bld_base->helper_invocation(bld_base, &result[0]);
1466 break;
1467 case nir_intrinsic_discard_if:
1468 case nir_intrinsic_discard:
1469 visit_discard(bld_base, instr);
1470 break;
1471 case nir_intrinsic_emit_vertex:
1472 bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
1473 break;
1474 case nir_intrinsic_end_primitive:
1475 bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
1476 break;
1477 case nir_intrinsic_ssbo_atomic_add:
1478 case nir_intrinsic_ssbo_atomic_imin:
1479 case nir_intrinsic_ssbo_atomic_imax:
1480 case nir_intrinsic_ssbo_atomic_umin:
1481 case nir_intrinsic_ssbo_atomic_umax:
1482 case nir_intrinsic_ssbo_atomic_and:
1483 case nir_intrinsic_ssbo_atomic_or:
1484 case nir_intrinsic_ssbo_atomic_xor:
1485 case nir_intrinsic_ssbo_atomic_exchange:
1486 case nir_intrinsic_ssbo_atomic_comp_swap:
1487 visit_ssbo_atomic(bld_base, instr, result);
1488 break;
1489 case nir_intrinsic_image_deref_load:
1490 visit_load_image(bld_base, instr, result);
1491 break;
1492 case nir_intrinsic_image_deref_store:
1493 visit_store_image(bld_base, instr);
1494 break;
1495 case nir_intrinsic_image_deref_atomic_add:
1496 case nir_intrinsic_image_deref_atomic_imin:
1497 case nir_intrinsic_image_deref_atomic_imax:
1498 case nir_intrinsic_image_deref_atomic_umin:
1499 case nir_intrinsic_image_deref_atomic_umax:
1500 case nir_intrinsic_image_deref_atomic_and:
1501 case nir_intrinsic_image_deref_atomic_or:
1502 case nir_intrinsic_image_deref_atomic_xor:
1503 case nir_intrinsic_image_deref_atomic_exchange:
1504 case nir_intrinsic_image_deref_atomic_comp_swap:
1505 visit_atomic_image(bld_base, instr, result);
1506 break;
1507 case nir_intrinsic_image_deref_size:
1508 visit_image_size(bld_base, instr, result);
1509 break;
1510 case nir_intrinsic_image_deref_samples:
1511 visit_image_samples(bld_base, instr, result);
1512 break;
1513 case nir_intrinsic_load_shared:
1514 visit_shared_load(bld_base, instr, result);
1515 break;
1516 case nir_intrinsic_store_shared:
1517 visit_shared_store(bld_base, instr);
1518 break;
1519 case nir_intrinsic_shared_atomic_add:
1520 case nir_intrinsic_shared_atomic_imin:
1521 case nir_intrinsic_shared_atomic_umin:
1522 case nir_intrinsic_shared_atomic_imax:
1523 case nir_intrinsic_shared_atomic_umax:
1524 case nir_intrinsic_shared_atomic_and:
1525 case nir_intrinsic_shared_atomic_or:
1526 case nir_intrinsic_shared_atomic_xor:
1527 case nir_intrinsic_shared_atomic_exchange:
1528 case nir_intrinsic_shared_atomic_comp_swap:
1529 visit_shared_atomic(bld_base, instr, result);
1530 break;
1531 case nir_intrinsic_control_barrier:
1532 visit_barrier(bld_base);
1533 break;
1534 case nir_intrinsic_group_memory_barrier:
1535 case nir_intrinsic_memory_barrier:
1536 case nir_intrinsic_memory_barrier_shared:
1537 case nir_intrinsic_memory_barrier_buffer:
1538 case nir_intrinsic_memory_barrier_image:
1539 case nir_intrinsic_memory_barrier_tcs_patch:
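      /* Memory barriers are no-ops in this backend. */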
1540 break;
1541 case nir_intrinsic_load_kernel_input:
1542 visit_load_kernel_input(bld_base, instr, result);
1543 break;
1544 case nir_intrinsic_load_global:
1545 visit_load_global(bld_base, instr, result);
1546 break;
1547 case nir_intrinsic_store_global:
1548 visit_store_global(bld_base, instr);
1549 break;
1550 case nir_intrinsic_global_atomic_add:
1551 case nir_intrinsic_global_atomic_imin:
1552 case nir_intrinsic_global_atomic_umin:
1553 case nir_intrinsic_global_atomic_imax:
1554 case nir_intrinsic_global_atomic_umax:
1555 case nir_intrinsic_global_atomic_and:
1556 case nir_intrinsic_global_atomic_or:
1557 case nir_intrinsic_global_atomic_xor:
1558 case nir_intrinsic_global_atomic_exchange:
1559 case nir_intrinsic_global_atomic_comp_swap:
1560 visit_global_atomic(bld_base, instr, result);
1561 break;
1562 case nir_intrinsic_vote_all:
1563 case nir_intrinsic_vote_any:
1564 case nir_intrinsic_vote_ieq:
1565 bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
1566 break;
1567 case nir_intrinsic_interp_deref_at_offset:
1568 case nir_intrinsic_interp_deref_at_centroid:
1569 case nir_intrinsic_interp_deref_at_sample:
1570 visit_interp(bld_base, instr, result);
1571 break;
1572 default:
1573 assert(0);
1574 break;
1575 }
1576 if (result[0]) {
1577 assign_dest(bld_base, &instr->dest, result);
1578 }
1579 }
1580
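/* Texture size, level-count and sample-count queries (txs, query_levels,
 * texture_samples) are all funnelled through the tex_size() backend hook.
 */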
1581 static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1582 {
1583 struct lp_sampler_size_query_params params = { 0 };
1584 LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
1585 LLVMValueRef explicit_lod = NULL;
1586 LLVMValueRef texture_unit_offset = NULL;
1587 for (unsigned i = 0; i < instr->num_srcs; i++) {
1588 switch (instr->src[i].src_type) {
1589 case nir_tex_src_lod:
1590 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1591 break;
1592 case nir_tex_src_texture_offset:
1593 texture_unit_offset = get_src(bld_base, instr->src[i].src);
1594 break;
1595 default:
1596 break;
1597 }
1598 }
1599
1600 params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
1601 params.texture_unit = instr->texture_index;
1602 params.explicit_lod = explicit_lod;
1603 params.is_sviewinfo = TRUE;
1604 params.sizes_out = sizes_out;
1605 params.samples_only = (instr->op == nir_texop_texture_samples);
1606 params.texture_unit_offset = texture_unit_offset;
1607
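   /* query_levels carries no LOD operand, and the level count comes back
    * in the .w channel of the size query, hence the component-3 result.
    */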
1608 if (instr->op == nir_texop_query_levels)
1609 params.explicit_lod = bld_base->uint_bld.zero;
1610 bld_base->tex_size(bld_base, &params);
1611 assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
1612 }
1613
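/* Decide how the LOD varies across the SIMD vector: scalar when NIR can
 * prove the source dynamically uniform, per-quad in fragment shaders
 * (unless quad LOD is disabled for performance), per-element otherwise.
 */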
1614 static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
1615 nir_src lod_src)
1616 {
1617 enum lp_sampler_lod_property lod_property;
1618
1619 if (nir_src_is_dynamically_uniform(lod_src))
1620 lod_property = LP_SAMPLER_LOD_SCALAR;
1621 else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1622 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1623 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1624 else
1625 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1626 }
1627 else
1628 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1629 return lod_property;
1630 }
1631
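/* Translate a NIR texture instruction: collect the sources into the
 * gallivm sampler parameters (coords, derivatives, offsets, LOD), encode
 * the operation in sample_key, then invoke the backend tex() hook.
 */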
1632 static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1633 {
1634 struct gallivm_state *gallivm = bld_base->base.gallivm;
1635 LLVMBuilderRef builder = gallivm->builder;
1636 LLVMValueRef coords[5];
1637 LLVMValueRef offsets[3] = { NULL };
1638 LLVMValueRef explicit_lod = NULL, projector = NULL, ms_index = NULL;
1639 struct lp_sampler_params params;
1640 struct lp_derivatives derivs;
1641 unsigned sample_key = 0;
1642 nir_deref_instr *texture_deref_instr = NULL;
1643 nir_deref_instr *sampler_deref_instr = NULL;
1644 LLVMValueRef texture_unit_offset = NULL;
1645 LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
1646 unsigned lod_src = 0;
1647 LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
1648
1649 memset(&params, 0, sizeof(params));
1650 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
1651
1652 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
1653 visit_txs(bld_base, instr);
1654 return;
1655 }
1656 if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1657 sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
1658 else if (instr->op == nir_texop_tg4) {
1659 sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
1660 sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
1661 } else if (instr->op == nir_texop_lod)
1662 sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
1663 for (unsigned i = 0; i < instr->num_srcs; i++) {
1664 switch (instr->src[i].src_type) {
1665 case nir_tex_src_coord: {
1666 LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
1667 if (instr->coord_components == 1)
1668 coords[0] = coord;
1669 else {
1670 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1671 coords[chan] = LLVMBuildExtractValue(builder, coord,
1672 chan, "");
1673 }
1674 for (unsigned chan = instr->coord_components; chan < 5; chan++)
1675 coords[chan] = coord_undef;
1676
1677 break;
1678 }
1679 case nir_tex_src_texture_deref:
1680 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
1681 break;
1682 case nir_tex_src_sampler_deref:
1683 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
1684 break;
1685 case nir_tex_src_projector:
1686 projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
1687 break;
1688 case nir_tex_src_comparator:
1689 sample_key |= LP_SAMPLER_SHADOW;
1690 coords[4] = get_src(bld_base, instr->src[i].src);
1691 coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
1692 break;
1693 case nir_tex_src_bias:
1694 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
1695 lod_src = i;
1696 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1697 break;
1698 case nir_tex_src_lod:
1699 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
1700 lod_src = i;
1701 if (instr->op == nir_texop_txf)
1702 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1703 else
1704 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1705 break;
1706 case nir_tex_src_ddx: {
1707 int deriv_cnt = instr->coord_components;
1708 if (instr->is_array)
1709 deriv_cnt--;
1710 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1711 if (deriv_cnt == 1)
1712 derivs.ddx[0] = deriv_val;
1713 else
1714 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1715 derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
1716 chan, "");
1717 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1718 derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
1719 break;
1720 }
1721 case nir_tex_src_ddy: {
1722 int deriv_cnt = instr->coord_components;
1723 if (instr->is_array)
1724 deriv_cnt--;
1725 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1726 if (deriv_cnt == 1)
1727 derivs.ddy[0] = deriv_val;
1728 else
1729 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1730 derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
1731 chan, "");
1732 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1733 derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
1734 break;
1735 }
1736 case nir_tex_src_offset: {
1737 int offset_cnt = instr->coord_components;
1738 if (instr->is_array)
1739 offset_cnt--;
1740 LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
1741 sample_key |= LP_SAMPLER_OFFSETS;
1742 if (offset_cnt == 1)
1743 offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
1744 else {
1745 for (unsigned chan = 0; chan < offset_cnt; ++chan) {
1746 offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
1747 chan, "");
1748 offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
1749 }
1750 }
1751 break;
1752 }
1753 case nir_tex_src_ms_index:
1754 sample_key |= LP_SAMPLER_FETCH_MS;
1755 ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1756 break;
1757
1758 case nir_tex_src_texture_offset:
1759 texture_unit_offset = get_src(bld_base, instr->src[i].src);
1760 break;
1761 case nir_tex_src_sampler_offset:
1762 break;
1763 default:
1764 assert(0);
1765 break;
1766 }
1767 }
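   /* GLSL-style combined texture/sampler: with no separate sampler deref,
    * reuse the texture deref for the sampler state too.
    */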
1768 if (!sampler_deref_instr)
1769 sampler_deref_instr = texture_deref_instr;
1770
1771 if (explicit_lod)
1772 lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
1773
1774 if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
1775 instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
1776 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1777 coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
1778 else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1779 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1780 coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
1781
1782 if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
1783       /* 1D arrays: the layer arrives in coords[1]; move it to the layer slot (coords[2]). */
1784 coords[2] = coords[1];
1785 coords[1] = coord_undef;
1786 }
1787
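   /* Projective texturing: scale the coordinates (and the shadow
    * reference in coords[4]) by the reciprocal computed above.
    */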
1788 if (projector) {
1789 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1790 coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
1791 if (sample_key & LP_SAMPLER_SHADOW)
1792 coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
1793 }
1794
1795 uint32_t samp_base_index = 0, tex_base_index = 0;
1796 if (!sampler_deref_instr) {
1797 int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
1798 if (samp_src_index == -1) {
1799 samp_base_index = instr->sampler_index;
1800 }
1801 }
1802 if (!texture_deref_instr) {
1803 int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
1804 if (tex_src_index == -1) {
1805 tex_base_index = instr->texture_index;
1806 }
1807 }
1808
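   /* txd supplies explicit derivatives, so the LOD property is forced
    * here instead of being derived from an LOD source.
    */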
1809 if (instr->op == nir_texop_txd) {
1810 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
1811 params.derivs = &derivs;
1812 if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1813 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1814 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1815 else
1816 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1817 } else
1818 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1819 }
1820
1821 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
1822 params.sample_key = sample_key;
1823 params.offsets = offsets;
1824 params.texture_index = tex_base_index;
1825 params.texture_index_offset = texture_unit_offset;
1826 params.sampler_index = samp_base_index;
1827 params.coords = coords;
1828 params.texel = texel;
1829 params.lod = explicit_lod;
1830 params.ms_index = ms_index;
1831 bld_base->tex(bld_base, &params);
1832 assign_dest(bld_base, &instr->dest, texel);
1833 }
1834
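/* SSA undefs simply become LLVM undef vectors of the matching width. */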
1835 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1836 const nir_ssa_undef_instr *instr)
1837 {
1838 unsigned num_components = instr->def.num_components;
1839 LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
1840 struct lp_build_context *undef_bld = get_int_bld(bld_base, true, instr->def.bit_size);
1841 for (unsigned i = 0; i < num_components; i++)
1842 undef[i] = LLVMGetUndef(undef_bld->vec_type);
1843 assign_ssa_dest(bld_base, &instr->def, undef);
1844 }
1845
1846 static void visit_jump(struct lp_build_nir_context *bld_base,
1847 const nir_jump_instr *instr)
1848 {
1849 switch (instr->type) {
1850 case nir_jump_break:
1851 bld_base->break_stmt(bld_base);
1852 break;
1853 case nir_jump_continue:
1854 bld_base->continue_stmt(bld_base);
1855 break;
1856 default:
1857 unreachable("Unknown jump instr\n");
1858 }
1859 }
1860
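/* Only shared- and global-memory derefs need an LLVM value of their own;
 * other deref chains are walked directly by the instructions using them.
 */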
1861 static void visit_deref(struct lp_build_nir_context *bld_base,
1862 nir_deref_instr *instr)
1863 {
1864 if (instr->mode != nir_var_mem_shared &&
1865 instr->mode != nir_var_mem_global)
1866 return;
1867 LLVMValueRef result = NULL;
1868 switch(instr->deref_type) {
1869 case nir_deref_type_var: {
1870 struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
1871 result = entry->data;
1872 break;
1873 }
1874 default:
1875 unreachable("Unhandled deref_instr deref type");
1876 }
1877
1878 assign_ssa(bld_base, instr->dest.ssa.index, result);
1879 }
1880
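/* Per-instruction dispatch.  Phis never show up here because
 * nir_convert_from_ssa has already rewritten them to registers.
 */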
1881 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1882 {
1883 nir_foreach_instr(instr, block)
1884 {
1885 switch (instr->type) {
1886 case nir_instr_type_alu:
1887 visit_alu(bld_base, nir_instr_as_alu(instr));
1888 break;
1889 case nir_instr_type_load_const:
1890 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1891 break;
1892 case nir_instr_type_intrinsic:
1893 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1894 break;
1895 case nir_instr_type_tex:
1896 visit_tex(bld_base, nir_instr_as_tex(instr));
1897 break;
1898 case nir_instr_type_phi:
1899 assert(0);
1900 break;
1901 case nir_instr_type_ssa_undef:
1902 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1903 break;
1904 case nir_instr_type_jump:
1905 visit_jump(bld_base, nir_instr_as_jump(instr));
1906 break;
1907 case nir_instr_type_deref:
1908 visit_deref(bld_base, nir_instr_as_deref(instr));
1909 break;
1910 default:
1911 fprintf(stderr, "Unknown NIR instr type: ");
1912 nir_print_instr(instr, stderr);
1913 fprintf(stderr, "\n");
1914 abort();
1915 }
1916 }
1917 }
1918
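/* Structured control flow maps directly onto the backend's if/else/endif
 * and loop hooks, which handle any execution masking the backend needs.
 */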
1919 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1920 {
1921 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1922
1923 bld_base->if_cond(bld_base, cond);
1924 visit_cf_list(bld_base, &if_stmt->then_list);
1925
1926 if (!exec_list_is_empty(&if_stmt->else_list)) {
1927 bld_base->else_stmt(bld_base);
1928 visit_cf_list(bld_base, &if_stmt->else_list);
1929 }
1930 bld_base->endif_stmt(bld_base);
1931 }
1932
1933 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1934 {
1935 bld_base->bgnloop(bld_base);
1936 visit_cf_list(bld_base, &loop->body);
1937 bld_base->endloop(bld_base);
1938 }
1939
1940 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1941 struct exec_list *list)
1942 {
1943 foreach_list_typed(nir_cf_node, node, node, list)
1944 {
1945 switch (node->type) {
1946 case nir_cf_node_block:
1947 visit_block(bld_base, nir_cf_node_as_block(node));
1948 break;
1949
1950 case nir_cf_node_if:
1951 visit_if(bld_base, nir_cf_node_as_if(node));
1952 break;
1953
1954 case nir_cf_node_loop:
1955 visit_loop(bld_base, nir_cf_node_as_loop(node));
1956 break;
1957
1958 default:
1959 assert(0);
1960 }
1961 }
1962 }
1963
1964 static void
1965 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
1966 struct nir_shader *nir,
1967 struct nir_variable *variable)
1968 {
1969 bld_base->emit_var_decl(bld_base, variable);
1970 }
1971
1972 /* Vector registers are stored as arrays on the LLVM side, so we can
1973    use GEP on them; exec-mask stores have to operate on one component
1974    at a time.  The arrays are laid out as:
1975    0.x, 1.x, 2.x, 3.x
1976    0.y, 1.y, 2.y, 3.y
1977    ....
1978    e.g. a two-component 32-bit register becomes [2 x <N x i32>] for an
1979    N-wide build. */
1980 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1981 nir_register *reg)
1982 {
1983 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1984
1985 LLVMTypeRef type = int_bld->vec_type;
1986 if (reg->num_array_elems)
1987 type = LLVMArrayType(type, reg->num_array_elems);
1988 if (reg->num_components > 1)
1989 type = LLVMArrayType(type, reg->num_components);
1990
1991 return type;
1992 }
1993
1994
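/* Entry point: convert phi webs to registers and lower locals, declare
 * the shader outputs, allocate storage for the NIR registers, then
 * translate the control-flow list of the entry-point function.
 */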
1995 bool lp_build_nir_llvm(
1996 struct lp_build_nir_context *bld_base,
1997 struct nir_shader *nir)
1998 {
1999 struct nir_function *func;
2000
2001 nir_convert_from_ssa(nir, true);
2002 nir_lower_locals_to_regs(nir);
2003 nir_remove_dead_derefs(nir);
2004 nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
2005
2006 nir_foreach_shader_out_variable(variable, nir)
2007 handle_shader_output_decl(bld_base, nir, variable);
2008
2009 bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2010 _mesa_key_pointer_equal);
2011 bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2012 _mesa_key_pointer_equal);
2013
2014 func = (struct nir_function *)exec_list_get_head(&nir->functions);
2015
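   /* Every NIR register gets a stack slot of the array type built by
    * get_register_type(); SSA defs live in a flat array indexed by
    * their ssa index.
    */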
2016 nir_foreach_register(reg, &func->impl->registers) {
2017 LLVMTypeRef type = get_register_type(bld_base, reg);
2018 LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
2019 type, "reg");
2020 _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
2021 }
2022 nir_index_ssa_defs(func->impl);
2023 bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
2024 visit_cf_list(bld_base, &func->impl->body);
2025
2026 free(bld_base->ssa_defs);
2027 ralloc_free(bld_base->vars);
2028 ralloc_free(bld_base->regs);
2029 return true;
2030 }
2031
2032 /* Run some basic NIR optimizations and lower away constructs this backend does not handle. */
2033 void lp_build_opt_nir(struct nir_shader *nir)
2034 {
2035 bool progress;
2036
2037 static const struct nir_lower_tex_options lower_tex_options = {
2038 .lower_tg4_offsets = true,
2039 };
2040 NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
2041 NIR_PASS_V(nir, nir_lower_frexp);
2042
2043 do {
2044 progress = false;
2045 NIR_PASS(progress, nir, nir_opt_constant_folding);
2046 NIR_PASS(progress, nir, nir_opt_algebraic);
2047 NIR_PASS(progress, nir, nir_lower_pack);
2048
2049 nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
2050 NIR_PASS(progress, nir, nir_lower_tex, &options);
2051 } while (progress);
2052 nir_lower_bool_to_int32(nir);
2053 }