gallivm/nir: add helper invocation support
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/
#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

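/*
 * Bitcast an SoA vector to the lp_build context type matching the given
 * NIR alu type and bit size.  SSA values are tracked as opaque LLVM
 * vectors, so consumers re-type them on use.
 */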
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}

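/* Pick the float build context (32- or 64-bit) for an operand bit size. */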
static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

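/* Map a GLSL sampler dimension (plus arrayness) to the pipe texture target. */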
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

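/*
 * Source fetch helpers: SSA defs come straight out of the ssa_defs array,
 * while NIR registers go through the per-backend load_reg callback (with an
 * optional indirect index).
 */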
static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

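/*
 * Destination store helpers: multi-component results are gathered into an
 * LLVM aggregate before being recorded as an SSA def or written back to a
 * register via the backend's store_reg callback.
 */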
static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   assign_ssa(bld_base, ssa->index,
              ssa->num_components == 1 ? vals[0] :
              lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

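/*
 * Boolean helpers.  NIR "bool32" values are represented here as 32-bit
 * masks (0 / ~0 per lane), so comparison results on 64-bit sources are
 * truncated and sub-32-bit results sign-extended back to the 32-bit type.
 */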
static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

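/*
 * Fetch an ALU source and apply its swizzle.  Vectors are stored as LLVM
 * arrays of per-channel SoA vectors, so swizzling is done with
 * extractvalue/insertvalue rather than shufflevector.
 */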
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value, value, value, value, value,
                                  value, value, value, value, value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

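/*
 * Booleans arrive as 0 / ~0 32-bit masks, so b2f just ANDs the mask with
 * the bit pattern of 1.0f, and b2i ANDs it with integer 1; wider results
 * are then extended to the destination bit size.
 */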
static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
                                 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

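/*
 * 64-bit values are viewed as <2*length x i32> vectors here: split_64bit
 * extracts the low or high 32-bit halves with a strided shuffle, and
 * merge_64bit interleaves two 32-bit vectors back into lo/hi pairs.
 */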
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

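/*
 * Integer division with divide-by-zero protection: lanes with a zero
 * divisor are forced to divide by ~0 instead (so LLVM's udiv/sdiv can't
 * trap), and the mask is used afterwards to patch in the defined result.
 */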
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   if (!is_unsigned) {
      /* INT_MIN (0x80000000) / -1 (0xffffffff) raises SIGFPE; seen with blender. */
      div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
   }
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
       * may as well do the same for idiv. */
      return LLVMBuildOr(builder, div_mask, result, "");
}

static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

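/*
 * Emit one scalar (per-channel) NIR ALU op.  Sources have already been
 * swizzled and cast to the type the op expects; the result is cast back
 * to the destination type by the caller.
 */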
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS], LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
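      /* fract(x) = x - floor(x) */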
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
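      /* mod(x, y) = x - y * floor(x / y) */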
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
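      /* Bring the shift count to the operand's type, then mask it to
       * bit_size - 1: LLVM shifts by >= the bit width are undefined. */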
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

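/*
 * Translate a NIR ALU instruction: gather and swizzle the sources, run the
 * op once per destination channel (vecN ops just re-gather their inputs),
 * and store the per-channel results.
 */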
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

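/*
 * Walk a deref chain and split the resulting IO offset into a constant
 * part (counted in attribute slots) and an optional per-lane indirect
 * part.  For per-vertex IO the leading array index is returned separately
 * as the vertex index.
 */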
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

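/*
 * Variable load/store: work out which shader stages need a vertex index
 * (GS inputs, TCS in/out, TES inputs), resolve the deref to offsets, and
 * defer to the backend's load_var/store_var callbacks.
 */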
static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef indir_vertex_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_in;
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
         var->data.mode == nir_var_shader_in && !var->data.patch;

      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index, indir_vertex_index = NULL;
   if (var) {
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}


static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

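/*
 * Image load/store: build lp_img_params from the image deref (target,
 * binding, coords).  1D arrays carry the layer in coords[2], so it is
 * copied over from coords[1].
 */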
static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

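/*
 * Image atomics: translate the NIR intrinsic to the matching LLVM atomic
 * RMW op (compare-and-swap gets a second input and LP_IMG_ATOMIC_CAS).
 */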
static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

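/*
 * Shared (workgroup-local) memory reuses the SSBO load_mem/store_mem and
 * atomic_mem hooks with a NULL buffer index.
 */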
static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_load_global(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                         nir_src_bit_size(instr->src[0]),
                         addr, result);
}

static void visit_store_global(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
}

static void visit_global_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
}

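/*
 * Central intrinsic dispatch.  System values (including the new
 * load_helper_invocation) go through backend callbacks; anything that
 * produced a value in result[0] is stored to the destination at the end.
 */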
static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_work_dim:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_load_helper_invocation:
      bld_base->helper_invocation(bld_base, &result[0]);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_control_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_shared:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   case nir_intrinsic_load_global:
      visit_load_global(bld_base, instr, result);
      break;
   case nir_intrinsic_store_global:
      visit_store_global(bld_base, instr);
      break;
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
      visit_global_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq:
      bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

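/*
 * Texture size/levels queries (txs, query_levels).  query_levels reads the
 * level count from component 3 of the size query result.
 */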
static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params;
   LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
   LLVMValueRef explicit_lod = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

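/*
 * Decide how the LOD varies across the SIMD vector: scalar if dynamically
 * uniform, per-quad in fragment shaders (unless disabled via
 * GALLIVM_PERF_NO_QUAD_LOD), otherwise per-element.
 */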
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   }
   else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

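/*
 * Texture sampling: gather coordinates, derivatives, LOD/bias, comparator
 * and projector from the tex instruction sources and encode them into the
 * lp_sampler_params sample key.
 */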
1489 static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1490 {
1491 struct gallivm_state *gallivm = bld_base->base.gallivm;
1492 LLVMBuilderRef builder = gallivm->builder;
1493 LLVMValueRef coords[5];
1494 LLVMValueRef offsets[3] = { NULL };
1495 LLVMValueRef explicit_lod = NULL, projector = NULL;
1496 struct lp_sampler_params params;
1497 struct lp_derivatives derivs;
1498 unsigned sample_key = 0;
1499 nir_deref_instr *texture_deref_instr = NULL;
1500 nir_deref_instr *sampler_deref_instr = NULL;
1501 LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
1502 unsigned lod_src = 0;
1503 LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
1504
1505 memset(&params, 0, sizeof(params));
1506 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
1507
1508 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
1509 visit_txs(bld_base, instr);
1510 return;
1511 }
1512 if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1513 sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
1514 else if (instr->op == nir_texop_tg4) {
1515 sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
1516 sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
1517 } else if (instr->op == nir_texop_lod)
1518 sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
1519 for (unsigned i = 0; i < instr->num_srcs; i++) {
1520 switch (instr->src[i].src_type) {
1521 case nir_tex_src_coord: {
1522 LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
1523 if (instr->coord_components == 1)
1524 coords[0] = coord;
1525 else {
1526 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1527 coords[chan] = LLVMBuildExtractValue(builder, coord,
1528 chan, "");
1529 }
1530 for (unsigned chan = instr->coord_components; chan < 5; chan++)
1531 coords[chan] = coord_undef;
1532
1533 break;
1534 }
1535 case nir_tex_src_texture_deref:
1536 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
1537 break;
1538 case nir_tex_src_sampler_deref:
1539 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
1540 break;
1541 case nir_tex_src_projector:
1542 projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
1543 break;
1544 case nir_tex_src_comparator:
1545 sample_key |= LP_SAMPLER_SHADOW;
1546 coords[4] = get_src(bld_base, instr->src[i].src);
1547 coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
1548 break;
1549 case nir_tex_src_bias:
1550 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
1551 lod_src = i;
1552 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1553 break;
1554 case nir_tex_src_lod:
1555 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
1556 lod_src = i;
1557 if (instr->op == nir_texop_txf)
1558 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1559 else
1560 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1561 break;
1562 case nir_tex_src_ddx: {
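      /* derivatives cover only the real coordinates, not the array layer
       * (the same applies to ddy below) */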
1563 int deriv_cnt = instr->coord_components;
1564 if (instr->is_array)
1565 deriv_cnt--;
1566 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1567 if (deriv_cnt == 1)
1568 derivs.ddx[0] = deriv_val;
1569 else
1570 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1571 derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
1572 chan, "");
1573 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1574 derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
1575 break;
1576 }
1577 case nir_tex_src_ddy: {
1578 int deriv_cnt = instr->coord_components;
1579 if (instr->is_array)
1580 deriv_cnt--;
1581 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1582 if (deriv_cnt == 1)
1583 derivs.ddy[0] = deriv_val;
1584 else
1585 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1586 derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
1587 chan, "");
1588 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1589 derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
1590 break;
1591 }
1592 case nir_tex_src_offset: {
1593 int offset_cnt = instr->coord_components;
1594 if (instr->is_array)
1595 offset_cnt--;
1596 LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
1597 sample_key |= LP_SAMPLER_OFFSETS;
1598 if (offset_cnt == 1)
1599 offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
1600 else {
1601 for (unsigned chan = 0; chan < offset_cnt; ++chan) {
1602 offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
1603 chan, "");
1604 offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
1605 }
1606 }
1607 break;
1608 }
1609 case nir_tex_src_ms_index:
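      /* the sample index is not used by this sampling path */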
1610 break;
1611 default:
1612 assert(0);
1613 break;
1614 }
1615 }
1616 if (!sampler_deref_instr)
1617 sampler_deref_instr = texture_deref_instr;
1618
1619 if (explicit_lod)
1620 lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
1621
1622 if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
1623 instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
1624 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1625 coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
1626 else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1627 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1628 coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
1629
1630 if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
1631 /* move layer coord for 1d arrays. */
1632 coords[2] = coords[1];
1633 coords[1] = coord_undef;
1634 }
1635
1636 if (projector) {
1637 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1638 coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
1639 if (sample_key & LP_SAMPLER_SHADOW)
1640 coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
1641 }
1642
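   /* texture/sampler derefs may already have been lowered to indices; in
    * that case take the index straight from the instruction, unless a
    * bindless sampler handle source is present */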
1643 uint32_t base_index = 0;
1644 if (!texture_deref_instr) {
1645 int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
1646 if (samp_src_index == -1) {
1647 base_index = instr->sampler_index;
1648 }
1649 }
1650
1651 if (instr->op == nir_texop_txd) {
1652 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
1653 params.derivs = &derivs;
1654 if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1655 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1656 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1657 else
1658 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1659 } else
1660 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1661 }
1662
1663 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
1664 params.sample_key = sample_key;
1665 params.offsets = offsets;
1666 params.texture_index = base_index;
1667 params.sampler_index = base_index;
1668 params.coords = coords;
1669 params.texel = texel;
1670 params.lod = explicit_lod;
1671 bld_base->tex(bld_base, &params);
1672 assign_dest(bld_base, &instr->dest, texel);
1673 }
1674
1675 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1676 const nir_ssa_undef_instr *instr)
1677 {
1678 unsigned num_components = instr->def.num_components;
1679 LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
1680 struct lp_build_context *undef_bld = get_int_bld(bld_base, true, instr->def.bit_size);
1681 for (unsigned i = 0; i < num_components; i++)
1682 undef[i] = LLVMGetUndef(undef_bld->vec_type);
1683 assign_ssa_dest(bld_base, &instr->def, undef);
1684 }
1685
1686 static void visit_jump(struct lp_build_nir_context *bld_base,
1687 const nir_jump_instr *instr)
1688 {
1689 switch (instr->type) {
1690 case nir_jump_break:
1691 bld_base->break_stmt(bld_base);
1692 break;
1693 case nir_jump_continue:
1694 bld_base->continue_stmt(bld_base);
1695 break;
1696 default:
      unreachable("Unknown jump instr");
1698 }
1699 }
1700
1701 static void visit_deref(struct lp_build_nir_context *bld_base,
1702 nir_deref_instr *instr)
1703 {
1704 if (instr->mode != nir_var_mem_shared &&
1705 instr->mode != nir_var_mem_global)
1706 return;
1707 LLVMValueRef result = NULL;
   switch (instr->deref_type) {
1709 case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
      assert(entry);
      result = entry->data;
1712 break;
1713 }
1714 default:
1715 unreachable("Unhandled deref_instr deref type");
1716 }
1717
1718 assign_ssa(bld_base, instr->dest.ssa.index, result);
1719 }
1720
1721 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1722 {
   nir_foreach_instr(instr, block) {
1725 switch (instr->type) {
1726 case nir_instr_type_alu:
1727 visit_alu(bld_base, nir_instr_as_alu(instr));
1728 break;
1729 case nir_instr_type_load_const:
1730 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1731 break;
1732 case nir_instr_type_intrinsic:
1733 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1734 break;
1735 case nir_instr_type_tex:
1736 visit_tex(bld_base, nir_instr_as_tex(instr));
1737 break;
1738 case nir_instr_type_phi:
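         /* phis were lowered away by nir_convert_from_ssa() in
          * lp_build_nir_llvm(), so none should survive to this point */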
1739 assert(0);
1740 break;
1741 case nir_instr_type_ssa_undef:
1742 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1743 break;
1744 case nir_instr_type_jump:
1745 visit_jump(bld_base, nir_instr_as_jump(instr));
1746 break;
1747 case nir_instr_type_deref:
1748 visit_deref(bld_base, nir_instr_as_deref(instr));
1749 break;
1750 default:
1751 fprintf(stderr, "Unknown NIR instr type: ");
1752 nir_print_instr(instr, stderr);
1753 fprintf(stderr, "\n");
1754 abort();
1755 }
1756 }
1757 }
1758
1759 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1760 {
1761 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1762
1763 bld_base->if_cond(bld_base, cond);
1764 visit_cf_list(bld_base, &if_stmt->then_list);
1765
1766 if (!exec_list_is_empty(&if_stmt->else_list)) {
1767 bld_base->else_stmt(bld_base);
1768 visit_cf_list(bld_base, &if_stmt->else_list);
1769 }
1770 bld_base->endif_stmt(bld_base);
1771 }
1772
1773 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1774 {
1775 bld_base->bgnloop(bld_base);
1776 visit_cf_list(bld_base, &loop->body);
1777 bld_base->endloop(bld_base);
1778 }
1779
1780 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1781 struct exec_list *list)
1782 {
   foreach_list_typed(nir_cf_node, node, node, list) {
1785 switch (node->type) {
1786 case nir_cf_node_block:
1787 visit_block(bld_base, nir_cf_node_as_block(node));
1788 break;
1789
1790 case nir_cf_node_if:
1791 visit_if(bld_base, nir_cf_node_as_if(node));
1792 break;
1793
1794 case nir_cf_node_loop:
1795 visit_loop(bld_base, nir_cf_node_as_loop(node));
1796 break;
1797
1798 default:
1799 assert(0);
1800 }
1801 }
1802 }
1803
1804 static void
1805 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
1806 struct nir_shader *nir,
1807 struct nir_variable *variable)
1808 {
1809 bld_base->emit_var_decl(bld_base, variable);
1810 }
1811
/* Vector registers are stored as arrays on the LLVM side so that we can
 * use GEP on them: exec-mask stores have to operate on one component at a
 * time. The arrays are laid out component-major:
 *    0.x, 1.x, 2.x, 3.x
 *    0.y, 1.y, 2.y, 3.y
 *    ...
 */
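/* For example, with 8-wide SIMD a vec4 register becomes the LLVM type
 * [4 x <8 x i32>], and a single component row can be addressed with a GEP
 * roughly like this (a sketch only; 'reg_alloc' and 'comp' are
 * illustrative names, not code in this file):
 *
 *    LLVMValueRef indices[2] = {
 *       lp_build_const_int32(gallivm, 0),     // step through the alloca
 *       lp_build_const_int32(gallivm, comp),  // select the component row
 *    };
 *    LLVMValueRef row_ptr = LLVMBuildGEP(builder, reg_alloc, indices, 2, "");
 *
 * which yields a pointer to one <8 x i32> row that an exec-mask store can
 * update without disturbing the other components.
 */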
1820 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1821 nir_register *reg)
1822 {
1823 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1824
1825 LLVMTypeRef type = int_bld->vec_type;
1826 if (reg->num_array_elems)
1827 type = LLVMArrayType(type, reg->num_array_elems);
1828 if (reg->num_components > 1)
1829 type = LLVMArrayType(type, reg->num_components);
1830
1831 return type;
1832 }
1833
1834
1835 bool lp_build_nir_llvm(
1836 struct lp_build_nir_context *bld_base,
1837 struct nir_shader *nir)
1838 {
1839 struct nir_function *func;
1840
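   /* lower the NIR into a shape this builder can consume: convert phi
    * webs out of SSA (the 'true' argument), turn local variables into
    * registers, then prune derefs and variables that are left unused */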
1841 nir_convert_from_ssa(nir, true);
1842 nir_lower_locals_to_regs(nir);
1843 nir_remove_dead_derefs(nir);
1844 nir_remove_dead_variables(nir, nir_var_function_temp);
1845
1846 nir_foreach_variable(variable, &nir->outputs)
1847 handle_shader_output_decl(bld_base, nir, variable);
1848
1849 bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1850 _mesa_key_pointer_equal);
1851 bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1852 _mesa_key_pointer_equal);
1853
1854 func = (struct nir_function *)exec_list_get_head(&nir->functions);
1855
1856 nir_foreach_register(reg, &func->impl->registers) {
1857 LLVMTypeRef type = get_register_type(bld_base, reg);
1858 LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
1859 type, "reg");
1860 _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
1861 }
1862 nir_index_ssa_defs(func->impl);
1863 bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
1864 visit_cf_list(bld_base, &func->impl->body);
1865
1866 free(bld_base->ssa_defs);
1867 ralloc_free(bld_base->vars);
1868 ralloc_free(bld_base->regs);
1869 return true;
1870 }
1871
/* run some basic NIR optimizations to clean up constructs this backend
 * would rather not see. */
1873 void lp_build_opt_nir(struct nir_shader *nir)
1874 {
   bool progress;
   do {
      progress = false;
      /* NIR_PASS records whether each pass made progress; the _V variant
       * would leave 'progress' false and end the loop after one pass */
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
      NIR_PASS(progress, nir, nir_lower_tex, &options);
   } while (progress);
1885 nir_lower_bool_to_int32(nir);
1886 }