gallivm: pick integer builders for alu instructions.
[mesa.git] src/gallium/auxiliary/gallivm/lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

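/*
 * Reinterpret a value as the gallivm vector type matching the given NIR
 * ALU type and bit size.  This is a pure bitcast, not a conversion;
 * types without an explicit case are returned unchanged.
 */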
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}

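/*
 * Pick the float build context for an op: the double context for 64-bit
 * ops, the base (32-bit float) context otherwise.
 */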
static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

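/*
 * Map a GLSL sampler dimension (plus arrayness) onto the corresponding
 * pipe texture target; unhandled dimensions fall back to PIPE_BUFFER.
 */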
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[4])
{
   assign_ssa(bld_base, ssa->index,
              ssa->num_components == 1 ? vals[0] :
              lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[4])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[4])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[4])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

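/*
 * Fetch an ALU source and apply its swizzle.  Multi-component values
 * live in LLVM arrays of per-channel vectors, so swizzles are realized
 * with extractvalue/insertvalue rather than vector shuffles; a scalar
 * source is replicated when more components are requested.
 */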
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[4],
                                 LLVMValueRef src[4])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

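/*
 * 64-bit values occupy two 32-bit lanes per element.  split_64bit()
 * shuffles out the low (hi == false) or high (hi == true) halves
 * (assuming the usual little-endian lane order: for a 4-wide vector the
 * low halves are lanes {0,2,4,6}, the high halves {1,3,5,7});
 * merge_64bit() below is the inverse, interleaving two 32-bit vectors
 * back into 64-bit lanes.
 */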
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);
   /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
    * we may as well do the same for idiv. */
   return LLVMBuildOr(builder, div_mask, result, "");
}

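/*
 * Emit the LLVM IR for one (already scalarized and type-cast) NIR ALU
 * op.  Most cases simply dispatch to the matching lp_build_* helper,
 * picking a float or integer build context from the source bit size.
 */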
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[4], LLVMValueRef src[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_irem:
      result = lp_build_mod(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = lp_build_mod(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

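/*
 * Translate a NIR ALU instruction.  vecN ops just gather their scalar
 * sources; everything else is handled per channel: each destination
 * channel is computed by do_alu_action() on the corresponding source
 * channels, then cast to the op's output type.
 */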
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[4];
   unsigned src_bit_size[4];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[4];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[4];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[4];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

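/*
 * Walk a deref chain and split it into a constant offset plus an
 * optional indirect (per-lane) offset, both counted in attribute slots.
 * For per-vertex IO the leading array index is returned separately via
 * vertex_index_out/vertex_index_ref.
 */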
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld,
                                                  lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index = 0;
   LLVMValueRef indir_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index = NULL;
   if (var)
      get_deref_offset(bld_base, deref, false, NULL, NULL,
                       &const_index, &indir_index);
   bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}

static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[4] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params;
   LLVMValueRef sizes_out[4];
   LLVMValueRef explicit_lod = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

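/*
 * Decide how uniform an explicit LOD is: scalar when the source is
 * dynamically uniform, otherwise per-quad in fragment shaders (unless
 * GALLIVM_PERF_NO_QUAD_LOD requests per-element) and per-element in all
 * other stages.
 */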
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   }
   else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

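/*
 * Translate a NIR texture instruction: gather coords, projector,
 * comparator, LOD/bias, derivatives and offsets into lp_sampler_params,
 * encode the sample_key and hand off to the backend's tex() hook.
 * txs/query_levels queries are routed to visit_txs() instead.
 */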
static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, projector = NULL;
   struct lp_sampler_params params;
   struct lp_derivatives derivs;
   unsigned sample_key = 0;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texel[4];
   unsigned lod_src = 0;
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);

   memset(&params, 0, sizeof(params));
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;

   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
      visit_txs(bld_base, instr);
      return;
   }
   if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_tg4)
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_lod)
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (instr->coord_components == 1)
            coords[0] = coord;
         else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = instr->coord_components; chan < 5; chan++)
            coords[chan] = coord_undef;
         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_projector:
         projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
         break;
      case nir_tex_src_comparator:
         sample_key |= LP_SAMPLER_SHADOW;
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         sample_key |= LP_SAMPLER_OFFSETS;
         if (offset_cnt == 1)
            offsets[0] = offset_val;
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan)
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
         }
         break;
      }
      case nir_tex_src_ms_index:
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);

   if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
       instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
   else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   if (projector) {
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
      if (sample_key & LP_SAMPLER_SHADOW)
         coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
   }

   uint32_t base_index = 0;
   if (!texture_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         base_index = instr->sampler_index;
      }
   }

   if (instr->op == nir_texop_txd) {
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      params.derivs = &derivs;
      if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
   params.sample_key = sample_key;
   params.offsets = offsets;
   params.texture_index = base_index;
   params.sampler_index = base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   bld_base->tex(bld_base, &params);
   assign_dest(bld_base, &instr->dest, texel);
}

static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[4];
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(bld_base->base.vec_type);
   assign_ssa_dest(bld_base, &instr->def, undef);
}

static void visit_jump(struct lp_build_nir_context *bld_base,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}

static void visit_deref(struct lp_build_nir_context *bld_base,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;
   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->dest.ssa.index, result);
}

static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         assert(0);
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bld_base->if_cond(bld_base, cond);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bld_base->else_stmt(bld_base);
      visit_cf_list(bld_base, &if_stmt->else_list);
   }
   bld_base->endif_stmt(bld_base);
}

static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;
      default:
         assert(0);
      }
   }
}

static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}

/* Vector registers are stored as arrays on the LLVM side, so we can use
 * GEP on them; to do exec-mask stores we need to operate on a single
 * component at a time.  The arrays are laid out as:
 *   0.x, 1.x, 2.x, 3.x
 *   0.y, 1.y, 2.y, 3.y
 *   ....
 */
static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
                                     nir_register *reg)
{
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (reg->num_array_elems)
      type = LLVMArrayType(type, reg->num_array_elems);
   if (reg->num_components > 1)
      type = LLVMArrayType(type, reg->num_components);

   return type;
}

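/*
 * Main entry point: take the shader out of SSA (locals become registers
 * backed by allocas), declare outputs, then walk the control-flow list
 * of what is expected to be the single function in the shader.
 */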
bool lp_build_nir_llvm(
   struct lp_build_nir_context *bld_base,
   struct nir_shader *nir)
{
   struct nir_function *func;

   nir_convert_from_ssa(nir, true);
   nir_lower_locals_to_regs(nir);
   nir_remove_dead_derefs(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp);

   nir_foreach_variable(variable, &nir->outputs)
      handle_shader_output_decl(bld_base, nir, variable);

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_foreach_register(reg, &func->impl->registers) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
                                                     type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(func->impl);
   bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &func->impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   return true;
}

/* Run some basic optimizations to clean up things we don't want to see. */
void lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
   } while (progress);
   nir_lower_bool_to_int32(nir);
}