gallivm/nir: cleanup code and call cmp wrapper
src/gallium/auxiliary/gallivm/lp_bld_nir.c
1 /**************************************************************************
2 *
3 * Copyright 2019 Red Hat.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **************************************************************************/
25
26 #include "lp_bld_nir.h"
27 #include "lp_bld_arit.h"
28 #include "lp_bld_bitarit.h"
29 #include "lp_bld_const.h"
30 #include "lp_bld_gather.h"
31 #include "lp_bld_logic.h"
32 #include "lp_bld_quad.h"
33 #include "lp_bld_flow.h"
34 #include "lp_bld_struct.h"
35 #include "lp_bld_debug.h"
36 #include "lp_bld_printf.h"
37 #include "nir_deref.h"
38
39 static void visit_cf_list(struct lp_build_nir_context *bld_base,
40 struct exec_list *list);
41
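/*
 * Bitcast a (typeless) value to the LLVM vector type implied by the NIR
 * ALU type and bit size.  Types without an explicit case here fall
 * through unmodified.
 */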
42 static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
43 nir_alu_type alu_type, unsigned bit_size)
44 {
45 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
46 switch (alu_type) {
47 case nir_type_float:
48 switch (bit_size) {
49 case 32:
50 return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
51 case 64:
52 return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
53 default:
54 assert(0);
55 break;
56 }
57 break;
58 case nir_type_int:
59 switch (bit_size) {
60 case 32:
61 return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
62 case 64:
63 return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
64 default:
65 assert(0);
66 break;
67 }
68 break;
69 case nir_type_uint:
70 switch (bit_size) {
71 case 32:
72 return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
73 case 64:
74 return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
75 default:
76 assert(0);
77 break;
78 }
79 break;
80 case nir_type_uint32:
81 return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
82 default:
83 return val;
84 }
85 return NULL;
86 }
87
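/* Pick the integer build context matching the signedness and bit width. */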
88 static struct lp_build_context *get_int_bld(struct lp_build_nir_context *bld_base,
89 bool is_unsigned,
90 unsigned op_bit_size)
91 {
92 if (is_unsigned)
93 if (op_bit_size == 64)
94 return &bld_base->uint64_bld;
95 else
96 return &bld_base->uint_bld;
97 else if (op_bit_size == 64)
98 return &bld_base->int64_bld;
99 else
100 return &bld_base->int_bld;
101 }
102
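/* Pick the float build context for the bit width (float or double). */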
103 static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
104 unsigned op_bit_size)
105 {
106 if (op_bit_size == 64)
107 return &bld_base->dbl_bld;
108 else
109 return &bld_base->base;
110 }
111
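/*
 * Map a GLSL sampler dimension (plus arrayness) onto the corresponding
 * pipe texture target.
 */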
112 static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
113 {
114 unsigned pipe_target = PIPE_BUFFER;
115 switch (sampler_dim) {
116 case GLSL_SAMPLER_DIM_1D:
117 pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
118 break;
119 case GLSL_SAMPLER_DIM_2D:
120 pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
121 break;
122 case GLSL_SAMPLER_DIM_3D:
123 pipe_target = PIPE_TEXTURE_3D;
124 break;
125 case GLSL_SAMPLER_DIM_CUBE:
126 pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
127 break;
128 case GLSL_SAMPLER_DIM_RECT:
129 pipe_target = PIPE_TEXTURE_RECT;
130 break;
131 case GLSL_SAMPLER_DIM_BUF:
132 pipe_target = PIPE_BUFFER;
133 break;
134 default:
135 break;
136 }
137 return pipe_target;
138 }
139
140 static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
141 {
142 return bld_base->ssa_defs[ssa->index];
143 }
144
145 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);
146
147 static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
148 {
149 struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
150 LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
151 struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
152 LLVMValueRef indir_src = NULL;
153 if (src.indirect)
154 indir_src = get_src(bld_base, *src.indirect);
155 return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
156 }
157
158 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
159 {
160 if (src.is_ssa)
161 return get_ssa_src(bld_base, src.ssa);
162 else
163 return get_reg_src(bld_base, src.reg);
164 }
165
166 static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
167 {
168 bld_base->ssa_defs[idx] = ptr;
169 }
170
171 static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
172 LLVMValueRef vals[4])
173 {
174 assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] : lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
175 }
176
177 static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
178 unsigned write_mask,
179 LLVMValueRef vals[4])
180 {
181 struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
182 LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
183 struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
184 LLVMValueRef indir_src = NULL;
185 if (reg->indirect)
186 indir_src = get_src(bld_base, *reg->indirect);
187 bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
188 }
189
190 static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[4])
191 {
192 if (dest->is_ssa)
193 assign_ssa_dest(bld_base, &dest->ssa, vals);
194 else
195 assign_reg(bld_base, &dest->reg, 0, vals);
196 }
197
198 static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[4])
199 {
200 if (dest->dest.is_ssa)
201 assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
202 else
203 assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
204 }
205
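/*
 * Comparison helpers: NIR's 32-bit booleans are represented here as full
 * ~0/0 masks, so 64-bit comparison results are truncated back down to
 * the 32-bit mask width.
 */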
206 static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
207 uint32_t src_bit_size,
208 bool is_unsigned,
209 LLVMValueRef val)
210 {
211 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
212 struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
213 LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
214 if (src_bit_size == 64)
215 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
216 return result;
217 }
218
219 static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
220 uint32_t src_bit_size,
221 LLVMValueRef val)
222 {
223 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
224 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
225 LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
226 if (src_bit_size == 64)
227 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
228 return result;
229 }
230
231 static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
232 enum pipe_compare_func compare,
233 uint32_t src_bit_size,
234 LLVMValueRef src[4])
235 {
236 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
237 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
238 LLVMValueRef result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
239 if (src_bit_size == 64)
240 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
241 return result;
242 }
243
244 static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
245 enum pipe_compare_func compare,
246 bool is_unsigned,
247 uint32_t src_bit_size,
248 LLVMValueRef src[4])
249 {
250 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
251 struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
252 LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
253 if (src_bit_size == 64)
254 result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
255 return result;
256 }
257
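/*
 * Fetch an ALU source and apply its swizzle, extracting a single channel
 * or broadcasting/shuffling as needed so the result has num_components
 * channels.  Negate/abs modifiers are expected to have been lowered.
 */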
258 static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
259 nir_alu_src src,
260 unsigned num_components)
261 {
262 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
263 struct gallivm_state *gallivm = bld_base->base.gallivm;
264 LLVMValueRef value = get_src(bld_base, src.src);
265 bool need_swizzle = false;
266
267 assert(value);
268 unsigned src_components = nir_src_num_components(src.src);
269 for (unsigned i = 0; i < num_components; ++i) {
270 assert(src.swizzle[i] < src_components);
271 if (src.swizzle[i] != i)
272 need_swizzle = true;
273 }
274
275 if (need_swizzle || num_components != src_components) {
276 if (src_components > 1 && num_components == 1) {
277 value = LLVMBuildExtractValue(gallivm->builder, value,
278 src.swizzle[0], "");
279 } else if (src_components == 1 && num_components > 1) {
280 LLVMValueRef values[] = {value, value, value, value};
281 value = lp_nir_array_build_gather_values(builder, values, num_components);
282 } else {
283 LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
284 for (unsigned i = 0; i < num_components; i++)
285 arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
286 value = arr;
287 }
288 }
289 assert(!src.negate);
290 assert(!src.abs);
291 return value;
292 }
293
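/*
 * Booleans are ~0/0 masks, so b2f reduces to ANDing with the bit pattern
 * of 1.0f (widened afterwards for 64-bit results).
 */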
294 static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
295 LLVMValueRef src0,
296 unsigned bitsize)
297 {
298 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
299 LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
300 LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
301 1.0), bld_base->int_bld.vec_type, ""),
302 "");
303 result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
304 switch (bitsize) {
305 case 32:
306 break;
307 case 64:
308 result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
309 break;
310 default:
311 unreachable("unsupported bit size.");
312 }
313 return result;
314 }
315
316 static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
317 LLVMValueRef src0,
318 unsigned bitsize)
319 {
320 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
321 LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
322 lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
323 switch (bitsize) {
324 case 32:
325 return result;
326 case 64:
327 return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
328 default:
329 unreachable("unsupported bit size.");
330 }
331 }
332
333 static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
334 unsigned src_bit_size[4],
335 LLVMValueRef src[4])
336 {
337 LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
338 LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
339 struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
340 return lp_build_select(bld, v, src[1], src[2]);
341 }
342
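/*
 * Return the low (hi == false) or high 32-bit halves of a 64-bit vector
 * by bitcasting it to a twice-as-long i32 vector and shuffling out the
 * even or odd elements; merge_64bit below does the inverse interleave.
 */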
343 static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
344 LLVMValueRef src,
345 bool hi)
346 {
347 struct gallivm_state *gallivm = bld_base->base.gallivm;
348 LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
349 LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
350 int len = bld_base->base.type.length * 2;
351 for (unsigned i = 0; i < bld_base->base.type.length; i++) {
352 shuffles[i] = lp_build_const_int32(gallivm, i * 2);
353 shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
354 }
355
356 src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
357 return LLVMBuildShuffleVector(gallivm->builder, src,
358 LLVMGetUndef(LLVMTypeOf(src)),
359 LLVMConstVector(hi ? shuffles2 : shuffles,
360 bld_base->base.type.length),
361 "");
362 }
363
364 static LLVMValueRef
365 merge_64bit(struct lp_build_nir_context *bld_base,
366 LLVMValueRef input,
367 LLVMValueRef input2)
368 {
369 struct gallivm_state *gallivm = bld_base->base.gallivm;
370 LLVMBuilderRef builder = gallivm->builder;
371 int i;
372 LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
373 int len = bld_base->base.type.length * 2;
374 assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));
375
376 for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
377 shuffles[i] = lp_build_const_int32(gallivm, i / 2);
378 shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
379 }
380 return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
381 }
382
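/* Emit the LLVM IR for one (per-channel) NIR ALU operation. */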
383 static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
384 nir_op op, unsigned src_bit_size[4], LLVMValueRef src[4])
385 {
386 struct gallivm_state *gallivm = bld_base->base.gallivm;
387 LLVMBuilderRef builder = gallivm->builder;
388 LLVMValueRef result;
389 switch (op) {
390 case nir_op_b2f32:
391 result = emit_b2f(bld_base, src[0], 32);
392 break;
393 case nir_op_b2f64:
394 result = emit_b2f(bld_base, src[0], 64);
395 break;
396 case nir_op_b2i32:
397 result = emit_b2i(bld_base, src[0], 32);
398 break;
399 case nir_op_b2i64:
400 result = emit_b2i(bld_base, src[0], 64);
401 break;
402 case nir_op_b32csel:
403 result = emit_b32csel(bld_base, src_bit_size, src);
404 break;
405 case nir_op_bit_count:
406 result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
407 break;
408 case nir_op_bitfield_select:
409 result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
410 break;
411 case nir_op_bitfield_reverse:
412 result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
413 break;
414 case nir_op_f2b32:
415 result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
416 break;
417 case nir_op_f2f32:
418 result = LLVMBuildFPTrunc(builder, src[0],
419 bld_base->base.vec_type, "");
420 break;
421 case nir_op_f2f64:
422 result = LLVMBuildFPExt(builder, src[0],
423 bld_base->dbl_bld.vec_type, "");
424 break;
425 case nir_op_f2i32:
426 result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
427 break;
428 case nir_op_f2u32:
429 result = LLVMBuildFPToUI(builder,
430 src[0],
431 bld_base->base.int_vec_type, "");
432 break;
433 case nir_op_f2i64:
434 result = LLVMBuildFPToSI(builder,
435 src[0],
436 bld_base->int64_bld.vec_type, "");
437 break;
438 case nir_op_f2u64:
439 result = LLVMBuildFPToUI(builder,
440 src[0],
441 bld_base->uint64_bld.vec_type, "");
442 break;
443 case nir_op_fabs:
444 result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
445 break;
446 case nir_op_fadd:
447 result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
448 src[0], src[1]);
449 break;
450 case nir_op_fceil:
451 result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
452 break;
453 case nir_op_fcos:
454 result = lp_build_cos(&bld_base->base, src[0]);
455 break;
456 case nir_op_fddx:
457 result = lp_build_ddx(&bld_base->base, src[0]);
458 break;
459 case nir_op_fddy:
460 result = lp_build_ddy(&bld_base->base, src[0]);
461 break;
462 case nir_op_fdiv:
463 result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
464 src[0], src[1]);
465 break;
466 case nir_op_feq32:
467 result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
468 break;
469 case nir_op_fexp2:
470 result = lp_build_exp2(&bld_base->base, src[0]);
471 break;
472 case nir_op_ffloor:
473 result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
474 break;
475 case nir_op_ffma:
476 result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
477 break;
478 case nir_op_ffract: {
479 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
480 LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
481 result = lp_build_sub(flt_bld, src[0], tmp);
482 break;
483 }
484 case nir_op_fge32:
485 result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
486 break;
487 case nir_op_find_lsb:
488 result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
489 break;
490 case nir_op_flog2:
491 result = lp_build_log2_safe(&bld_base->base, src[0]);
492 break;
493 case nir_op_flt32:
494 result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
495 break;
496 case nir_op_fmin:
497 result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
498 break;
499 case nir_op_fmod: {
500 struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
501 result = lp_build_div(flt_bld, src[0], src[1]);
502 result = lp_build_floor(flt_bld, result);
503 result = lp_build_mul(flt_bld, src[1], result);
504 result = lp_build_sub(flt_bld, src[0], result);
505 break;
506 }
507 case nir_op_fmul:
508 result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
509 src[0], src[1]);
510 break;
511 case nir_op_fmax:
512 result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
513 break;
514 case nir_op_fne32:
515 result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
516 break;
517 case nir_op_fneg:
518 result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
519 break;
520 case nir_op_fpow:
521 result = lp_build_pow(&bld_base->base, src[0], src[1]);
522 break;
523 case nir_op_frcp:
524 result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
525 break;
526 case nir_op_fround_even:
527 result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
528 break;
529 case nir_op_frsq:
530 result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
531 break;
532 case nir_op_fsat:
533 result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
534 break;
535 case nir_op_fsign:
536 result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
537 break;
538 case nir_op_fsin:
539 result = lp_build_sin(&bld_base->base, src[0]);
540 break;
541 case nir_op_fsqrt:
542 result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
543 break;
544 case nir_op_ftrunc:
545 result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
546 break;
547 case nir_op_i2b32:
548 result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
549 break;
550 case nir_op_i2f32:
551 result = lp_build_int_to_float(&bld_base->base, src[0]);
552 break;
553 case nir_op_i2f64:
554 result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
555 break;
556 case nir_op_i2i32:
557 result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
558 break;
559 case nir_op_i2i64:
560 result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
561 break;
562 case nir_op_iabs:
563 result = lp_build_abs(&bld_base->int_bld, src[0]);
564 break;
565 case nir_op_iadd:
566 result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
567 src[0], src[1]);
568 break;
569 case nir_op_iand:
570 result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
571 src[0], src[1]);
572 break;
573 case nir_op_idiv:
574 result = lp_build_div(&bld_base->int_bld,
575 src[0], src[1]);
576 break;
577 case nir_op_ieq32:
578 result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
579 break;
580 case nir_op_ige32:
581 result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
582 break;
583 case nir_op_ilt32:
584 result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
585 break;
586 case nir_op_imax:
587 result = lp_build_max(&bld_base->int_bld, src[0], src[1]);
588 break;
589 case nir_op_imin:
590 result = lp_build_min(&bld_base->int_bld, src[0], src[1]);
591 break;
592 case nir_op_imul:
593 result = lp_build_mul(&bld_base->int_bld,
594 src[0], src[1]);
595 break;
596 case nir_op_imul_high: {
597 LLVMValueRef hi_bits;
598 lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
599 result = hi_bits;
600 break;
601 }
602 case nir_op_ine32:
603 result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
604 break;
605 case nir_op_ineg:
606 result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
607 break;
608 case nir_op_inot:
609 result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
610 break;
611 case nir_op_ior:
612 result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
613 src[0], src[1]);
614 break;
615 case nir_op_ishl:
616 src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
617 result = lp_build_shl(&bld_base->int_bld, src[0], src[1]);
618 break;
619 case nir_op_ishr:
620 src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
621 result = lp_build_shr(&bld_base->int_bld, src[0], src[1]);
622 break;
623 case nir_op_isign:
624 result = lp_build_sgn(&bld_base->int_bld, src[0]);
625 break;
626 case nir_op_ixor:
627 result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
628 src[0], src[1]);
629 break;
630 case nir_op_mov:
631 result = src[0];
632 break;
633 case nir_op_unpack_64_2x32_split_x:
634 result = split_64bit(bld_base, src[0], false);
635 break;
636 case nir_op_unpack_64_2x32_split_y:
637 result = split_64bit(bld_base, src[0], true);
638 break;
639
640 case nir_op_pack_64_2x32_split: {
641 LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
642 result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
643 break;
644 }
645 case nir_op_u2f32:
646 result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
647 break;
648 case nir_op_u2f64:
649 result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
650 break;
651 case nir_op_u2u32:
652 result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
653 break;
654 case nir_op_u2u64:
655 result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
656 break;
657 case nir_op_udiv:
658 result = lp_build_div(&bld_base->uint_bld,
659 src[0], src[1]);
660 break;
661 case nir_op_ufind_msb: {
662 struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
663 result = lp_build_ctlz(uint_bld, src[0]);
664 result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
665 break;
666 }
667 case nir_op_uge32:
668 result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
669 break;
670 case nir_op_ult32:
671 result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
672 break;
673 case nir_op_umax:
674 result = lp_build_max(&bld_base->uint_bld, src[0], src[1]);
675 break;
676 case nir_op_umin:
677 result = lp_build_min(&bld_base->uint_bld, src[0], src[1]);
678 break;
679 case nir_op_umod:
680 result = lp_build_mod(&bld_base->uint_bld, src[0], src[1]);
681 break;
682 case nir_op_umul_high: {
683 LLVMValueRef hi_bits;
684 lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
685 result = hi_bits;
686 break;
687 }
688 case nir_op_ushr:
689 src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
690 result = lp_build_shr(&bld_base->uint_bld, src[0], src[1]);
691 break;
692 default:
693 assert(0);
694 break;
695 }
696 return result;
697 }
698
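/*
 * Translate a NIR ALU instruction: gather and swizzle the sources, then
 * emit the op one channel at a time (vecN ops just re-cast their inputs).
 */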
699 static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
700 {
701 struct gallivm_state *gallivm = bld_base->base.gallivm;
702 LLVMValueRef src[4];
703 unsigned src_bit_size[4];
704 unsigned num_components = nir_dest_num_components(instr->dest.dest);
705 unsigned src_components;
706 switch (instr->op) {
707 case nir_op_vec2:
708 case nir_op_vec3:
709 case nir_op_vec4:
710 src_components = 1;
711 break;
712 case nir_op_pack_half_2x16:
713 src_components = 2;
714 break;
715 case nir_op_unpack_half_2x16:
716 src_components = 1;
717 break;
718 case nir_op_cube_face_coord:
719 case nir_op_cube_face_index:
720 src_components = 3;
721 break;
722 default:
723 src_components = num_components;
724 break;
725 }
726 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
727 src[i] = get_alu_src(bld_base, instr->src[i], src_components);
728 src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
729 }
730
731 LLVMValueRef result[4];
732 if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
733 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
734 result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
735 }
736 } else {
737 for (unsigned c = 0; c < num_components; c++) {
738 LLVMValueRef src_chan[4];
739
740 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
741 if (num_components > 1) {
742 src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
743 src[i], c, "");
744 } else
745 src_chan[i] = src[i];
746 src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
747 }
748 result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
749 result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
750 }
751 }
752 assign_alu_dest(bld_base, &instr->dest, result);
753 }
754
755 static void visit_load_const(struct lp_build_nir_context *bld_base,
756 const nir_load_const_instr *instr)
757 {
758 LLVMValueRef result[4];
759 struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
760 for (unsigned i = 0; i < instr->def.num_components; i++)
761 result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
762 assign_ssa_dest(bld_base, &instr->def, result);
763 }
764
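/*
 * Walk a deref chain and split the resulting offset into a constant part
 * plus an optional indirect LLVM value, both counted in attribute slots;
 * optionally also returns the outermost (vertex) array index.
 */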
765 static void
766 get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
767 bool vs_in, unsigned *vertex_index_out,
768 LLVMValueRef *vertex_index_ref,
769 unsigned *const_out, LLVMValueRef *indir_out)
770 {
771 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
772 nir_variable *var = nir_deref_instr_get_variable(instr);
773 nir_deref_path path;
774 unsigned idx_lvl = 1;
775
776 nir_deref_path_init(&path, instr, NULL);
777
778 if (vertex_index_out != NULL || vertex_index_ref != NULL) {
779 if (vertex_index_ref) {
780 *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
781 if (vertex_index_out)
782 *vertex_index_out = 0;
783 } else {
784 *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
785 }
786 ++idx_lvl;
787 }
788
789 uint32_t const_offset = 0;
790 LLVMValueRef offset = NULL;
791
792 if (var->data.compact) {
793 assert(instr->deref_type == nir_deref_type_array);
794 const_offset = nir_src_as_uint(instr->arr.index);
795 goto out;
796 }
797
798 for (; path.path[idx_lvl]; ++idx_lvl) {
799 const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
800 if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
801 unsigned index = path.path[idx_lvl]->strct.index;
802
803 for (unsigned i = 0; i < index; i++) {
804 const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
805 const_offset += glsl_count_attribute_slots(ft, vs_in);
806 }
807 } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
808 unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
809 if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
810 const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
811 } else {
812 LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
813 idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
814 LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
815 idx_src);
816 if (offset)
817 offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
818 else
819 offset = array_off;
820 }
821 } else
822 unreachable("Unhandled deref type in get_deref_offset");
823 }
824
825 out:
826 nir_deref_path_finish(&path);
827
828 if (const_offset && offset)
829 offset = LLVMBuildAdd(builder, offset,
830 lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
831 "");
832 *const_out = const_offset;
833 *indir_out = offset;
834 }
835
836 static void visit_load_var(struct lp_build_nir_context *bld_base,
837 nir_intrinsic_instr *instr,
838 LLVMValueRef result[4])
839 {
840 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
841 nir_variable *var = nir_deref_instr_get_variable(deref);
842 nir_variable_mode mode = deref->mode;
843 unsigned const_index = 0;
844 LLVMValueRef indir_index = NULL;
845 unsigned vertex_index = 0;
846 unsigned nc = nir_dest_num_components(instr->dest);
847 unsigned bit_size = nir_dest_bit_size(instr->dest);
848 if (var) {
849 bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
850 var->data.mode == nir_var_shader_in;
851 bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
852 var->data.mode == nir_var_shader_in;
853 mode = var->data.mode;
854
855 get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
856 &const_index, &indir_index);
857 }
858 bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
859 }
860
861 static void
862 visit_store_var(struct lp_build_nir_context *bld_base,
863 nir_intrinsic_instr *instr)
864 {
865 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
866 nir_variable *var = nir_deref_instr_get_variable(deref);
867 nir_variable_mode mode = deref->mode;
868 int writemask = instr->const_index[0];
869 unsigned bit_size = nir_src_bit_size(instr->src[1]);
870 LLVMValueRef src = get_src(bld_base, instr->src[1]);
871 unsigned const_index = 0;
872 LLVMValueRef indir_index = NULL;
873 if (var)
874 get_deref_offset(bld_base, deref, false, NULL, NULL,
875 &const_index, &indir_index);
876 bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
877 }
878
879 static void visit_load_ubo(struct lp_build_nir_context *bld_base,
880 nir_intrinsic_instr *instr,
881 LLVMValueRef result[4])
882 {
883 struct gallivm_state *gallivm = bld_base->base.gallivm;
884 LLVMBuilderRef builder = gallivm->builder;
885 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
886 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
887
888 bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
889 idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
890 bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
891 offset_is_uniform, idx, offset, result);
892 }
893
894
895 static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
896 nir_intrinsic_instr *instr,
897 LLVMValueRef result[4])
898 {
899 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
900 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
901 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
902 idx, offset, result);
903 }
904
905 static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
906 nir_intrinsic_instr *instr)
907 {
908 LLVMValueRef val = get_src(bld_base, instr->src[0]);
909 LLVMValueRef idx = get_src(bld_base, instr->src[1]);
910 LLVMValueRef offset = get_src(bld_base, instr->src[2]);
911 int writemask = instr->const_index[0];
912 int nc = nir_src_num_components(instr->src[0]);
913 int bitsize = nir_src_bit_size(instr->src[0]);
914 bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
915 }
916
917 static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
918 nir_intrinsic_instr *instr,
919 LLVMValueRef result[4])
920 {
921 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
922 result[0] = bld_base->get_buffer_size(bld_base, idx);
923 }
924
925 static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
926 nir_intrinsic_instr *instr,
927 LLVMValueRef result[4])
928 {
929 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
930 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
931 LLVMValueRef val = get_src(bld_base, instr->src[2]);
932 LLVMValueRef val2 = NULL;
933 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
934 val2 = get_src(bld_base, instr->src[3]);
935
936 bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
937
938 }
939
940 static void visit_load_image(struct lp_build_nir_context *bld_base,
941 nir_intrinsic_instr *instr,
942 LLVMValueRef result[4])
943 {
944 struct gallivm_state *gallivm = bld_base->base.gallivm;
945 LLVMBuilderRef builder = gallivm->builder;
946 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
947 nir_variable *var = nir_deref_instr_get_variable(deref);
948 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
949 LLVMValueRef coords[5];
950 struct lp_img_params params;
951 const struct glsl_type *type = glsl_without_array(var->type);
952
953 memset(&params, 0, sizeof(params));
954 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
955 for (unsigned i = 0; i < 4; i++)
956 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
957 if (params.target == PIPE_TEXTURE_1D_ARRAY)
958 coords[2] = coords[1];
959
960 params.coords = coords;
961 params.outdata = result;
962 params.img_op = LP_IMG_LOAD;
963 params.image_index = var->data.binding;
964 bld_base->image_op(bld_base, &params);
965 }
966
967 static void visit_store_image(struct lp_build_nir_context *bld_base,
968 nir_intrinsic_instr *instr)
969 {
970 struct gallivm_state *gallivm = bld_base->base.gallivm;
971 LLVMBuilderRef builder = gallivm->builder;
972 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
973 nir_variable *var = nir_deref_instr_get_variable(deref);
974 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
975 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
976 LLVMValueRef coords[5];
977 struct lp_img_params params;
978 const struct glsl_type *type = glsl_without_array(var->type);
979
980 memset(&params, 0, sizeof(params));
981 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
982 for (unsigned i = 0; i < 4; i++)
983 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
984 if (params.target == PIPE_TEXTURE_1D_ARRAY)
985 coords[2] = coords[1];
986 params.coords = coords;
987
988 for (unsigned i = 0; i < 4; i++) {
989 params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
990 params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
991 }
992 params.img_op = LP_IMG_STORE;
993 params.image_index = var->data.binding;
994
997 bld_base->image_op(bld_base, &params);
998 }
999
1000 static void visit_atomic_image(struct lp_build_nir_context *bld_base,
1001 nir_intrinsic_instr *instr,
1002 LLVMValueRef result[4])
1003 {
1004 struct gallivm_state *gallivm = bld_base->base.gallivm;
1005 LLVMBuilderRef builder = gallivm->builder;
1006 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1007 nir_variable *var = nir_deref_instr_get_variable(deref);
1008 struct lp_img_params params;
1009 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1010 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1011 LLVMValueRef coords[5];
1012 const struct glsl_type *type = glsl_without_array(var->type);
1013
1014 memset(&params, 0, sizeof(params));
1015
1016 switch (instr->intrinsic) {
1017 case nir_intrinsic_image_deref_atomic_add:
1018 params.op = LLVMAtomicRMWBinOpAdd;
1019 break;
1020 case nir_intrinsic_image_deref_atomic_exchange:
1021 params.op = LLVMAtomicRMWBinOpXchg;
1022 break;
1023 case nir_intrinsic_image_deref_atomic_and:
1024 params.op = LLVMAtomicRMWBinOpAnd;
1025 break;
1026 case nir_intrinsic_image_deref_atomic_or:
1027 params.op = LLVMAtomicRMWBinOpOr;
1028 break;
1029 case nir_intrinsic_image_deref_atomic_xor:
1030 params.op = LLVMAtomicRMWBinOpXor;
1031 break;
1032 case nir_intrinsic_image_deref_atomic_umin:
1033 params.op = LLVMAtomicRMWBinOpUMin;
1034 break;
1035 case nir_intrinsic_image_deref_atomic_umax:
1036 params.op = LLVMAtomicRMWBinOpUMax;
1037 break;
1038 case nir_intrinsic_image_deref_atomic_imin:
1039 params.op = LLVMAtomicRMWBinOpMin;
1040 break;
1041 case nir_intrinsic_image_deref_atomic_imax:
1042 params.op = LLVMAtomicRMWBinOpMax;
1043 break;
1044 default:
1045 break;
1046 }
1047
1048 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1049 for (unsigned i = 0; i < 4; i++)
1050 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1051 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1052 coords[2] = coords[1];
1053 params.coords = coords;
1054 if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
1055 LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1056 params.indata[0] = in_val;
1057 params.indata2[0] = cas_val;
1058 } else
1059 params.indata[0] = in_val;
1060
1061 params.outdata = result;
1062 params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
1063 params.image_index = var->data.binding;
1064
1065 bld_base->image_op(bld_base, &params);
1066 }
1067
1068
1069 static void visit_image_size(struct lp_build_nir_context *bld_base,
1070 nir_intrinsic_instr *instr,
1071 LLVMValueRef result[4])
1072 {
1073 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1074 nir_variable *var = nir_deref_instr_get_variable(deref);
1075 struct lp_sampler_size_query_params params = { 0 };
1076 params.texture_unit = var->data.binding;
1077 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
1078 params.sizes_out = result;
1079
1080 bld_base->image_size(bld_base, &params);
1081 }
1082
1083 static void visit_shared_load(struct lp_build_nir_context *bld_base,
1084 nir_intrinsic_instr *instr,
1085 LLVMValueRef result[4])
1086 {
1087 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1088 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1089 NULL, offset, result);
1090 }
1091
1092 static void visit_shared_store(struct lp_build_nir_context *bld_base,
1093 nir_intrinsic_instr *instr)
1094 {
1095 LLVMValueRef val = get_src(bld_base, instr->src[0]);
1096 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1097 int writemask = instr->const_index[1];
1098 int nc = nir_src_num_components(instr->src[0]);
1099 int bitsize = nir_src_bit_size(instr->src[0]);
1100 bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
1101 }
1102
1103 static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
1104 nir_intrinsic_instr *instr,
1105 LLVMValueRef result[4])
1106 {
1107 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1108 LLVMValueRef val = get_src(bld_base, instr->src[1]);
1109 LLVMValueRef val2 = NULL;
1110 if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
1111 val2 = get_src(bld_base, instr->src[2]);
1112
1113 bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
1114
1115 }
1116
1117 static void visit_barrier(struct lp_build_nir_context *bld_base)
1118 {
1119 bld_base->barrier(bld_base);
1120 }
1121
1122 static void visit_discard(struct lp_build_nir_context *bld_base,
1123 nir_intrinsic_instr *instr)
1124 {
1125 LLVMValueRef cond = NULL;
1126 if (instr->intrinsic == nir_intrinsic_discard_if) {
1127 cond = get_src(bld_base, instr->src[0]);
1128 cond = cast_type(bld_base, cond, nir_type_int, 32);
1129 }
1130 bld_base->discard(bld_base, cond);
1131 }
1132
1133 static void visit_intrinsic(struct lp_build_nir_context *bld_base,
1134 nir_intrinsic_instr *instr)
1135 {
1136 LLVMValueRef result[4] = {0};
1137 switch (instr->intrinsic) {
1138 case nir_intrinsic_load_deref:
1139 visit_load_var(bld_base, instr, result);
1140 break;
1141 case nir_intrinsic_store_deref:
1142 visit_store_var(bld_base, instr);
1143 break;
1144 case nir_intrinsic_load_ubo:
1145 visit_load_ubo(bld_base, instr, result);
1146 break;
1147 case nir_intrinsic_load_ssbo:
1148 visit_load_ssbo(bld_base, instr, result);
1149 break;
1150 case nir_intrinsic_store_ssbo:
1151 visit_store_ssbo(bld_base, instr);
1152 break;
1153 case nir_intrinsic_get_buffer_size:
1154 visit_get_buffer_size(bld_base, instr, result);
1155 break;
1156 case nir_intrinsic_load_vertex_id:
1157 case nir_intrinsic_load_primitive_id:
1158 case nir_intrinsic_load_instance_id:
1159 case nir_intrinsic_load_work_group_id:
1160 case nir_intrinsic_load_local_invocation_id:
1161 case nir_intrinsic_load_num_work_groups:
1162 case nir_intrinsic_load_invocation_id:
1163 case nir_intrinsic_load_front_face:
1164 bld_base->sysval_intrin(bld_base, instr, result);
1165 break;
1166 case nir_intrinsic_discard_if:
1167 case nir_intrinsic_discard:
1168 visit_discard(bld_base, instr);
1169 break;
1170 case nir_intrinsic_emit_vertex:
1171 bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
1172 break;
1173 case nir_intrinsic_end_primitive:
1174 bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
1175 break;
1176 case nir_intrinsic_ssbo_atomic_add:
1177 case nir_intrinsic_ssbo_atomic_imin:
1178 case nir_intrinsic_ssbo_atomic_imax:
1179 case nir_intrinsic_ssbo_atomic_umin:
1180 case nir_intrinsic_ssbo_atomic_umax:
1181 case nir_intrinsic_ssbo_atomic_and:
1182 case nir_intrinsic_ssbo_atomic_or:
1183 case nir_intrinsic_ssbo_atomic_xor:
1184 case nir_intrinsic_ssbo_atomic_exchange:
1185 case nir_intrinsic_ssbo_atomic_comp_swap:
1186 visit_ssbo_atomic(bld_base, instr, result);
1187 break;
1188 case nir_intrinsic_image_deref_load:
1189 visit_load_image(bld_base, instr, result);
1190 break;
1191 case nir_intrinsic_image_deref_store:
1192 visit_store_image(bld_base, instr);
1193 break;
1194 case nir_intrinsic_image_deref_atomic_add:
1195 case nir_intrinsic_image_deref_atomic_imin:
1196 case nir_intrinsic_image_deref_atomic_imax:
1197 case nir_intrinsic_image_deref_atomic_umin:
1198 case nir_intrinsic_image_deref_atomic_umax:
1199 case nir_intrinsic_image_deref_atomic_and:
1200 case nir_intrinsic_image_deref_atomic_or:
1201 case nir_intrinsic_image_deref_atomic_xor:
1202 case nir_intrinsic_image_deref_atomic_exchange:
1203 case nir_intrinsic_image_deref_atomic_comp_swap:
1204 visit_atomic_image(bld_base, instr, result);
1205 break;
1206 case nir_intrinsic_image_deref_size:
1207 visit_image_size(bld_base, instr, result);
1208 break;
1209 case nir_intrinsic_load_shared:
1210 visit_shared_load(bld_base, instr, result);
1211 break;
1212 case nir_intrinsic_store_shared:
1213 visit_shared_store(bld_base, instr);
1214 break;
1215 case nir_intrinsic_shared_atomic_add:
1216 case nir_intrinsic_shared_atomic_imin:
1217 case nir_intrinsic_shared_atomic_umin:
1218 case nir_intrinsic_shared_atomic_imax:
1219 case nir_intrinsic_shared_atomic_umax:
1220 case nir_intrinsic_shared_atomic_and:
1221 case nir_intrinsic_shared_atomic_or:
1222 case nir_intrinsic_shared_atomic_xor:
1223 case nir_intrinsic_shared_atomic_exchange:
1224 case nir_intrinsic_shared_atomic_comp_swap:
1225 visit_shared_atomic(bld_base, instr, result);
1226 break;
1227 case nir_intrinsic_barrier:
1228 visit_barrier(bld_base);
1229 break;
1230 case nir_intrinsic_memory_barrier:
1231 break;
1232 default:
1233 assert(0);
1234 break;
1235 }
1236 if (result[0]) {
1237 assign_dest(bld_base, &instr->dest, result);
1238 }
1239 }
1240
1241 static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1242 {
1243 struct lp_sampler_size_query_params params;
1244 LLVMValueRef sizes_out[4];
1245 LLVMValueRef explicit_lod = NULL;
1246
1247 for (unsigned i = 0; i < instr->num_srcs; i++) {
1248 switch (instr->src[i].src_type) {
1249 case nir_tex_src_lod:
1250 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1251 break;
1252 default:
1253 break;
1254 }
1255 }
1256
1257 params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
1258 params.texture_unit = instr->texture_index;
1259 params.explicit_lod = explicit_lod;
1260 params.is_sviewinfo = TRUE;
1261 params.sizes_out = sizes_out;
1262
1263 if (instr->op == nir_texop_query_levels)
1264 params.explicit_lod = bld_base->uint_bld.zero;
1265 bld_base->tex_size(bld_base, &params);
1266 assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
1267 }
1268
1269 static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
1270 nir_src lod_src)
1271 {
1272 enum lp_sampler_lod_property lod_property;
1273
1274 if (nir_src_is_dynamically_uniform(lod_src))
1275 lod_property = LP_SAMPLER_LOD_SCALAR;
1276 else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1277 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1278 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1279 else
1280 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1281 }
1282 else
1283 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1284 return lod_property;
1285 }
1286
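/*
 * Translate a NIR texture instruction: collect coordinates, derivatives,
 * offsets and LOD arguments into an lp_sampler_params request, normalize
 * the coordinate types, and hand it to the backend's tex callback.
 */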
1287 static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1288 {
1289 struct gallivm_state *gallivm = bld_base->base.gallivm;
1290 LLVMBuilderRef builder = gallivm->builder;
1291 LLVMValueRef coords[5];
1292 LLVMValueRef offsets[3] = { NULL };
1293 LLVMValueRef explicit_lod = NULL, projector = NULL;
1294 struct lp_sampler_params params;
1295 struct lp_derivatives derivs;
1296 unsigned sample_key = 0;
1297 nir_deref_instr *texture_deref_instr = NULL;
1298 nir_deref_instr *sampler_deref_instr = NULL;
1299 LLVMValueRef texel[4];
1300 unsigned lod_src = 0;
1301 LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
1302
1303 memset(&params, 0, sizeof(params));
1304 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
1305
1306 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
1307 visit_txs(bld_base, instr);
1308 return;
1309 }
1310 if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1311 sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
1312 else if (instr->op == nir_texop_tg4)
1313 sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
1314 else if (instr->op == nir_texop_lod)
1315 sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
1316 for (unsigned i = 0; i < instr->num_srcs; i++) {
1317 switch (instr->src[i].src_type) {
1318 case nir_tex_src_coord: {
1319 LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
1320 if (instr->coord_components == 1)
1321 coords[0] = coord;
1322 else {
1323 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1324 coords[chan] = LLVMBuildExtractValue(builder, coord,
1325 chan, "");
1326 }
1327 for (unsigned chan = instr->coord_components; chan < 5; chan++)
1328 coords[chan] = coord_undef;
1329
1330 break;
1331 }
1332 case nir_tex_src_texture_deref:
1333 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
1334 break;
1335 case nir_tex_src_sampler_deref:
1336 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
1337 break;
1338 case nir_tex_src_projector:
1339 projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
1340 break;
1341 case nir_tex_src_comparator:
1342 sample_key |= LP_SAMPLER_SHADOW;
1343 coords[4] = get_src(bld_base, instr->src[i].src);
1344 coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
1345 break;
1346 case nir_tex_src_bias:
1347 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
1348 lod_src = i;
1349 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1350 break;
1351 case nir_tex_src_lod:
1352 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
1353 lod_src = i;
1354 if (instr->op == nir_texop_txf)
1355 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1356 else
1357 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1358 break;
1359 case nir_tex_src_ddx: {
1360 int deriv_cnt = instr->coord_components;
1361 if (instr->is_array)
1362 deriv_cnt--;
1363 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1364 if (deriv_cnt == 1)
1365 derivs.ddx[0] = deriv_val;
1366 else
1367 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1368 derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
1369 chan, "");
1370 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1371 derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
1372 break;
1373 }
1374 case nir_tex_src_ddy: {
1375 int deriv_cnt = instr->coord_components;
1376 if (instr->is_array)
1377 deriv_cnt--;
1378 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1379 if (deriv_cnt == 1)
1380 derivs.ddy[0] = deriv_val;
1381 else
1382 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1383 derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
1384 chan, "");
1385 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1386 derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
1387 break;
1388 }
1389 case nir_tex_src_offset: {
1390 int offset_cnt = instr->coord_components;
1391 if (instr->is_array)
1392 offset_cnt--;
1393 LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
1394 sample_key |= LP_SAMPLER_OFFSETS;
1395 if (offset_cnt == 1)
1396 offsets[0] = offset_val;
1397 else {
1398 for (unsigned chan = 0; chan < offset_cnt; ++chan)
1399 offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
1400 chan, "");
1401 }
1402 break;
1403 }
1404 case nir_tex_src_ms_index:
1405 break;
1406 default:
1407 assert(0);
1408 break;
1409 }
1410 }
1411 if (!sampler_deref_instr)
1412 sampler_deref_instr = texture_deref_instr;
1413
1414 if (explicit_lod)
1415 lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
1416
1417 if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
1418 instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
1419 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1420 coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
1421 else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1422 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1423 coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
1424
1425 if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
1426 /* move layer coord for 1d arrays. */
1427 coords[2] = coords[1];
1428 coords[1] = coord_undef;
1429 }
1430
1431 if (projector) {
1432 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1433 coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
1434 if (sample_key & LP_SAMPLER_SHADOW)
1435 coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
1436 }
1437
1438 uint32_t base_index = 0;
1439 if (!texture_deref_instr) {
1440 int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
1441 if (samp_src_index == -1) {
1442 base_index = instr->sampler_index;
1443 }
1444 }
1445
1446 if (instr->op == nir_texop_txd) {
1447 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
1448 params.derivs = &derivs;
1449 if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1450 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1451 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1452 else
1453 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1454 } else
1455 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1456 }
1457
1458 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
1459 params.sample_key = sample_key;
1460 params.offsets = offsets;
1461 params.texture_index = base_index;
1462 params.sampler_index = base_index;
1463 params.coords = coords;
1464 params.texel = texel;
1465 params.lod = explicit_lod;
1466 bld_base->tex(bld_base, &params);
1467 assign_dest(bld_base, &instr->dest, texel);
1468 }
1469
1470 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1471 const nir_ssa_undef_instr *instr)
1472 {
1473 unsigned num_components = instr->def.num_components;
1474 LLVMValueRef undef[4];
1475 for (unsigned i = 0; i < num_components; i++)
1476 undef[i] = LLVMGetUndef(bld_base->base.vec_type);
1477 assign_ssa_dest(bld_base, &instr->def, undef);
1478 }
1479
1480 static void visit_jump(struct lp_build_nir_context *bld_base,
1481 const nir_jump_instr *instr)
1482 {
1483 switch (instr->type) {
1484 case nir_jump_break:
1485 bld_base->break_stmt(bld_base);
1486 break;
1487 case nir_jump_continue:
1488 bld_base->continue_stmt(bld_base);
1489 break;
1490 default:
1491 unreachable("Unknown jump instr\n");
1492 }
1493 }
1494
1495 static void visit_deref(struct lp_build_nir_context *bld_base,
1496 nir_deref_instr *instr)
1497 {
1498 if (instr->mode != nir_var_mem_shared &&
1499 instr->mode != nir_var_mem_global)
1500 return;
1501 LLVMValueRef result = NULL;
1502 switch(instr->deref_type) {
1503 case nir_deref_type_var: {
1504 struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
1505 result = entry->data;
1506 break;
1507 }
1508 default:
1509 unreachable("Unhandled deref_instr deref type");
1510 }
1511
1512 assign_ssa(bld_base, instr->dest.ssa.index, result);
1513 }
1514
1515 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1516 {
1517 nir_foreach_instr(instr, block)
1518 {
1519 switch (instr->type) {
1520 case nir_instr_type_alu:
1521 visit_alu(bld_base, nir_instr_as_alu(instr));
1522 break;
1523 case nir_instr_type_load_const:
1524 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1525 break;
1526 case nir_instr_type_intrinsic:
1527 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1528 break;
1529 case nir_instr_type_tex:
1530 visit_tex(bld_base, nir_instr_as_tex(instr));
1531 break;
1532 case nir_instr_type_phi:
1533 assert(0);
1534 break;
1535 case nir_instr_type_ssa_undef:
1536 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1537 break;
1538 case nir_instr_type_jump:
1539 visit_jump(bld_base, nir_instr_as_jump(instr));
1540 break;
1541 case nir_instr_type_deref:
1542 visit_deref(bld_base, nir_instr_as_deref(instr));
1543 break;
1544 default:
1545 fprintf(stderr, "Unknown NIR instr type: ");
1546 nir_print_instr(instr, stderr);
1547 fprintf(stderr, "\n");
1548 abort();
1549 }
1550 }
1551 }
1552
1553 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1554 {
1555 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1556
1557 bld_base->if_cond(bld_base, cond);
1558 visit_cf_list(bld_base, &if_stmt->then_list);
1559
1560 if (!exec_list_is_empty(&if_stmt->else_list)) {
1561 bld_base->else_stmt(bld_base);
1562 visit_cf_list(bld_base, &if_stmt->else_list);
1563 }
1564 bld_base->endif_stmt(bld_base);
1565 }
1566
1567 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1568 {
1569 bld_base->bgnloop(bld_base);
1570 visit_cf_list(bld_base, &loop->body);
1571 bld_base->endloop(bld_base);
1572 }
1573
1574 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1575 struct exec_list *list)
1576 {
1577 foreach_list_typed(nir_cf_node, node, node, list)
1578 {
1579 switch (node->type) {
1580 case nir_cf_node_block:
1581 visit_block(bld_base, nir_cf_node_as_block(node));
1582 break;
1583
1584 case nir_cf_node_if:
1585 visit_if(bld_base, nir_cf_node_as_if(node));
1586 break;
1587
1588 case nir_cf_node_loop:
1589 visit_loop(bld_base, nir_cf_node_as_loop(node));
1590 break;
1591
1592 default:
1593 assert(0);
1594 }
1595 }
1596 }
1597
1598 static void
1599 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
1600 struct nir_shader *nir,
1601 struct nir_variable *variable)
1602 {
1603 bld_base->emit_var_decl(bld_base, variable);
1604 }
1605
1606 /* Vector registers are stored as arrays on the LLVM side so that we
1607 can use GEP on them; exec-mask stores have to operate on
1608 individual components.
1609 The arrays are laid out as:
1610 0.x, 1.x, 2.x, 3.x
1611 0.y, 1.y, 2.y, 3.y
1612 ....
1613 */
1614 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1615 nir_register *reg)
1616 {
1617 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1618
1619 LLVMTypeRef type = int_bld->vec_type;
1620 if (reg->num_array_elems)
1621 type = LLVMArrayType(type, reg->num_array_elems);
1622 if (reg->num_components > 1)
1623 type = LLVMArrayType(type, reg->num_components);
1624
1625 return type;
1626 }
1627
1628
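/*
 * Main entry point: lower the shader out of SSA and locals into
 * registers, allocate LLVM storage for each register, then translate
 * the single function's control-flow list via the emit callbacks.
 */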
1629 bool lp_build_nir_llvm(
1630 struct lp_build_nir_context *bld_base,
1631 struct nir_shader *nir)
1632 {
1633 struct nir_function *func;
1634
1635 nir_convert_from_ssa(nir, true);
1636 nir_lower_locals_to_regs(nir);
1637 nir_remove_dead_derefs(nir);
1638 nir_remove_dead_variables(nir, nir_var_function_temp);
1639
1640 nir_foreach_variable(variable, &nir->outputs)
1641 handle_shader_output_decl(bld_base, nir, variable);
1642
1643 bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1644 _mesa_key_pointer_equal);
1645 bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1646 _mesa_key_pointer_equal);
1647
1648 func = (struct nir_function *)exec_list_get_head(&nir->functions);
1649
1650 nir_foreach_register(reg, &func->impl->registers) {
1651 LLVMTypeRef type = get_register_type(bld_base, reg);
1652 LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
1653 type, "reg");
1654 _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
1655 }
1656 nir_index_ssa_defs(func->impl);
1657 bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
1658 visit_cf_list(bld_base, &func->impl->body);
1659
1660 free(bld_base->ssa_defs);
1661 ralloc_free(bld_base->vars);
1662 ralloc_free(bld_base->regs);
1663 return true;
1664 }
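
/*
 * Rough usage sketch (the concrete hookup lives in the SoA backend,
 * lp_bld_nir_soa.c): the caller fills in an lp_build_nir_context with
 * the per-type build contexts and the emit callbacks used above, runs
 * lp_build_opt_nir() on the shader, then calls lp_build_nir_llvm().
 */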
1665
1666 /* Run a few basic optimization passes to clean up patterns the backend would rather not see. */
1667 void lp_build_opt_nir(struct nir_shader *nir)
1668 {
1669 bool progress;
1670 do {
1671 progress = false;
1672 NIR_PASS(progress, nir, nir_opt_constant_folding);
1673 NIR_PASS(progress, nir, nir_opt_algebraic);
1674 } while (progress);
1675 nir_lower_bool_to_int32(nir);
1676 }