gallivm/draw: add support for draw_id system value.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_nir.c
1 /**************************************************************************
2 *
3 * Copyright 2019 Red Hat.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **************************************************************************/
25
26 #include "lp_bld_nir.h"
27 #include "lp_bld_arit.h"
28 #include "lp_bld_bitarit.h"
29 #include "lp_bld_const.h"
30 #include "lp_bld_gather.h"
31 #include "lp_bld_logic.h"
32 #include "lp_bld_quad.h"
33 #include "lp_bld_flow.h"
34 #include "lp_bld_struct.h"
35 #include "lp_bld_debug.h"
36 #include "lp_bld_printf.h"
37 #include "nir_deref.h"
38
39 static void visit_cf_list(struct lp_build_nir_context *bld_base,
40 struct exec_list *list);
41
/* Bitcast a value to the LLVM vector type matching the requested NIR ALU
 * type and bit size.  SSA values are stored with whatever type they were
 * produced as, so consumers re-cast on use.  Unsized/unhandled alu types
 * fall through to the default and are returned unchanged.
 */
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         /* only 32- and 64-bit floats are supported here */
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      /* explicitly sized 32-bit uint (bit_size parameter is ignored) */
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   /* reached only after an assert(0) path in a release build */
   return NULL;
}
87
88 static struct lp_build_context *get_int_bld(struct lp_build_nir_context *bld_base,
89 bool is_unsigned,
90 unsigned op_bit_size)
91 {
92 if (is_unsigned)
93 if (op_bit_size == 64)
94 return &bld_base->uint64_bld;
95 else
96 return &bld_base->uint_bld;
97 else if (op_bit_size == 64)
98 return &bld_base->int64_bld;
99 else
100 return &bld_base->int_bld;
101 }
102
103 static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
104 unsigned op_bit_size)
105 {
106 if (op_bit_size == 64)
107 return &bld_base->dbl_bld;
108 else
109 return &bld_base->base;
110 }
111
112 static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
113 {
114 unsigned pipe_target = PIPE_BUFFER;
115 switch (sampler_dim) {
116 case GLSL_SAMPLER_DIM_1D:
117 pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
118 break;
119 case GLSL_SAMPLER_DIM_2D:
120 pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
121 break;
122 case GLSL_SAMPLER_DIM_3D:
123 pipe_target = PIPE_TEXTURE_3D;
124 break;
125 case GLSL_SAMPLER_DIM_CUBE:
126 pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
127 break;
128 case GLSL_SAMPLER_DIM_RECT:
129 pipe_target = PIPE_TEXTURE_RECT;
130 break;
131 case GLSL_SAMPLER_DIM_BUF:
132 pipe_target = PIPE_BUFFER;
133 break;
134 default:
135 break;
136 }
137 return pipe_target;
138 }
139
/* Look up the LLVM value previously recorded for an SSA def (by index). */
static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}
144
145 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);
146
/* Read a NIR register source: find its backing storage in the regs hash
 * table and delegate the actual load (with optional indirect index) to the
 * backend's load_reg hook.
 * NOTE(review): entry is dereferenced unchecked — assumes every register was
 * pre-registered in bld_base->regs; confirm against the setup code.
 */
static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   /* registers are stored as unsigned ints of the register's bit size */
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}
157
158 static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
159 {
160 if (src.is_ssa)
161 return get_ssa_src(bld_base, src.ssa);
162 else
163 return get_reg_src(bld_base, src.reg);
164 }
165
/* Record the LLVM value produced for SSA def index idx. */
static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}
170
/* Record the result of an instruction writing an SSA def.  Single-component
 * results are stored as the bare value; multi-component results are gathered
 * into an LLVM array value first.
 */
static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[4])
{
   assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] : lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}
176
/* Write vals into a NIR register destination via the backend's store_reg
 * hook.  A zero write_mask means "write all four components" (0xf).
 * NOTE(review): like get_reg_src, the hash lookup result is dereferenced
 * unchecked — assumes the register was pre-registered.
 */
static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[4])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}
189
190 static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[4])
191 {
192 if (dest->is_ssa)
193 assign_ssa_dest(bld_base, &dest->ssa, vals);
194 else
195 assign_reg(bld_base, &dest->reg, 0, vals);
196 }
197
198 static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[4])
199 {
200 if (dest->dest.is_ssa)
201 assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
202 else
203 assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
204 }
205
/* Convert an integer vector to a NIR bool32 (-1/0 per lane) by comparing
 * against zero.  64-bit comparisons yield 64-bit masks, so truncate those
 * back down to the 32-bit int vector type.
 */
static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}
218
/* Convert a float/double vector to a NIR bool32 mask (val != 0.0).
 * Double comparisons produce 64-bit masks which are truncated to 32 bits.
 */
static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}
230
/* Float comparison producing a bool32 mask.  All comparisons except
 * "not equal" use the ordered form (NaN compares false); NOTEQUAL uses the
 * plain (unordered) compare so NaN != NaN is true.  64-bit masks are
 * truncated to the 32-bit int vector type.
 */
static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}
248
/* Integer comparison producing a bool32 mask; signedness selects the
 * builder context.  64-bit masks are truncated to the 32-bit int type.
 */
static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}
262
/* Fetch an ALU source and apply its swizzle, producing a value with exactly
 * num_components channels.  Multi-channel values are represented as LLVM
 * array aggregates (one vector per channel), hence extract/insert-value.
 * Source modifiers (negate/abs) are expected to have been lowered away.
 */
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         /* select a single channel out of the aggregate */
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         /* broadcast a scalar source across all requested channels */
         LLVMValueRef values[] = {value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         /* general swizzle: rebuild the aggregate channel by channel */
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}
298
/* Bool (-1/0 mask) to float: AND the mask with the bit pattern of 1.0 so
 * true lanes become 1.0f and false lanes 0.0f, then FP-extend for 64-bit
 * results.
 */
static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}
320
/* Bool (-1/0 mask) to integer: AND with 1 so true lanes become 1 and false
 * lanes 0; zero-extend for 64-bit results.
 */
static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}
337
/* nir_op_b32csel: normalize the selector to a proper mask (!= 0), then pick
 * src[1] where true and src[2] where false, using the int builder matching
 * the operands' bit size.
 */
static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[4],
                                 LLVMValueRef src[4])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}
347
/* Extract the low (hi=false) or high (hi=true) 32-bit halves of a 64-bit
 * vector: bitcast to a twice-as-long i32 vector, then shuffle out the even
 * (low) or odd (high) lanes.  Assumes little-endian lane layout.
 */
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);       /* even lanes: low halves */
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1); /* odd lanes: high halves */
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}
368
/* Interleave two 32-bit vectors into one twice-as-long vector laid out as
 * lo0,hi0,lo1,hi1,... — the inverse of split_64bit.  The caller bitcasts
 * the result to a 64-bit vector type.
 */
static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      /* lanes >= length index into input2 (the high halves) */
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}
387
/* Translate a single NIR ALU op (already split per-channel by visit_alu)
 * into LLVM IR.  Sources arrive pre-cast to the op's input types; the
 * result is cast to the output type by the caller.
 *
 * NOTE(review): several integer ops (idiv, imax/imin, imul, ishl/ishr/ushr,
 * iabs, isign, umax/umin, umod, u*div) use the fixed 32-bit builders rather
 * than get_int_bld(src_bit_size) — 64-bit variants of those ops presumably
 * aren't expected here; confirm against the NIR lowering options in use.
 */
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[4], LLVMValueRef src[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      /* (src2 ^ (src0 & (src1 ^ src2))): select bits of src1 where src0 set */
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      /* fract(x) = x - floor(x) */
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      /* fmod(x, y) = x - y * floor(x / y) */
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(&bld_base->int_bld, src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = lp_build_div(&bld_base->int_bld,
                            src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_imul:
      result = lp_build_mul(&bld_base->int_bld,
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_ishl:
      /* mask the shift count to the operand width, matching hw behavior and
       * avoiding LLVM's undefined over-wide shifts */
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shl(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_ishr:
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shr(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_isign:
      result = lp_build_sgn(&bld_base->int_bld, src[0]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = lp_build_div(&bld_base->uint_bld,
                            src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      /* msb index = (bits - 1) - count-leading-zeros */
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umod:
      result = lp_build_mod(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr:
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shr(&bld_base->uint_bld, src[0], src[1]);
      break;
   default:
      assert(0);
      break;
   }
   return result;
}
703
/* Translate one NIR ALU instruction.  Determines how many components each
 * source contributes, fetches and swizzles the sources, then either gathers
 * them directly (vecN ops) or runs do_alu_action once per destination
 * channel, casting inputs/outputs per nir_op_infos.
 */
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[4];
   unsigned src_bit_size[4];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      /* vecN takes one scalar per destination channel */
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[4];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
      /* vecN: the sources already are the destination channels */
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      /* scalarize: run the op once per destination channel */
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[4];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}
759
/* Materialize a NIR load_const as splatted integer constant vectors, one
 * per component.  Values are built as unsigned ints of the def's bit size;
 * consumers bitcast to float etc. as needed.
 */
static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[4];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}
769
/* Walk a deref chain and split its offset (in attribute slots) into a
 * constant part (*const_out) and an optional per-lane indirect part
 * (*indir_out, NULL when fully constant).  For per-vertex IO the leading
 * array level is the vertex index and is returned separately via
 * vertex_index_out / vertex_index_ref.
 *
 * NOTE(review): when both a constant and an indirect offset exist, the
 * constant is folded into *indir_out AND still returned in *const_out —
 * presumably the backend only uses one of the two; confirm in the
 * load_var/store_var implementations.
 */
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;   /* path[0] is the deref_var itself */

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      /* compact arrays (e.g. clip distances) index components directly */
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         /* sum the slot counts of all preceding struct fields */
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if(path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            /* dynamic index: accumulate a per-lane offset vector */
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Uhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}
840
841 static void visit_load_var(struct lp_build_nir_context *bld_base,
842 nir_intrinsic_instr *instr,
843 LLVMValueRef result[4])
844 {
845 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
846 nir_variable *var = nir_deref_instr_get_variable(deref);
847 nir_variable_mode mode = deref->mode;
848 unsigned const_index;
849 LLVMValueRef indir_index;
850 unsigned vertex_index = 0;
851 unsigned nc = nir_dest_num_components(instr->dest);
852 unsigned bit_size = nir_dest_bit_size(instr->dest);
853 if (var) {
854 bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
855 var->data.mode == nir_var_shader_in;
856 bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
857 var->data.mode == nir_var_shader_in;
858 mode = var->data.mode;
859
860 get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
861 &const_index, &indir_index);
862 }
863 bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
864 }
865
866 static void
867 visit_store_var(struct lp_build_nir_context *bld_base,
868 nir_intrinsic_instr *instr)
869 {
870 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
871 nir_variable *var = nir_deref_instr_get_variable(deref);
872 nir_variable_mode mode = deref->mode;
873 int writemask = instr->const_index[0];
874 unsigned bit_size = nir_src_bit_size(instr->src[1]);
875 LLVMValueRef src = get_src(bld_base, instr->src[1]);
876 unsigned const_index = 0;
877 LLVMValueRef indir_index;
878 if (var)
879 get_deref_offset(bld_base, deref, false, NULL, NULL,
880 &const_index, &indir_index);
881 bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
882 }
883
/* nir_intrinsic_load_ubo: fetch block index and byte offset, then delegate
 * to the backend's load_ubo hook.  The block index is assumed uniform
 * across lanes, so only lane 0 is extracted.
 */
static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   /* a uniform offset lets the backend emit a single scalar fetch */
   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}
898
899
900 static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
901 nir_intrinsic_instr *instr,
902 LLVMValueRef result[4])
903 {
904 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
905 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
906 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
907 idx, offset, result);
908 }
909
910 static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
911 nir_intrinsic_instr *instr)
912 {
913 LLVMValueRef val = get_src(bld_base, instr->src[0]);
914 LLVMValueRef idx = get_src(bld_base, instr->src[1]);
915 LLVMValueRef offset = get_src(bld_base, instr->src[2]);
916 int writemask = instr->const_index[0];
917 int nc = nir_src_num_components(instr->src[0]);
918 int bitsize = nir_src_bit_size(instr->src[0]);
919 bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
920 }
921
922 static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
923 nir_intrinsic_instr *instr,
924 LLVMValueRef result[4])
925 {
926 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
927 result[0] = bld_base->get_buffer_size(bld_base, idx);
928 }
929
930 static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
931 nir_intrinsic_instr *instr,
932 LLVMValueRef result[4])
933 {
934 LLVMValueRef idx = get_src(bld_base, instr->src[0]);
935 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
936 LLVMValueRef val = get_src(bld_base, instr->src[2]);
937 LLVMValueRef val2 = NULL;
938 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
939 val2 = get_src(bld_base, instr->src[3]);
940
941 bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
942
943 }
944
/* Lower nir_intrinsic_image_deref_load to the backend image_op hook.
 * src[0] is the image deref, src[1] the coordinate vector. */
static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   /* Strip array-of-images wrappers so we query the image type itself. */
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   /* NIR keeps the 1D-array layer in coords[1]; the gallivm image code
    * expects it in coords[2]. */
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}
971
972 static void visit_store_image(struct lp_build_nir_context *bld_base,
973 nir_intrinsic_instr *instr)
974 {
975 struct gallivm_state *gallivm = bld_base->base.gallivm;
976 LLVMBuilderRef builder = gallivm->builder;
977 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
978 nir_variable *var = nir_deref_instr_get_variable(deref);
979 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
980 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
981 LLVMValueRef coords[5];
982 struct lp_img_params params;
983 const struct glsl_type *type = glsl_without_array(var->type);
984
985 memset(&params, 0, sizeof(params));
986 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
987 for (unsigned i = 0; i < 4; i++)
988 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
989 if (params.target == PIPE_TEXTURE_1D_ARRAY)
990 coords[2] = coords[1];
991 params.coords = coords;
992
993 for (unsigned i = 0; i < 4; i++) {
994 params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
995 params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
996 }
997 params.img_op = LP_IMG_STORE;
998 params.image_index = var->data.binding;
999
1000 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1001 coords[2] = coords[1];
1002 bld_base->image_op(bld_base, &params);
1003 }
1004
1005 static void visit_atomic_image(struct lp_build_nir_context *bld_base,
1006 nir_intrinsic_instr *instr,
1007 LLVMValueRef result[4])
1008 {
1009 struct gallivm_state *gallivm = bld_base->base.gallivm;
1010 LLVMBuilderRef builder = gallivm->builder;
1011 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1012 nir_variable *var = nir_deref_instr_get_variable(deref);
1013 struct lp_img_params params;
1014 LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
1015 LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
1016 LLVMValueRef coords[5];
1017 const struct glsl_type *type = glsl_without_array(var->type);
1018
1019 memset(&params, 0, sizeof(params));
1020
1021 switch (instr->intrinsic) {
1022 case nir_intrinsic_image_deref_atomic_add:
1023 params.op = LLVMAtomicRMWBinOpAdd;
1024 break;
1025 case nir_intrinsic_image_deref_atomic_exchange:
1026 params.op = LLVMAtomicRMWBinOpXchg;
1027 break;
1028 case nir_intrinsic_image_deref_atomic_and:
1029 params.op = LLVMAtomicRMWBinOpAnd;
1030 break;
1031 case nir_intrinsic_image_deref_atomic_or:
1032 params.op = LLVMAtomicRMWBinOpOr;
1033 break;
1034 case nir_intrinsic_image_deref_atomic_xor:
1035 params.op = LLVMAtomicRMWBinOpXor;
1036 break;
1037 case nir_intrinsic_image_deref_atomic_umin:
1038 params.op = LLVMAtomicRMWBinOpUMin;
1039 break;
1040 case nir_intrinsic_image_deref_atomic_umax:
1041 params.op = LLVMAtomicRMWBinOpUMax;
1042 break;
1043 case nir_intrinsic_image_deref_atomic_imin:
1044 params.op = LLVMAtomicRMWBinOpMin;
1045 break;
1046 case nir_intrinsic_image_deref_atomic_imax:
1047 params.op = LLVMAtomicRMWBinOpMax;
1048 break;
1049 default:
1050 break;
1051 }
1052
1053 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
1054 for (unsigned i = 0; i < 4; i++)
1055 coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
1056 if (params.target == PIPE_TEXTURE_1D_ARRAY)
1057 coords[2] = coords[1];
1058 params.coords = coords;
1059 if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
1060 LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
1061 params.indata[0] = in_val;
1062 params.indata2[0] = cas_val;
1063 } else
1064 params.indata[0] = in_val;
1065
1066 params.outdata = result;
1067 params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
1068 params.image_index = var->data.binding;
1069
1070 bld_base->image_op(bld_base, &params);
1071 }
1072
1073
1074 static void visit_image_size(struct lp_build_nir_context *bld_base,
1075 nir_intrinsic_instr *instr,
1076 LLVMValueRef result[4])
1077 {
1078 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1079 nir_variable *var = nir_deref_instr_get_variable(deref);
1080 struct lp_sampler_size_query_params params = { 0 };
1081 params.texture_unit = var->data.binding;
1082 params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
1083 params.sizes_out = result;
1084
1085 bld_base->image_size(bld_base, &params);
1086 }
1087
1088 static void visit_shared_load(struct lp_build_nir_context *bld_base,
1089 nir_intrinsic_instr *instr,
1090 LLVMValueRef result[4])
1091 {
1092 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1093 bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
1094 NULL, offset, result);
1095 }
1096
1097 static void visit_shared_store(struct lp_build_nir_context *bld_base,
1098 nir_intrinsic_instr *instr)
1099 {
1100 LLVMValueRef val = get_src(bld_base, instr->src[0]);
1101 LLVMValueRef offset = get_src(bld_base, instr->src[1]);
1102 int writemask = instr->const_index[1];
1103 int nc = nir_src_num_components(instr->src[0]);
1104 int bitsize = nir_src_bit_size(instr->src[0]);
1105 bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
1106 }
1107
1108 static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
1109 nir_intrinsic_instr *instr,
1110 LLVMValueRef result[4])
1111 {
1112 LLVMValueRef offset = get_src(bld_base, instr->src[0]);
1113 LLVMValueRef val = get_src(bld_base, instr->src[1]);
1114 LLVMValueRef val2 = NULL;
1115 if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
1116 val2 = get_src(bld_base, instr->src[2]);
1117
1118 bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
1119
1120 }
1121
/* Lower nir_intrinsic_barrier straight to the backend barrier hook. */
static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}
1126
1127 static void visit_discard(struct lp_build_nir_context *bld_base,
1128 nir_intrinsic_instr *instr)
1129 {
1130 LLVMValueRef cond = NULL;
1131 if (instr->intrinsic == nir_intrinsic_discard_if) {
1132 cond = get_src(bld_base, instr->src[0]);
1133 cond = cast_type(bld_base, cond, nir_type_int, 32);
1134 }
1135 bld_base->discard(bld_base, cond);
1136 }
1137
/* Central intrinsic dispatcher: route each NIR intrinsic to its visitor
 * or backend hook.  Visitors that produce a value fill result[], which is
 * then bound to the instruction's destination. */
static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[4] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   /* System values are all handled by one backend hook. */
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
      /* Treated as a no-op here. */
      break;
   default:
      assert(0);
      break;
   }
   /* Only bind a destination when the visitor actually produced one. */
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}
1247
1248 static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1249 {
1250 struct lp_sampler_size_query_params params;
1251 LLVMValueRef sizes_out[4];
1252 LLVMValueRef explicit_lod = NULL;
1253
1254 for (unsigned i = 0; i < instr->num_srcs; i++) {
1255 switch (instr->src[i].src_type) {
1256 case nir_tex_src_lod:
1257 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1258 break;
1259 default:
1260 break;
1261 }
1262 }
1263
1264 params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
1265 params.texture_unit = instr->texture_index;
1266 params.explicit_lod = explicit_lod;
1267 params.is_sviewinfo = TRUE;
1268 params.sizes_out = sizes_out;
1269
1270 if (instr->op == nir_texop_query_levels)
1271 params.explicit_lod = bld_base->uint_bld.zero;
1272 bld_base->tex_size(bld_base, &params);
1273 assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
1274 }
1275
1276 static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
1277 nir_src lod_src)
1278 {
1279 enum lp_sampler_lod_property lod_property;
1280
1281 if (nir_src_is_dynamically_uniform(lod_src))
1282 lod_property = LP_SAMPLER_LOD_SCALAR;
1283 else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1284 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1285 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1286 else
1287 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1288 }
1289 else
1290 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1291 return lod_property;
1292 }
1293
/* Lower a NIR texture instruction (sample/bias/lod/fetch/gather/txd/lodq)
 * to the backend tex hook.  txs and query_levels are redirected to
 * visit_txs.  Gathers all sources, builds the sample_key, fixes up coords
 * and hands a filled lp_sampler_params to bld_base->tex. */
static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, projector = NULL;
   struct lp_sampler_params params;
   struct lp_derivatives derivs;
   unsigned sample_key = 0;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texel[4];
   /* Index of the src supplying bias/lod, for lp_build_nir_lod_property. */
   unsigned lod_src = 0;
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);

   memset(&params, 0, sizeof(params));
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;

   /* Size queries use a separate path. */
   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
      visit_txs(bld_base, instr);
      return;
   }
   if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_tg4)
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_lod)
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   /* Collect all instruction sources into local form. */
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (instr->coord_components == 1)
            coords[0] = coord;
         else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         /* Pad unused coord channels with undef. */
         for (unsigned chan = instr->coord_components; chan < 5; chan++)
            coords[chan] = coord_undef;

         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_projector:
         /* Applied as a multiply by 1/q after all coords are gathered. */
         projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
         break;
      case nir_tex_src_comparator:
         /* Shadow comparison value travels in coords[4]. */
         sample_key |= LP_SAMPLER_SHADOW;
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         /* txf takes an integer LOD; sampling ops take a float one. */
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         /* The array layer contributes no derivative. */
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         sample_key |= LP_SAMPLER_OFFSETS;
         if (offset_cnt == 1)
            offsets[0] = offset_val;
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan)
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
         }
         break;
      }
      case nir_tex_src_ms_index:
         /* Ignored here. */
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);

   /* Normalize coordinate types: floats for sampling ops, ints for fetches. */
   if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
       instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
   else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   /* Projective texturing: divide coords (and shadow ref) by q. */
   if (projector) {
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
      if (sample_key & LP_SAMPLER_SHADOW)
         coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
   }

   uint32_t base_index = 0;
   if (!texture_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         base_index = instr->sampler_index;
      }
   }

   if (instr->op == nir_texop_txd) {
      /* Explicit derivatives force a non-scalar LOD computation. */
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      params.derivs = &derivs;
      if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
   params.sample_key = sample_key;
   params.offsets = offsets;
   params.texture_index = base_index;
   params.sampler_index = base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   bld_base->tex(bld_base, &params);
   assign_dest(bld_base, &instr->dest, texel);
}
1476
1477 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1478 const nir_ssa_undef_instr *instr)
1479 {
1480 unsigned num_components = instr->def.num_components;
1481 LLVMValueRef undef[4];
1482 for (unsigned i = 0; i < num_components; i++)
1483 undef[i] = LLVMGetUndef(bld_base->base.vec_type);
1484 assign_ssa_dest(bld_base, &instr->def, undef);
1485 }
1486
1487 static void visit_jump(struct lp_build_nir_context *bld_base,
1488 const nir_jump_instr *instr)
1489 {
1490 switch (instr->type) {
1491 case nir_jump_break:
1492 bld_base->break_stmt(bld_base);
1493 break;
1494 case nir_jump_continue:
1495 bld_base->continue_stmt(bld_base);
1496 break;
1497 default:
1498 unreachable("Unknown jump instr\n");
1499 }
1500 }
1501
1502 static void visit_deref(struct lp_build_nir_context *bld_base,
1503 nir_deref_instr *instr)
1504 {
1505 if (instr->mode != nir_var_mem_shared &&
1506 instr->mode != nir_var_mem_global)
1507 return;
1508 LLVMValueRef result = NULL;
1509 switch(instr->deref_type) {
1510 case nir_deref_type_var: {
1511 struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
1512 result = entry->data;
1513 break;
1514 }
1515 default:
1516 unreachable("Unhandled deref_instr deref type");
1517 }
1518
1519 assign_ssa(bld_base, instr->dest.ssa.index, result);
1520 }
1521
1522 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1523 {
1524 nir_foreach_instr(instr, block)
1525 {
1526 switch (instr->type) {
1527 case nir_instr_type_alu:
1528 visit_alu(bld_base, nir_instr_as_alu(instr));
1529 break;
1530 case nir_instr_type_load_const:
1531 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1532 break;
1533 case nir_instr_type_intrinsic:
1534 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1535 break;
1536 case nir_instr_type_tex:
1537 visit_tex(bld_base, nir_instr_as_tex(instr));
1538 break;
1539 case nir_instr_type_phi:
1540 assert(0);
1541 break;
1542 case nir_instr_type_ssa_undef:
1543 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1544 break;
1545 case nir_instr_type_jump:
1546 visit_jump(bld_base, nir_instr_as_jump(instr));
1547 break;
1548 case nir_instr_type_deref:
1549 visit_deref(bld_base, nir_instr_as_deref(instr));
1550 break;
1551 default:
1552 fprintf(stderr, "Unknown NIR instr type: ");
1553 nir_print_instr(instr, stderr);
1554 fprintf(stderr, "\n");
1555 abort();
1556 }
1557 }
1558 }
1559
1560 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1561 {
1562 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1563
1564 bld_base->if_cond(bld_base, cond);
1565 visit_cf_list(bld_base, &if_stmt->then_list);
1566
1567 if (!exec_list_is_empty(&if_stmt->else_list)) {
1568 bld_base->else_stmt(bld_base);
1569 visit_cf_list(bld_base, &if_stmt->else_list);
1570 }
1571 bld_base->endif_stmt(bld_base);
1572 }
1573
1574 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1575 {
1576 bld_base->bgnloop(bld_base);
1577 visit_cf_list(bld_base, &loop->body);
1578 bld_base->endloop(bld_base);
1579 }
1580
1581 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1582 struct exec_list *list)
1583 {
1584 foreach_list_typed(nir_cf_node, node, node, list)
1585 {
1586 switch (node->type) {
1587 case nir_cf_node_block:
1588 visit_block(bld_base, nir_cf_node_as_block(node));
1589 break;
1590
1591 case nir_cf_node_if:
1592 visit_if(bld_base, nir_cf_node_as_if(node));
1593 break;
1594
1595 case nir_cf_node_loop:
1596 visit_loop(bld_base, nir_cf_node_as_loop(node));
1597 break;
1598
1599 default:
1600 assert(0);
1601 }
1602 }
1603 }
1604
/* Declare one shader output variable with the backend.  The nir argument
 * is unused here but kept so the signature matches the other decl hooks. */
static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}
1612
1613 /* vector registers are stored as arrays in LLVM side,
1614 so we can use GEP on them, as to do exec mask stores
1615 we need to operate on a single components.
1616 arrays are:
1617 0.x, 1.x, 2.x, 3.x
1618 0.y, 1.y, 2.y, 3.y
1619 ....
1620 */
1621 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1622 nir_register *reg)
1623 {
1624 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1625
1626 LLVMTypeRef type = int_bld->vec_type;
1627 if (reg->num_array_elems)
1628 type = LLVMArrayType(type, reg->num_array_elems);
1629 if (reg->num_components > 1)
1630 type = LLVMArrayType(type, reg->num_components);
1631
1632 return type;
1633 }
1634
1635
/* Entry point: translate a NIR shader into LLVM IR via the bld_base hooks.
 * Runs the lowering passes this translator requires (no SSA phis, locals
 * as registers), declares outputs, allocates register storage, then walks
 * the single function's body.  Always returns true. */
bool lp_build_nir_llvm(
   struct lp_build_nir_context *bld_base,
   struct nir_shader *nir)
{
   struct nir_function *func;

   /* The translator consumes registers + jumps instead of SSA phis. */
   nir_convert_from_ssa(nir, true);
   nir_lower_locals_to_regs(nir);
   nir_remove_dead_derefs(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp);

   nir_foreach_variable(variable, &nir->outputs)
      handle_shader_output_decl(bld_base, nir, variable);

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   /* Pre-allocate an LLVM alloca for every NIR register. */
   nir_foreach_register(reg, &func->impl->registers) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
                                                     type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(func->impl);
   bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &func->impl->body);

   /* Translation-time bookkeeping only; safe to release here. */
   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   return true;
}
1672
1673 /* do some basic opts to remove some things we don't want to see. */
1674 void lp_build_opt_nir(struct nir_shader *nir)
1675 {
1676 bool progress;
1677 do {
1678 progress = false;
1679 NIR_PASS_V(nir, nir_opt_constant_folding);
1680 NIR_PASS_V(nir, nir_opt_algebraic);
1681 } while (progress);
1682 nir_lower_bool_to_int32(nir);
1683 }