gallivm/nir: wrap idiv to avoid divide by 0 (v2)
src/gallium/auxiliary/gallivm/lp_bld_nir.c (mesa.git)
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

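/* Bitcast a value to the LLVM vector type matching the requested NIR ALU
 * type and bit size; types this translator doesn't distinguish are returned
 * unchanged. */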
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}

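/* Pick the signed/unsigned 32- or 64-bit integer build context matching the
 * operand; get_flt_bld below does the same for float vs. double. */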
static struct lp_build_context *get_int_bld(struct lp_build_nir_context *bld_base,
                                            bool is_unsigned,
                                            unsigned op_bit_size)
{
   if (is_unsigned)
      if (op_bit_size == 64)
         return &bld_base->uint64_bld;
      else
         return &bld_base->uint_bld;
   else if (op_bit_size == 64)
      return &bld_base->int64_bld;
   else
      return &bld_base->int_bld;
}

static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

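/* Map a GLSL sampler dimension (and arrayness) to the corresponding
 * pipe_texture_target; unknown dims fall back to PIPE_BUFFER. */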
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[4])
{
   assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] :
              lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals,
                                               ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[4])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf,
                       indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[4])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[4])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

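/* Comparison helpers: NIR 32-bit booleans are per-lane masks, all-ones for
 * true and zero for false. Results from 64-bit sources are truncated back
 * to 32-bit lanes so everything downstream sees bool32 values. */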
static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type,
                                          PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

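/* Fetch an ALU source and apply its swizzle. Multi-component values are
 * LLVM aggregates holding one vector per component, so swizzling is done
 * with extractvalue/insertvalue rather than vector shuffles. */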
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

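/* Since booleans are all-ones masks, b2f just ANDs with the bit pattern of
 * 1.0f (and b2i below ANDs with integer 1), extending afterwards for
 * 64-bit results. */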
static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[4],
                                 LLVMValueRef src[4])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type,
                                     PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

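/* 64-bit vectors occupy twice as many 32-bit lanes; viewed as an i32 vector,
 * the even lanes hold the low halves and the odd lanes the high halves, so a
 * shuffle extracts either set (merge_64bit below interleaves them back
 * together). */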
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

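/* Integer division wrapped to be safe against zero divisors: lanes where
 * src2 == 0 get all-ones OR'd into the divisor (making it nonzero) and into
 * the result, so those lanes produce 0xffffffff instead of faulting. */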
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);
   /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
    * we may as well do the same for idiv. */
   return LLVMBuildOr(builder, div_mask, result, "");
}

static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[4], LLVMValueRef src[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(&bld_base->int_bld, src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_imul:
      result = lp_build_mul(&bld_base->int_bld,
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_ishl:
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shl(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_ishr:
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shr(&bld_base->int_bld, src[0], src[1]);
      break;
   case nir_op_isign:
      result = lp_build_sgn(&bld_base->int_bld, src[0]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umod:
      result = lp_build_mod(&bld_base->uint_bld, src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr:
      src[1] = lp_build_and(&bld_base->uint_bld, src[1], lp_build_const_int_vec(gallivm, bld_base->uint_bld.type, (src_bit_size[0] - 1)));
      result = lp_build_shr(&bld_base->uint_bld, src[0], src[1]);
      break;
   default:
      assert(0);
      break;
   }
   return result;
}

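/* Translate one NIR ALU instruction: fetch and swizzle the sources, then
 * emit do_alu_action once per destination channel, since values are kept in
 * SoA form (one LLVM vector per channel). */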
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[4];
   unsigned src_bit_size[4];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[4];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[4];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[4];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

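/* Walk a variable deref chain and split the total offset (counted in
 * attribute slots) into a constant part plus an optional per-lane indirect
 * part, optionally peeling off a leading per-vertex array index first. */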
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index;
   LLVMValueRef indir_index;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index;
   if (var)
      get_deref_offset(bld_base, deref, false, NULL, NULL,
                       &const_index, &indir_index);
   bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}

static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[4] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params;
   LLVMValueRef sizes_out[4];
   LLVMValueRef explicit_lod = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

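/* Work out how uniform a LOD value is across the vector: scalar when the
 * source is dynamically uniform, otherwise per-quad in fragment shaders
 * (unless quad LOD is disabled via GALLIVM_PERF_NO_QUAD_LOD) and
 * per-element everywhere else. */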
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   }
   else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

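/* Translate a NIR texture instruction: txs/query_levels are routed to
 * visit_txs, everything else collects its sources (coords, projector,
 * comparator, LOD/bias, derivatives, offsets) into lp_sampler_params and
 * calls the backend's tex hook. */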
static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, projector = NULL;
   struct lp_sampler_params params;
   struct lp_derivatives derivs;
   unsigned sample_key = 0;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texel[4];
   unsigned lod_src = 0;
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);

   memset(&params, 0, sizeof(params));
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;

   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
      visit_txs(bld_base, instr);
      return;
   }
   if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_tg4)
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_lod)
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (instr->coord_components == 1)
            coords[0] = coord;
         else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = instr->coord_components; chan < 5; chan++)
            coords[chan] = coord_undef;

         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_projector:
         projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
         break;
      case nir_tex_src_comparator:
         sample_key |= LP_SAMPLER_SHADOW;
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         sample_key |= LP_SAMPLER_OFFSETS;
         if (offset_cnt == 1)
            offsets[0] = offset_val;
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan)
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
         }
         break;
      }
      case nir_tex_src_ms_index:
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);

   if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
       instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
   else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   if (projector) {
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
      if (sample_key & LP_SAMPLER_SHADOW)
         coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
   }

   uint32_t base_index = 0;
   if (!texture_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         base_index = instr->sampler_index;
      }
   }

   if (instr->op == nir_texop_txd) {
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      params.derivs = &derivs;
      if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
   params.sample_key = sample_key;
   params.offsets = offsets;
   params.texture_index = base_index;
   params.sampler_index = base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   bld_base->tex(bld_base, &params);
   assign_dest(bld_base, &instr->dest, texel);
}

static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[4];
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(bld_base->base.vec_type);
   assign_ssa_dest(bld_base, &instr->def, undef);
}

static void visit_jump(struct lp_build_nir_context *bld_base,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}

static void visit_deref(struct lp_build_nir_context *bld_base,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;
   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->dest.ssa.index, result);
}

static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         assert(0);
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bld_base->if_cond(bld_base, cond);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bld_base->else_stmt(bld_base);
      visit_cf_list(bld_base, &if_stmt->else_list);
   }
   bld_base->endif_stmt(bld_base);
}

static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;

      default:
         assert(0);
      }
   }
}

static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}

/* Vector registers are stored as arrays on the LLVM side, so we can use GEP
 * on them; to do exec-mask stores we need to operate on single components.
 * The arrays are laid out as:
 *   0.x, 1.x, 2.x, 3.x
 *   0.y, 1.y, 2.y, 3.y
 *   ....
 */
static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
                                     nir_register *reg)
{
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (reg->num_array_elems)
      type = LLVMArrayType(type, reg->num_array_elems);
   if (reg->num_components > 1)
      type = LLVMArrayType(type, reg->num_components);

   return type;
}

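/* Entry point for translating a NIR shader: lower it out of pure SSA form,
 * declare outputs and allocate storage for registers, then walk the entry
 * function's control-flow list (taken as the head of the function list),
 * emitting code via the bld_base callbacks. */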
bool lp_build_nir_llvm(
   struct lp_build_nir_context *bld_base,
   struct nir_shader *nir)
{
   struct nir_function *func;

   nir_convert_from_ssa(nir, true);
   nir_lower_locals_to_regs(nir);
   nir_remove_dead_derefs(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp);

   nir_foreach_variable(variable, &nir->outputs)
      handle_shader_output_decl(bld_base, nir, variable);

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_foreach_register(reg, &func->impl->registers) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
                                                     type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(func->impl);
   bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &func->impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   return true;
}

/* Do some basic optimizations to remove things we don't want to see. */
void lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;
   do {
      progress = false;
      /* Use NIR_PASS so the passes report progress; with NIR_PASS_V the
       * loop would only ever run once. */
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
   } while (progress);
   nir_lower_bool_to_int32(nir);
}