gallivm/llvmpipe: add support for global operations.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

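/* Bitcast a value to the vector type matching the given NIR ALU type and
 * bit size (e.g. nir_type_float/32 -> the base float vector type).
 */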
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}


static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

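/* Translate a GLSL sampler dimension (plus arrayness) to the pipe texture target. */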
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[4])
{
   assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] : lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[4])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[4])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[4])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

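/* Produce a 32-bit boolean mask (~0 or 0 per channel) by comparing against
 * zero; 64-bit comparison results are truncated back down to 32-bit masks.
 */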
static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[4])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

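/* Fetch an ALU source and apply its swizzle, replicating or narrowing the
 * value so it ends up with the requested number of components.
 */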
static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[4],
                                 LLVMValueRef src[4])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

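/* Split a 64-bit-per-channel vector into its low or high 32-bit halves by
 * shuffling out the even (lo) or odd (hi) 32-bit lanes.
 */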
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

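/* Interleave two 32-bit vectors (lo, hi) into one vector of 64-bit pairs. */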
static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

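/* Integer division with a divide-by-zero guard: the divisor is OR'd with the
 * (divisor == 0) mask so the division itself is safe, and the mask is OR'd
 * into the result afterwards so x/0 yields all ones.
 */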
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);
   /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
    * may as well do the same for idiv. */
   return LLVMBuildOr(builder, div_mask, result, "");
}

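/* Emit the LLVM IR for a single (per-channel) NIR ALU opcode. */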
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[4], LLVMValueRef src[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_irem:
      result = lp_build_mod(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = lp_build_mod(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

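/* Translate a NIR ALU instruction: gather and cast the sources, run the op
 * once per output channel, then cast and store the results.
 */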
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[4];
   unsigned src_bit_size[4];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[4];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[4];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[4];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

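/* Walk a deref chain and accumulate the constant part of the offset (in
 * attribute slots) plus an optional indirect (per-lane) offset value.
 */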
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index = 0;
   LLVMValueRef indir_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index = NULL;
   if (var)
      get_deref_offset(bld_base, deref, false, NULL, NULL,
                       &const_index, &indir_index);
   bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}


static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

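/* Image load/store: extract the coords, fix up the layer coord placement for
 * 1D arrays, and hand the request to the backend image_op callback.
 */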
static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}

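/* Map a NIR image atomic intrinsic to the corresponding LLVM atomicrmw op
 * (or a compare-and-swap) and emit it through the backend image_op hook.
 */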
static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}


static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[4])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[4])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_load_global(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr, LLVMValueRef result[4])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                         nir_src_bit_size(instr->src[0]),
                         addr, result);
}

static void visit_store_global(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
}

static void visit_global_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[4])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
}

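/* Dispatch a NIR intrinsic to the matching helper or backend callback and
 * store any produced result.
 */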
static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[4] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_work_dim:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   case nir_intrinsic_load_global:
      visit_load_global(bld_base, instr, result);
      break;
   case nir_intrinsic_store_global:
      visit_store_global(bld_base, instr);
      break;
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
      visit_global_atomic(bld_base, instr, result);
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

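/* Texture size / query_levels: only an explicit LOD source is consumed;
 * query_levels takes its answer from the fourth returned component.
 */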
static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params;
   LLVMValueRef sizes_out[4];
   LLVMValueRef explicit_lod = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

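/* Classify the LOD as scalar (dynamically uniform), per-quad (fragment
 * shaders, unless quad LOD is disabled) or per-element.
 */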
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   }
   else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

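/* Translate a NIR texture instruction: collect coords, derivatives, offsets
 * and LOD sources, build the sample key, and invoke the backend tex hook.
 */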
1430 static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
1431 {
1432 struct gallivm_state *gallivm = bld_base->base.gallivm;
1433 LLVMBuilderRef builder = gallivm->builder;
1434 LLVMValueRef coords[5];
1435 LLVMValueRef offsets[3] = { NULL };
1436 LLVMValueRef explicit_lod = NULL, projector = NULL;
1437 struct lp_sampler_params params;
1438 struct lp_derivatives derivs;
1439 unsigned sample_key = 0;
1440 nir_deref_instr *texture_deref_instr = NULL;
1441 nir_deref_instr *sampler_deref_instr = NULL;
1442 LLVMValueRef texel[4];
1443 unsigned lod_src = 0;
1444 LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
1445
1446 memset(&params, 0, sizeof(params));
1447 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
1448
1449 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
1450 visit_txs(bld_base, instr);
1451 return;
1452 }
1453 if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1454 sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
1455 else if (instr->op == nir_texop_tg4)
1456 sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
1457 else if (instr->op == nir_texop_lod)
1458 sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
1459 for (unsigned i = 0; i < instr->num_srcs; i++) {
1460 switch (instr->src[i].src_type) {
1461 case nir_tex_src_coord: {
1462 LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
1463 if (instr->coord_components == 1)
1464 coords[0] = coord;
1465 else {
1466 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1467 coords[chan] = LLVMBuildExtractValue(builder, coord,
1468 chan, "");
1469 }
1470 for (unsigned chan = instr->coord_components; chan < 5; chan++)
1471 coords[chan] = coord_undef;
1472
1473 break;
1474 }
1475 case nir_tex_src_texture_deref:
1476 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
1477 break;
1478 case nir_tex_src_sampler_deref:
1479 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
1480 break;
1481 case nir_tex_src_projector:
1482 projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
1483 break;
1484 case nir_tex_src_comparator:
1485 sample_key |= LP_SAMPLER_SHADOW;
1486 coords[4] = get_src(bld_base, instr->src[i].src);
1487 coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
1488 break;
1489 case nir_tex_src_bias:
1490 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
1491 lod_src = i;
1492 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1493 break;
1494 case nir_tex_src_lod:
1495 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
1496 lod_src = i;
1497 if (instr->op == nir_texop_txf)
1498 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
1499 else
1500 explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
1501 break;
1502 case nir_tex_src_ddx: {
1503 int deriv_cnt = instr->coord_components;
1504 if (instr->is_array)
1505 deriv_cnt--;
1506 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1507 if (deriv_cnt == 1)
1508 derivs.ddx[0] = deriv_val;
1509 else
1510 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1511 derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
1512 chan, "");
1513 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1514 derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
1515 break;
1516 }
1517 case nir_tex_src_ddy: {
1518 int deriv_cnt = instr->coord_components;
1519 if (instr->is_array)
1520 deriv_cnt--;
1521 LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
1522 if (deriv_cnt == 1)
1523 derivs.ddy[0] = deriv_val;
1524 else
1525 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1526 derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
1527 chan, "");
1528 for (unsigned chan = 0; chan < deriv_cnt; ++chan)
1529 derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
1530 break;
1531 }
1532 case nir_tex_src_offset: {
1533 int offset_cnt = instr->coord_components;
1534 if (instr->is_array)
1535 offset_cnt--;
1536 LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
1537 sample_key |= LP_SAMPLER_OFFSETS;
1538 if (offset_cnt == 1)
1539 offsets[0] = offset_val;
1540 else {
1541 for (unsigned chan = 0; chan < offset_cnt; ++chan)
1542 offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
1543 chan, "");
1544 }
1545 break;
1546 }
1547 case nir_tex_src_ms_index:
1548 break; /* the sample index is currently ignored */
1549 default:
1550 assert(0);
1551 break;
1552 }
1553 }
1554 if (!sampler_deref_instr)
1555 sampler_deref_instr = texture_deref_instr;
1556
1557 if (explicit_lod)
1558 lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);
1559
1560 if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
1561 instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
1562 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1563 coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
1564 else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
1565 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1566 coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
1567
1568 if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
1569 /* for 1D arrays the layer lives in coords[2]; move it out of the unused Y slot. */
1570 coords[2] = coords[1];
1571 coords[1] = coord_undef;
1572 }
1573
1574 if (projector) {
1575 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
1576 coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
1577 if (sample_key & LP_SAMPLER_SHADOW)
1578 coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
1579 }
1580
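/* With no texture deref (and no bindless handle source) the
 * texture/sampler index is taken directly from the instruction. */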
1581 uint32_t base_index = 0;
1582 if (!texture_deref_instr) {
1583 int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
1584 if (samp_src_index == -1) {
1585 base_index = instr->sampler_index;
1586 }
1587 }
1588
1589 if (instr->op == nir_texop_txd) {
1590 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
1591 params.derivs = &derivs;
1592 if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
1593 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
1594 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1595 else
1596 lod_property = LP_SAMPLER_LOD_PER_QUAD;
1597 } else
1598 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
1599 }
1600
1601 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
1602 params.sample_key = sample_key;
1603 params.offsets = offsets;
1604 params.texture_index = base_index;
1605 params.sampler_index = base_index;
1606 params.coords = coords;
1607 params.texel = texel;
1608 params.lod = explicit_lod;
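/* For reference: a plain GLSL texture(sampler2D, uv) reaches this point
 * as nir_texop_tex, so sample_key keeps the implicit-LOD defaults and
 * coords[] carries two float32 channels with the remainder undef. */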
1609 bld_base->tex(bld_base, &params);
1610 assign_dest(bld_base, &instr->dest, texel);
1611 }
1612
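/* An SSA undef simply materializes LLVM undef vectors, one per
 * component of the destination. */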
1613 static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
1614 const nir_ssa_undef_instr *instr)
1615 {
1616 unsigned num_components = instr->def.num_components;
1617 LLVMValueRef undef[4];
1618 for (unsigned i = 0; i < num_components; i++)
1619 undef[i] = LLVMGetUndef(bld_base->base.vec_type);
1620 assign_ssa_dest(bld_base, &instr->def, undef);
1621 }
1622
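/* break/continue map onto the mask-based control-flow helpers; other
 * jump kinds (e.g. return) are expected to have been lowered away
 * before reaching this point. */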
1623 static void visit_jump(struct lp_build_nir_context *bld_base,
1624 const nir_jump_instr *instr)
1625 {
1626 switch (instr->type) {
1627 case nir_jump_break:
1628 bld_base->break_stmt(bld_base);
1629 break;
1630 case nir_jump_continue:
1631 bld_base->continue_stmt(bld_base);
1632 break;
1633 default:
1634 unreachable("Unknown jump instr");
1635 }
1636 }
1637
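/* Only shared and global memory derefs need handling here; a variable
 * deref resolves to the LLVM value recorded when the var was declared. */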
1638 static void visit_deref(struct lp_build_nir_context *bld_base,
1639 nir_deref_instr *instr)
1640 {
1641 if (instr->mode != nir_var_mem_shared &&
1642 instr->mode != nir_var_mem_global)
1643 return;
1644 LLVMValueRef result = NULL;
1645 switch(instr->deref_type) {
1646 case nir_deref_type_var: {
1647 struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
1648 result = entry->data;
1649 break;
1650 }
1651 default:
1652 unreachable("Unhandled deref_instr deref type");
1653 }
1654
1655 assign_ssa(bld_base, instr->dest.ssa.index, result);
1656 }
1657
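/* Dispatch every instruction in the block to its visitor. Phis can
 * never occur since the shader is taken out of SSA form in
 * lp_build_nir_llvm() before translation. */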
1658 static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
1659 {
1660 nir_foreach_instr(instr, block)
1661 {
1662 switch (instr->type) {
1663 case nir_instr_type_alu:
1664 visit_alu(bld_base, nir_instr_as_alu(instr));
1665 break;
1666 case nir_instr_type_load_const:
1667 visit_load_const(bld_base, nir_instr_as_load_const(instr));
1668 break;
1669 case nir_instr_type_intrinsic:
1670 visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
1671 break;
1672 case nir_instr_type_tex:
1673 visit_tex(bld_base, nir_instr_as_tex(instr));
1674 break;
1675 case nir_instr_type_phi:
1676 assert(0);
1677 break;
1678 case nir_instr_type_ssa_undef:
1679 visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
1680 break;
1681 case nir_instr_type_jump:
1682 visit_jump(bld_base, nir_instr_as_jump(instr));
1683 break;
1684 case nir_instr_type_deref:
1685 visit_deref(bld_base, nir_instr_as_deref(instr));
1686 break;
1687 default:
1688 fprintf(stderr, "Unknown NIR instr type: ");
1689 nir_print_instr(instr, stderr);
1690 fprintf(stderr, "\n");
1691 abort();
1692 }
1693 }
1694 }
1695
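/* Emit a mask-based if/else/endif region around the two branches. */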
1696 static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
1697 {
1698 LLVMValueRef cond = get_src(bld_base, if_stmt->condition);
1699
1700 bld_base->if_cond(bld_base, cond);
1701 visit_cf_list(bld_base, &if_stmt->then_list);
1702
1703 if (!exec_list_is_empty(&if_stmt->else_list)) {
1704 bld_base->else_stmt(bld_base);
1705 visit_cf_list(bld_base, &if_stmt->else_list);
1706 }
1707 bld_base->endif_stmt(bld_base);
1708 }
1709
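/* Loops likewise become mask-managed bgnloop/endloop regions. */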
1710 static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
1711 {
1712 bld_base->bgnloop(bld_base);
1713 visit_cf_list(bld_base, &loop->body);
1714 bld_base->endloop(bld_base);
1715 }
1716
1717 static void visit_cf_list(struct lp_build_nir_context *bld_base,
1718 struct exec_list *list)
1719 {
1720 foreach_list_typed(nir_cf_node, node, node, list)
1721 {
1722 switch (node->type) {
1723 case nir_cf_node_block:
1724 visit_block(bld_base, nir_cf_node_as_block(node));
1725 break;
1726
1727 case nir_cf_node_if:
1728 visit_if(bld_base, nir_cf_node_as_if(node));
1729 break;
1730
1731 case nir_cf_node_loop:
1732 visit_loop(bld_base, nir_cf_node_as_loop(node));
1733 break;
1734
1735 default:
1736 assert(0);
1737 }
1738 }
1739 }
1740
1741 static void
1742 handle_shader_output_decl(struct lp_build_nir_context *bld_base,
1743 struct nir_shader *nir,
1744 struct nir_variable *variable)
1745 {
1746 bld_base->emit_var_decl(bld_base, variable);
1747 }
1748
1749 /* Vector registers are stored as arrays on the LLVM side so that we
1750 can use GEP on them: exec-mask stores have to operate on a single
1751 component at a time.
1752 The arrays are laid out as:
1753 0.x, 1.x, 2.x, 3.x
1754 0.y, 1.y, 2.y, 3.y
1755 ....
1756 */
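/* As a hypothetical sketch (the names are illustrative, not the actual
 * helpers), component `comp` of such a register storage would be
 * addressed with:
 *
 *    LLVMValueRef idx[2] = {
 *       lp_build_const_int32(gallivm, 0),
 *       lp_build_const_int32(gallivm, comp),
 *    };
 *    LLVMValueRef chan_ptr = LLVMBuildGEP(builder, reg_storage, idx, 2, "");
 *
 * so an exec-mask store only ever touches one component vector. */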
1757 static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
1758 nir_register *reg)
1759 {
1760 struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);
1761
1762 LLVMTypeRef type = int_bld->vec_type;
1763 if (reg->num_array_elems)
1764 type = LLVMArrayType(type, reg->num_array_elems);
1765 if (reg->num_components > 1)
1766 type = LLVMArrayType(type, reg->num_components);
1767
1768 return type;
1769 }
1770
1771
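/* Translate a NIR shader into LLVM IR: take it out of SSA form, lower
 * locals to registers, allocate an alloca per register in the layout
 * described above, then walk the control-flow list of the single
 * function implementation. */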
1772 bool lp_build_nir_llvm(
1773 struct lp_build_nir_context *bld_base,
1774 struct nir_shader *nir)
1775 {
1776 struct nir_function *func;
1777
1778 nir_convert_from_ssa(nir, true);
1779 nir_lower_locals_to_regs(nir);
1780 nir_remove_dead_derefs(nir);
1781 nir_remove_dead_variables(nir, nir_var_function_temp);
1782
1783 nir_foreach_variable(variable, &nir->outputs)
1784 handle_shader_output_decl(bld_base, nir, variable);
1785
1786 bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1787 _mesa_key_pointer_equal);
1788 bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1789 _mesa_key_pointer_equal);
1790
1791 func = (struct nir_function *)exec_list_get_head(&nir->functions);
1792
1793 nir_foreach_register(reg, &func->impl->registers) {
1794 LLVMTypeRef type = get_register_type(bld_base, reg);
1795 LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
1796 type, "reg");
1797 _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
1798 }
1799 nir_index_ssa_defs(func->impl);
1800 bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
1801 visit_cf_list(bld_base, &func->impl->body);
1802
1803 free(bld_base->ssa_defs);
1804 ralloc_free(bld_base->vars);
1805 ralloc_free(bld_base->regs);
1806 return true;
1807 }
1808
1809 /* Run basic optimizations until they stop making progress, then lower booleans to 32-bit ints. */
1810 void lp_build_opt_nir(struct nir_shader *nir)
1811 {
1812 bool progress;
1813 do {
1814 progress = false;
1815 NIR_PASS(progress, nir, nir_opt_constant_folding);
1816 NIR_PASS(progress, nir, nir_opt_algebraic);
1817 } while (progress);
1818 nir_lower_bool_to_int32(nir);
1819 }