gallivm/nir: hook up texture samples queries
[mesa.git] src/gallium/auxiliary/gallivm/lp_bld_nir.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir_deref.h"

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list);

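/* Bitcast an SoA channel value to the LLVM vector type matching the
 * requested NIR alu type and bit size, so subsequent LLVM operations see
 * correctly typed operands. Unknown types are passed through unchanged.
 */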
static LLVMValueRef cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
                              nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}


static struct lp_build_context *get_flt_bld(struct lp_build_nir_context *bld_base,
                                            unsigned op_bit_size)
{
   if (op_bit_size == 64)
      return &bld_base->dbl_bld;
   else
      return &bld_base->base;
}

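/* Map a GLSL sampler dimension (plus arrayness) to the corresponding
 * gallium pipe_texture_target; multisampled surfaces are treated as
 * regular 2D targets here.
 */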
static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}

static LLVMValueRef get_ssa_src(struct lp_build_nir_context *bld_base, nir_ssa_def *ssa)
{
   return bld_base->ssa_defs[ssa->index];
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src);

static LLVMValueRef get_reg_src(struct lp_build_nir_context *bld_base, nir_reg_src src)
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, src.reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, src.reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (src.indirect)
      indir_src = get_src(bld_base, *src.indirect);
   return bld_base->load_reg(bld_base, reg_bld, &src, indir_src, reg_storage);
}

static LLVMValueRef get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   if (src.is_ssa)
      return get_ssa_src(bld_base, src.ssa);
   else
      return get_reg_src(bld_base, src.reg);
}

static void assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}

static void assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
                            LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   assign_ssa(bld_base, ssa->index, ssa->num_components == 1 ? vals[0] : lp_nir_array_build_gather_values(bld_base->base.gallivm->builder, vals, ssa->num_components));
}

static void assign_reg(struct lp_build_nir_context *bld_base, const nir_reg_dest *reg,
                       unsigned write_mask,
                       LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, reg->reg);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, reg->reg->bit_size);
   LLVMValueRef indir_src = NULL;
   if (reg->indirect)
      indir_src = get_src(bld_base, *reg->indirect);
   bld_base->store_reg(bld_base, reg_bld, reg, write_mask ? write_mask : 0xf, indir_src, reg_storage, vals);
}

static void assign_dest(struct lp_build_nir_context *bld_base, const nir_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->is_ssa)
      assign_ssa_dest(bld_base, &dest->ssa, vals);
   else
      assign_reg(bld_base, &dest->reg, 0, vals);
}

static void assign_alu_dest(struct lp_build_nir_context *bld_base, const nir_alu_dest *dest, LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if (dest->dest.is_ssa)
      assign_ssa_dest(bld_base, &dest->dest.ssa, vals);
   else
      assign_reg(bld_base, &dest->dest.reg, dest->write_mask, vals);
}

static LLVMValueRef int_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  bool is_unsigned,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_compare(bld_base->base.gallivm, int_bld->type, PIPE_FUNC_NOTEQUAL, val, int_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef flt_to_bool32(struct lp_build_nir_context *bld_base,
                                  uint32_t src_bit_size,
                                  LLVMValueRef val)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result = lp_build_cmp(flt_bld, PIPE_FUNC_NOTEQUAL, val, flt_bld->zero);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

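/* Comparison helpers: NIR 32-bit booleans are all-ones/all-zeros masks,
 * so compare results from narrower sources get sign-extended and 64-bit
 * results truncated back down to 32-bit masks.
 */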
static LLVMValueRef fcmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef icmp32(struct lp_build_nir_context *bld_base,
                           enum pipe_compare_func compare,
                           bool is_unsigned,
                           uint32_t src_bit_size,
                           LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}

static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef value = get_src(bld_base, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = nir_src_num_components(src.src);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value, value, value, value, value, value, value, value, value, value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

static LLVMValueRef emit_b2f(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      LLVMBuildBitCast(builder, lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type,
                                                                                   1.0), bld_base->int_bld.vec_type, ""),
                                      "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result, bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}

static LLVMValueRef emit_b2i(struct lp_build_nir_context *bld_base,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, 1), "");
   switch (bitsize) {
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}

static LLVMValueRef emit_b32csel(struct lp_build_nir_context *bld_base,
                                 unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
                                 LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}

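/* Helpers for the 64-bit SoA representation: a 64-bit vector is viewed as
 * twice as many 32-bit elements, and these shuffles split it into its low
 * or high 32-bit halves, or merge two 32-bit vectors back into
 * interleaved lo/hi pairs.
 */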
static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
                                LLVMValueRef src,
                                bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}

static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}

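/* Integer division/modulo with defined divide-by-zero behaviour: lanes
 * with a zero divisor are OR'd up to all-ones before the divide so the
 * vector op cannot fault, and the affected result lanes are patched
 * afterwards following the D3D10 convention noted below.
 */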
static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   if (!is_unsigned) {
      /* INT_MIN (0x80000000) / -1 (0xffffffff) causes sigfpe, seen with blender. */
      div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
   }
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff at least with d3d10
       * may as well do same for idiv */
      return LLVMBuildOr(builder, div_mask, result, "");
}

static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(int_bld, PIPE_FUNC_EQUAL, src2,
                                        int_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}

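/* Lower one (per-channel) NIR ALU opcode to gallivm/LLVM IR. Sources have
 * already been cast to the types the op expects; the bit size of source 0
 * selects the float or integer build context.
 */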
static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                                  nir_op op, unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS], LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;
   switch (op) {
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2b32:
      result = flt_to_bool32(bld_base, src_bit_size[0], src[0]);
      break;
   case nir_op_f2f32:
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i32:
      result = LLVMBuildFPToSI(builder, src[0], bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2u32:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->base.int_vec_type, "");
      break;
   case nir_op_f2i64:
      result = LLVMBuildFPToSI(builder,
                               src[0],
                               bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_f2u64:
      result = LLVMBuildFPToUI(builder,
                               src[0],
                               bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(&bld_base->base, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      result = lp_build_ddx(&bld_base->base, src[0]);
      break;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      result = lp_build_ddy(&bld_base->base, src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(&bld_base->base, src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb:
      result = lp_build_cttz(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_flog2:
      result = lp_build_log2_safe(&bld_base->base, src[0]);
      break;
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmin:
      result = lp_build_min(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fmax:
      result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_fne32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(&bld_base->base, src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(&bld_base->base, src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2b32:
      result = int_to_bool32(bld_base, src_bit_size[0], false, src[0]);
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->int_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->dbl_bld.vec_type, "");
      break;
   }
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(&bld_base->uint_bld, src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   default:
      assert(0);
      break;
   }
   return result;
}

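/* Translate a NIR ALU instruction: fetch and swizzle the sources, emit
 * do_alu_action once per destination channel (vecN ops merely re-pack
 * their inputs), then write the channel results to the destination.
 */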
static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   unsigned num_components = nir_dest_num_components(instr->dest.dest);
   unsigned src_components;
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
      }
   } else {
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else
               src_chan[i] = src[i];
            src_chan[i] = cast_type(bld_base, src_chan[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr->op, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c], nir_op_infos[instr->op].output_type, nir_dest_bit_size(instr->dest.dest));
      }
   }
   assign_alu_dest(bld_base, &instr->dest, result);
}

static void visit_load_const(struct lp_build_nir_context *bld_base,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < instr->def.num_components; i++)
      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
   assign_ssa_dest(bld_base, &instr->def, result);
}

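/* Walk a variable deref chain and split it into a constant slot offset
 * plus an optional per-lane indirect offset; for per-vertex GS/TCS/TES
 * inputs and outputs the leading vertex index is extracted separately.
 */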
static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}

static void visit_load_var(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef indir_vertex_index = NULL;
   unsigned vertex_index = 0;
   unsigned nc = nir_dest_num_components(instr->dest);
   unsigned bit_size = nir_dest_bit_size(instr->dest);
   if (var) {
      bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
         var->data.mode == nir_var_shader_in;
      bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
         var->data.mode == nir_var_shader_in;
      bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_in;
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
         var->data.mode == nir_var_shader_in && !var->data.patch;

      mode = var->data.mode;

      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
}

static void
visit_store_var(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = deref->mode;
   int writemask = instr->const_index[0];
   unsigned bit_size = nir_src_bit_size(instr->src[1]);
   LLVMValueRef src = get_src(bld_base, instr->src[1]);
   unsigned const_index = 0;
   LLVMValueRef indir_index, indir_vertex_index = NULL;
   if (var) {
      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
         var->data.mode == nir_var_shader_out && !var->data.patch;
      get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
                       &const_index, &indir_index);
   }
   bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
}

static void visit_load_ubo(struct lp_build_nir_context *bld_base,
                           nir_intrinsic_instr *instr,
                           LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
   idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      offset_is_uniform, idx, offset, result);
}


static void visit_load_ssbo(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr,
                            LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      idx, offset, result);
}

static void visit_store_ssbo(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, idx, offset, val);
}

static void visit_get_buffer_size(struct lp_build_nir_context *bld_base,
                                  nir_intrinsic_instr *instr,
                                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   result[0] = bld_base->get_buffer_size(bld_base, idx);
}

static void visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, idx, offset, val, val2, &result[0]);
}

static void visit_load_image(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   params.img_op = LP_IMG_LOAD;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.image_index = var->data.binding;
   bld_base->image_op(bld_base, &params);
}

static void visit_store_image(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params;
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
      params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.img_op = LP_IMG_STORE;
   params.image_index = var->data.binding;

   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   bld_base->image_op(bld_base, &params);
}

static void visit_atomic_image(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr,
                               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_img_params params;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   const struct glsl_type *type = glsl_without_array(var->type);

   memset(&params, 0, sizeof(params));

   switch (instr->intrinsic) {
   case nir_intrinsic_image_deref_atomic_add:
      params.op = LLVMAtomicRMWBinOpAdd;
      break;
   case nir_intrinsic_image_deref_atomic_exchange:
      params.op = LLVMAtomicRMWBinOpXchg;
      break;
   case nir_intrinsic_image_deref_atomic_and:
      params.op = LLVMAtomicRMWBinOpAnd;
      break;
   case nir_intrinsic_image_deref_atomic_or:
      params.op = LLVMAtomicRMWBinOpOr;
      break;
   case nir_intrinsic_image_deref_atomic_xor:
      params.op = LLVMAtomicRMWBinOpXor;
      break;
   case nir_intrinsic_image_deref_atomic_umin:
      params.op = LLVMAtomicRMWBinOpUMin;
      break;
   case nir_intrinsic_image_deref_atomic_umax:
      params.op = LLVMAtomicRMWBinOpUMax;
      break;
   case nir_intrinsic_image_deref_atomic_imin:
      params.op = LLVMAtomicRMWBinOpMin;
      break;
   case nir_intrinsic_image_deref_atomic_imax:
      params.op = LLVMAtomicRMWBinOpMax;
      break;
   default:
      break;
   }

   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;
   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;
   } else
      params.indata[0] = in_val;

   params.outdata = result;
   params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
   params.image_index = var->data.binding;

   bld_base->image_op(bld_base, &params);
}


static void visit_image_size(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;

   bld_base->image_size(bld_base, &params);
}

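/* image_deref_samples reuses the image size query path with samples_only
 * set, so only the sample count is returned.
 */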
static void visit_image_samples(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   struct lp_sampler_size_query_params params = { 0 };
   params.texture_unit = var->data.binding;
   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
   params.sizes_out = result;
   params.samples_only = true;

   bld_base->image_size(bld_base, &params);
}

static void visit_shared_load(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr,
                              LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                      NULL, offset, result);
}

static void visit_shared_store(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize, NULL, offset, val);
}

static void visit_shared_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, instr->intrinsic, NULL, offset, val, val2, &result[0]);
}

static void visit_barrier(struct lp_build_nir_context *bld_base)
{
   bld_base->barrier(bld_base);
}

static void visit_discard(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}

static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                                    nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}

static void visit_load_global(struct lp_build_nir_context *bld_base,
                              nir_intrinsic_instr *instr, LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
                         nir_src_bit_size(instr->src[0]),
                         addr, result);
}

static void visit_store_global(struct lp_build_nir_context *bld_base,
                               nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize, addr_bitsize, addr, val);
}

static void visit_global_atomic(struct lp_build_nir_context *bld_base,
                                nir_intrinsic_instr *instr,
                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
}

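/* Central intrinsic dispatch: each case either emits code directly or
 * forwards to a callback the backend installed on bld_base; any value
 * produced is written back through assign_dest at the end.
 */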
static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      visit_get_buffer_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_work_dim:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_load_helper_invocation:
      bld_base->helper_invocation(bld_base, &result[0]);
      break;
   case nir_intrinsic_discard_if:
   case nir_intrinsic_discard:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_image_deref_samples:
      visit_image_samples(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_control_barrier:
      visit_barrier(bld_base);
      break;
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_shared:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   case nir_intrinsic_load_global:
      visit_load_global(bld_base, instr, result);
      break;
   case nir_intrinsic_store_global:
      visit_store_global(bld_base, instr);
      break;
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
      visit_global_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq:
      bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
      break;
   default:
      assert(0);
      break;
   }
   if (result[0]) {
      assign_dest(bld_base, &instr->dest, result);
   }
}

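/* Texture size/levels/samples queries (txs, query_levels, texture_samples)
 * all go through the tex_size callback: texture_samples sets samples_only,
 * and query_levels reads the level count from component 3 of the result.
 */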
static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params = { 0 };
   LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
   LLVMValueRef explicit_lod = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = TRUE;
   params.sizes_out = sizes_out;
   params.samples_only = (instr->op == nir_texop_texture_samples);

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;
   bld_base->tex_size(bld_base, &params);
   assign_dest(bld_base, &instr->dest, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}

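/* Work out how the LOD varies across the SIMD vector: a dynamically
 * uniform LOD can be evaluated once for the whole vector, fragment
 * shaders can evaluate it per 2x2 quad (unless GALLIVM_PERF_NO_QUAD_LOD
 * requests per-element LOD), and anything else must be computed per
 * element. */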
static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_nir_context *bld_base,
                                                              nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_dynamically_uniform(lod_src))
      lod_property = LP_SAMPLER_LOD_SCALAR;
   else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   }
   else
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   return lod_property;
}

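/* Translate a NIR texture instruction into a gallivm sampler call: the
 * texop and its modifiers are packed into sample_key, the sources are
 * gathered into coords[0..3] (with the shadow comparator in coords[4]),
 * and everything is handed to the backend's tex() hook via
 * lp_sampler_params. */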
static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, projector = NULL, ms_index = NULL;
   struct lp_sampler_params params;
   struct lp_derivatives derivs;
   unsigned sample_key = 0;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
   unsigned lod_src = 0;
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);

   memset(&params, 0, sizeof(params));
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;

   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
      visit_txs(bld_base, instr);
      return;
   }
   if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   else if (instr->op == nir_texop_tg4) {
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
      sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
   } else if (instr->op == nir_texop_lod)
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (instr->coord_components == 1)
            coords[0] = coord;
         else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = instr->coord_components; chan < 5; chan++)
            coords[chan] = coord_undef;

         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_projector:
         projector = lp_build_rcp(&bld_base->base, cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32));
         break;
      case nir_tex_src_comparator:
         sample_key |= LP_SAMPLER_SHADOW;
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         lod_src = i;
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         sample_key |= LP_SAMPLER_OFFSETS;
         if (offset_cnt == 1)
            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
            }
         }
         break;
      }
      case nir_tex_src_ms_index:
         sample_key |= LP_SAMPLER_FETCH_MS;
         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(bld_base, instr->src[lod_src].src);

   if (instr->op == nir_texop_tex || instr->op == nir_texop_tg4 || instr->op == nir_texop_txb ||
       instr->op == nir_texop_txl || instr->op == nir_texop_txd || instr->op == nir_texop_lod)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
   else if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   if (projector) {
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = lp_build_mul(&bld_base->base, coords[chan], projector);
      if (sample_key & LP_SAMPLER_SHADOW)
         coords[4] = lp_build_mul(&bld_base->base, coords[4], projector);
   }

   uint32_t base_index = 0;
   if (!texture_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         base_index = instr->sampler_index;
      }
   }

   if (instr->op == nir_texop_txd) {
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      params.derivs = &derivs;
      if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
   params.sample_key = sample_key;
   params.offsets = offsets;
   params.texture_index = base_index;
   params.sampler_index = base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   params.ms_index = ms_index;
   bld_base->tex(bld_base, &params);
   assign_dest(bld_base, &instr->dest, texel);
}

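/* An ssa_undef instruction simply becomes one LLVM undef vector per
 * destination component, at the destination's bit width. */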
static void visit_ssa_undef(struct lp_build_nir_context *bld_base,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *undef_bld = get_int_bld(bld_base, true, instr->def.bit_size);
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(undef_bld->vec_type);
   assign_ssa_dest(bld_base, &instr->def, undef);
}

static void visit_jump(struct lp_build_nir_context *bld_base,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}

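/* Only shared and global memory derefs need to be materialized as LLVM
 * values here (by looking the variable up in the vars hash table);
 * derefs of other modes are consumed directly by the load/store
 * visitors. */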
static void visit_deref(struct lp_build_nir_context *bld_base,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;
   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->dest.ssa.index, result);
}

static void visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         /* phis should have been lowered to registers by nir_convert_from_ssa. */
         assert(0);
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(bld_base, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

static void visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bld_base->if_cond(bld_base, cond);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bld_base->else_stmt(bld_base);
      visit_cf_list(bld_base, &if_stmt->else_list);
   }
   bld_base->endif_stmt(bld_base);
}

static void visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}

static void visit_cf_list(struct lp_build_nir_context *bld_base,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;

      default:
         assert(0);
      }
   }
}

static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}

/* Vector registers are stored as arrays on the LLVM side, so we can use
 * GEP on them; to do exec-mask stores we need to operate on one
 * component at a time.  The arrays are laid out as:
 *   0.x, 1.x, 2.x, 3.x
 *   0.y, 1.y, 2.y, 3.y
 *   ....
 */
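/* For illustration (assuming a 32-bit, non-array vec4 register and an
 * 8-wide SIMD build, i.e. the integer build context's vec_type being
 * <8 x i32>), get_register_type() below would return:
 *
 *    [4 x <8 x i32>]
 *
 * so a masked store of component "chan" can GEP to array element chan
 * and update individual lanes of that vector. */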
static LLVMTypeRef get_register_type(struct lp_build_nir_context *bld_base,
                                     nir_register *reg)
{
   struct lp_build_context *int_bld = get_int_bld(bld_base, true, reg->bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (reg->num_array_elems)
      type = LLVMArrayType(type, reg->num_array_elems);
   if (reg->num_components > 1)
      type = LLVMArrayType(type, reg->num_components);

   return type;
}


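/* Main entry point: take the NIR shader out of SSA form, lower locals
 * to registers, emit an alloca for each register, then walk the
 * control-flow list emitting LLVM IR through the callbacks in
 * bld_base. */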
bool lp_build_nir_llvm(
   struct lp_build_nir_context *bld_base,
   struct nir_shader *nir)
{
   struct nir_function *func;

   nir_convert_from_ssa(nir, true);
   nir_lower_locals_to_regs(nir);
   nir_remove_dead_derefs(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp);

   nir_foreach_variable(variable, &nir->outputs)
      handle_shader_output_decl(bld_base, nir, variable);

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_foreach_register(reg, &func->impl->registers) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca_undef(bld_base->base.gallivm,
                                                     type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(func->impl);
   bld_base->ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &func->impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   return true;
}

/* Run some basic optimization passes to remove constructs we don't want
 * to have to handle, repeating until they stop making progress. */
void lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;
   do {
      progress = false;
      /* NIR_PASS (rather than NIR_PASS_V) accumulates each pass's
       * progress so the loop actually repeats; with NIR_PASS_V the
       * progress flag was never set and the loop ran only once. */
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
      NIR_PASS(progress, nir, nir_lower_tex, &options);
   } while (progress);
   nir_lower_bool_to_int32(nir);
}