/* mesa.git: src/gallium/drivers/radeonsi/si_shader_tgsi_alu.c */
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_gather.h"
#include "tgsi/tgsi_parse.h"

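/* KILL_IF: discard the pixel if any source channel is negative.
 * fetch_args ORs the per-channel "< 0" conditions together and selects
 * -1.0 (kill) or 0.0 (keep) as the single argument passed to the kill
 * intrinsic by kil_emit.
 */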
static void kill_if_fetch_args(struct lp_build_tgsi_context *bld_base,
			       struct lp_build_emit_data *emit_data)
{
	const struct tgsi_full_instruction *inst = emit_data->inst;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned i;
	LLVMValueRef conds[TGSI_NUM_CHANNELS];

	for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
		LLVMValueRef value = lp_build_emit_fetch(bld_base, inst, 0, i);
		conds[i] = LLVMBuildFCmp(builder, LLVMRealOLT, value,
					 bld_base->base.zero, "");
	}

	/* Or the conditions together */
	for (i = TGSI_NUM_CHANNELS - 1; i > 0; i--) {
		conds[i - 1] = LLVMBuildOr(builder, conds[i], conds[i - 1], "");
	}

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);
	emit_data->arg_count = 1;
	emit_data->args[0] = LLVMBuildSelect(builder, conds[0],
					     lp_build_const_float(gallivm, -1.0f),
					     bld_base->base.zero, "");
}

static void kil_emit(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	unsigned i;
	for (i = 0; i < emit_data->arg_count; i++) {
		emit_data->output[i] = lp_build_intrinsic_unary(
			bld_base->base.gallivm->builder,
			action->intr_name,
			emit_data->dst_type, emit_data->args[i]);
	}
}

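/* Integer set-on-condition (USEQ/USNE/USGE/USLT/ISGE/ISLT and the 64-bit
 * variants): compare the two operands and sign-extend the i1 result to a
 * 0 / 0xffffffff mask in a 32-bit integer, which is what TGSI expects.
 */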
static void emit_icmp(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	unsigned pred;
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;

	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_USEQ:
	case TGSI_OPCODE_U64SEQ: pred = LLVMIntEQ; break;
	case TGSI_OPCODE_USNE:
	case TGSI_OPCODE_U64SNE: pred = LLVMIntNE; break;
	case TGSI_OPCODE_USGE:
	case TGSI_OPCODE_U64SGE: pred = LLVMIntUGE; break;
	case TGSI_OPCODE_USLT:
	case TGSI_OPCODE_U64SLT: pred = LLVMIntULT; break;
	case TGSI_OPCODE_ISGE:
	case TGSI_OPCODE_I64SGE: pred = LLVMIntSGE; break;
	case TGSI_OPCODE_ISLT:
	case TGSI_OPCODE_I64SLT: pred = LLVMIntSLT; break;
	default:
		assert(!"unknown instruction");
		pred = 0;
		break;
	}

	LLVMValueRef v = LLVMBuildICmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

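/* UCMP: integer select.  dst = (src0 != 0) ? src1 : src2, with src0
 * reinterpreted as an unsigned integer.
 */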
static void emit_ucmp(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	LLVMValueRef arg0 = LLVMBuildBitCast(builder, emit_data->args[0],
					     bld_base->uint_bld.elem_type, "");

	LLVMValueRef v = LLVMBuildICmp(builder, LLVMIntNE, arg0,
				       bld_base->uint_bld.zero, "");

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder, v, emit_data->args[1], emit_data->args[2], "");
}

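/* CMP: float conditional select.  dst = (src0 < 0.0) ? src1 : src2. */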
static void emit_cmp(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef cond, *args = emit_data->args;

	cond = LLVMBuildFCmp(builder, LLVMRealOLT, args[0],
			     bld_base->base.zero, "");

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder, cond, args[1], args[2], "");
}

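/* SGE/SEQ/SLE/SLT/SNE/SGT: legacy TGSI "set" opcodes that return 1.0 or
 * 0.0 instead of an integer mask.
 */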
static void emit_set_cond(const struct lp_build_tgsi_action *action,
			  struct lp_build_tgsi_context *bld_base,
			  struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMRealPredicate pred;
	LLVMValueRef cond;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_SGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_SEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_SLE: pred = LLVMRealOLE; break;
	case TGSI_OPCODE_SLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_SNE: pred = LLVMRealUNE; break;
	case TGSI_OPCODE_SGT: pred = LLVMRealOGT; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	cond = LLVMBuildFCmp(builder,
			     pred, emit_data->args[0], emit_data->args[1], "");

	emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
		cond, bld_base->base.one, bld_base->base.zero, "");
}

static void emit_fcmp(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	LLVMRealPredicate pred;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_FSEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_FSGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_FSLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_FSNE: pred = LLVMRealUNE; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	LLVMValueRef v = LLVMBuildFCmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

static void emit_dcmp(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	LLVMRealPredicate pred;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_DSEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_DSGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_DSLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_DSNE: pred = LLVMRealUNE; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	LLVMValueRef v = LLVMBuildFCmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

static void emit_not(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef v = bitcast(bld_base, TGSI_TYPE_UNSIGNED,
				 emit_data->args[0]);
	emit_data->output[emit_data->chan] = LLVMBuildNot(builder, v, "");
}

static void emit_arl(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef floor_index = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_FLR, emit_data->args[0]);
	emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
		floor_index, bld_base->base.int_elem_type, "");
}

static void emit_and(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAnd(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_or(const struct lp_build_tgsi_action *action,
		    struct lp_build_tgsi_context *bld_base,
		    struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_uadd(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAdd(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_udiv(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildUDiv(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_idiv(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSDiv(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_mod(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSRem(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_umod(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildURem(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_shl(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildShl(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_ushr(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildLShr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_ishr(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAShr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_xor(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildXor(builder,
		emit_data->args[0], emit_data->args[1], "");
}

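/* SSG/ISSG/I64SSG: sign(x).  Two compare+select pairs clamp the value to
 * {-1, 0, 1} (or {-1.0, 0.0, 1.0} for the float variant).
 */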
static void emit_ssg(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	LLVMValueRef cmp, val;

	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_I64SSG) {
		cmp = LLVMBuildICmp(builder, LLVMIntSGT, emit_data->args[0], bld_base->int64_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, bld_base->int64_bld.one, emit_data->args[0], "");
		cmp = LLVMBuildICmp(builder, LLVMIntSGE, val, bld_base->int64_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, val, LLVMConstInt(bld_base->int64_bld.elem_type, -1, true), "");
	} else if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_ISSG) {
		cmp = LLVMBuildICmp(builder, LLVMIntSGT, emit_data->args[0], bld_base->int_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, bld_base->int_bld.one, emit_data->args[0], "");
		cmp = LLVMBuildICmp(builder, LLVMIntSGE, val, bld_base->int_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, val, LLVMConstInt(bld_base->int_bld.elem_type, -1, true), "");
	} else { // float SSG
		cmp = LLVMBuildFCmp(builder, LLVMRealOGT, emit_data->args[0], bld_base->base.zero, "");
		val = LLVMBuildSelect(builder, cmp, bld_base->base.one, emit_data->args[0], "");
		cmp = LLVMBuildFCmp(builder, LLVMRealOGE, val, bld_base->base.zero, "");
		val = LLVMBuildSelect(builder, cmp, val, LLVMConstReal(bld_base->base.elem_type, -1), "");
	}

	emit_data->output[emit_data->chan] = val;
}

static void emit_ineg(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildNeg(builder,
		emit_data->args[0], "");
}

static void emit_dneg(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFNeg(builder,
		emit_data->args[0], "");
}

static void emit_frac(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	char *intr;

	if (emit_data->info->opcode == TGSI_OPCODE_FRC)
		intr = "llvm.floor.f32";
	else if (emit_data->info->opcode == TGSI_OPCODE_DFRAC)
		intr = "llvm.floor.f64";
	else {
		assert(0);
		return;
	}

	LLVMValueRef floor = lp_build_intrinsic(builder, intr, emit_data->dst_type,
						&emit_data->args[0], 1,
						LLVMReadNoneAttribute);
	emit_data->output[emit_data->chan] = LLVMBuildFSub(builder,
		emit_data->args[0], floor, "");
}

static void emit_f2i(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
		emit_data->args[0], bld_base->int_bld.elem_type, "");
}

static void emit_f2u(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFPToUI(builder,
		emit_data->args[0], bld_base->uint_bld.elem_type, "");
}

static void emit_i2f(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSIToFP(builder,
		emit_data->args[0], bld_base->base.elem_type, "");
}

static void emit_u2f(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildUIToFP(builder,
		emit_data->args[0], bld_base->base.elem_type, "");
}

static void
build_tgsi_intrinsic_nomem(const struct lp_build_tgsi_action *action,
			   struct lp_build_tgsi_context *bld_base,
			   struct lp_build_emit_data *emit_data)
{
	struct lp_build_context *base = &bld_base->base;
	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(base->gallivm->builder, action->intr_name,
				   emit_data->dst_type, emit_data->args,
				   emit_data->arg_count, LLVMReadNoneAttribute);
}

static void emit_bfi(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef bfi_args[3];

	// Calculate the bitmask: (((1 << src3) - 1) << src2)
	bfi_args[0] = LLVMBuildShl(builder,
				   LLVMBuildSub(builder,
						LLVMBuildShl(builder,
							     bld_base->int_bld.one,
							     emit_data->args[3], ""),
						bld_base->int_bld.one, ""),
				   emit_data->args[2], "");

	bfi_args[1] = LLVMBuildShl(builder, emit_data->args[1],
				   emit_data->args[2], "");

	bfi_args[2] = emit_data->args[0];

	/* Calculate:
	 *   (arg0 & arg1) | (~arg0 & arg2) = arg2 ^ (arg0 & (arg1 ^ arg2))
	 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
	 */
	emit_data->output[emit_data->chan] =
		LLVMBuildXor(builder, bfi_args[2],
			     LLVMBuildAnd(builder, bfi_args[0],
					  LLVMBuildXor(builder, bfi_args[1], bfi_args[2],
						       ""), ""), "");
}

/* Find the first (least significant) set bit; this is ffs in C. */
static void emit_lsb(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef args[2] = {
		emit_data->args[0],

		/* The value of 1 means that ffs(x=0) = undef, so LLVM won't
		 * add special code to check for x=0. The reason is that
		 * the LLVM behavior for x=0 is different from what we
		 * need here.
		 *
		 * The hardware already implements the correct behavior.
		 */
		LLVMConstInt(LLVMInt1TypeInContext(gallivm->context), 1, 0)
	};

	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(gallivm->builder, "llvm.cttz.i32",
				   emit_data->dst_type, args, ARRAY_SIZE(args),
				   LLVMReadNoneAttribute);
}

/* Find the last (most significant) set bit. */
static void emit_umsb(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef args[2] = {
		emit_data->args[0],
		/* Don't generate code for handling zero: */
		LLVMConstInt(LLVMInt1TypeInContext(gallivm->context), 1, 0)
	};

	LLVMValueRef msb =
		lp_build_intrinsic(builder, "llvm.ctlz.i32",
				   emit_data->dst_type, args, ARRAY_SIZE(args),
				   LLVMReadNoneAttribute);

	/* The HW returns the last bit index from MSB, but TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
			   msb, "");

	/* Check for zero: */
	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder,
				LLVMBuildICmp(builder, LLVMIntEQ, args[0],
					      bld_base->uint_bld.zero, ""),
				lp_build_const_int32(gallivm, -1), msb, "");
}

/* Find the last bit opposite of the sign bit. */
static void emit_imsb(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef arg = emit_data->args[0];

	LLVMValueRef msb =
		lp_build_intrinsic(builder, "llvm.AMDGPU.flbit.i32",
				   emit_data->dst_type, &arg, 1,
				   LLVMReadNoneAttribute);

	/* The HW returns the last bit index from MSB, but TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
			   msb, "");

	/* If arg == 0 || arg == -1 (0xffffffff), return -1. */
	LLVMValueRef all_ones = lp_build_const_int32(gallivm, -1);

	LLVMValueRef cond =
		LLVMBuildOr(builder,
			    LLVMBuildICmp(builder, LLVMIntEQ, arg,
					  bld_base->uint_bld.zero, ""),
			    LLVMBuildICmp(builder, LLVMIntEQ, arg,
					  all_ones, ""), "");

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder, cond, all_ones, msb, "");
}

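/* IABS/I64ABS: |x| computed as max(x, -x). */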
static void emit_iabs(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	emit_data->output[emit_data->chan] =
		lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_IMAX,
					  emit_data->args[0],
					  LLVMBuildNeg(builder,
						       emit_data->args[0], ""));
}

static void emit_minmax_int(const struct lp_build_tgsi_action *action,
			    struct lp_build_tgsi_context *bld_base,
			    struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMIntPredicate op;

	switch (emit_data->info->opcode) {
	default:
		assert(0);
	case TGSI_OPCODE_IMAX:
	case TGSI_OPCODE_I64MAX:
		op = LLVMIntSGT;
		break;
	case TGSI_OPCODE_IMIN:
	case TGSI_OPCODE_I64MIN:
		op = LLVMIntSLT;
		break;
	case TGSI_OPCODE_UMAX:
	case TGSI_OPCODE_U64MAX:
		op = LLVMIntUGT;
		break;
	case TGSI_OPCODE_UMIN:
	case TGSI_OPCODE_U64MIN:
		op = LLVMIntULT;
		break;
	}

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder,
				LLVMBuildICmp(builder, op, emit_data->args[0],
					      emit_data->args[1], ""),
				emit_data->args[0],
				emit_data->args[1], "");
}

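/* PK2H: pack the x and y channels into one 32-bit result as two
 * half-precision floats (x in the low 16 bits, y in the high 16 bits).
 */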
static void pk2h_fetch_args(struct lp_build_tgsi_context *bld_base,
			    struct lp_build_emit_data *emit_data)
{
	emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
						 0, TGSI_CHAN_X);
	emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
						 0, TGSI_CHAN_Y);
}

static void emit_pk2h(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	struct lp_build_context *uint_bld = &bld_base->uint_bld;
	LLVMTypeRef fp16, i16;
	LLVMValueRef const16, comp[2];
	unsigned i;

	fp16 = LLVMHalfTypeInContext(context);
	i16 = LLVMInt16TypeInContext(context);
	const16 = lp_build_const_int32(uint_bld->gallivm, 16);

	for (i = 0; i < 2; i++) {
		comp[i] = LLVMBuildFPTrunc(builder, emit_data->args[i], fp16, "");
		comp[i] = LLVMBuildBitCast(builder, comp[i], i16, "");
		comp[i] = LLVMBuildZExt(builder, comp[i], uint_bld->elem_type, "");
	}

	comp[1] = LLVMBuildShl(builder, comp[1], const16, "");
	comp[0] = LLVMBuildOr(builder, comp[0], comp[1], "");

	emit_data->output[emit_data->chan] = comp[0];
}

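/* UP2H: the inverse of PK2H.  Unpack two half-precision floats from the
 * low and high 16 bits of the source and widen them to two f32 outputs.
 */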
static void up2h_fetch_args(struct lp_build_tgsi_context *bld_base,
			    struct lp_build_emit_data *emit_data)
{
	emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
						 0, TGSI_CHAN_X);
}

static void emit_up2h(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	struct lp_build_context *uint_bld = &bld_base->uint_bld;
	LLVMTypeRef fp16, i16;
	LLVMValueRef const16, input, val;
	unsigned i;

	fp16 = LLVMHalfTypeInContext(context);
	i16 = LLVMInt16TypeInContext(context);
	const16 = lp_build_const_int32(uint_bld->gallivm, 16);
	input = emit_data->args[0];

	for (i = 0; i < 2; i++) {
		val = i == 1 ? LLVMBuildLShr(builder, input, const16, "") : input;
		val = LLVMBuildTrunc(builder, val, i16, "");
		val = LLVMBuildBitCast(builder, val, fp16, "");
		emit_data->output[i] =
			LLVMBuildFPExt(builder, val, bld_base->base.elem_type, "");
	}
}

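/* DIV: a plain fdiv, tagged with !fpmath metadata (2.5 ULP) on LLVM >= 3.9
 * so the backend is allowed to lower it to v_rcp_f32 + multiply instead of
 * a precise division sequence.
 */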
static void emit_fdiv(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	struct si_shader_context *ctx = si_shader_context(bld_base);

	emit_data->output[emit_data->chan] =
		LLVMBuildFDiv(bld_base->base.gallivm->builder,
			      emit_data->args[0], emit_data->args[1], "");

	/* Use v_rcp_f32 instead of precise division. */
	if (HAVE_LLVM >= 0x0309 &&
	    !LLVMIsConstant(emit_data->output[emit_data->chan]))
		LLVMSetMetadata(emit_data->output[emit_data->chan],
				ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
}

/* 1/sqrt is translated to rsq for f32 if fp32 denormals are not enabled in
 * the target machine. f64 needs global unsafe math flags to get rsq. */
static void emit_rsq(const struct lp_build_tgsi_action *action,
		     struct lp_build_tgsi_context *bld_base,
		     struct lp_build_emit_data *emit_data)
{
	LLVMValueRef sqrt =
		lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_SQRT,
					 emit_data->args[0]);

	emit_data->output[emit_data->chan] =
		lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_DIV,
					  bld_base->base.one, sqrt);
}

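/* Register the emit callbacks for the TGSI ALU opcodes handled by this
 * file.  Simple opcodes map directly to LLVM intrinsics through
 * build_tgsi_intrinsic_nomem; the rest use the custom emit functions
 * above.  Where upstream LLVM renamed an intrinsic, the name is picked
 * based on HAVE_LLVM.
 */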
void si_shader_context_init_alu(struct lp_build_tgsi_context *bld_base)
{
	lp_set_default_actions(bld_base);

	bld_base->op_actions[TGSI_OPCODE_ABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_ABS].intr_name = "llvm.fabs.f32";
	bld_base->op_actions[TGSI_OPCODE_AND].emit = emit_and;
	bld_base->op_actions[TGSI_OPCODE_ARL].emit = emit_arl;
	bld_base->op_actions[TGSI_OPCODE_BFI].emit = emit_bfi;
	bld_base->op_actions[TGSI_OPCODE_BREV].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_BREV].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.bitreverse.i32" : "llvm.AMDGPU.brev";
	bld_base->op_actions[TGSI_OPCODE_CEIL].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CEIL].intr_name = "llvm.ceil.f32";
	bld_base->op_actions[TGSI_OPCODE_CLAMP].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CLAMP].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.AMDGPU.clamp." : "llvm.AMDIL.clamp.";
	bld_base->op_actions[TGSI_OPCODE_CMP].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_COS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_COS].intr_name = "llvm.cos.f32";
	bld_base->op_actions[TGSI_OPCODE_DABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DABS].intr_name = "llvm.fabs.f64";
	bld_base->op_actions[TGSI_OPCODE_DFMA].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DFMA].intr_name = "llvm.fma.f64";
	bld_base->op_actions[TGSI_OPCODE_DFRAC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_DIV].emit = emit_fdiv;
	bld_base->op_actions[TGSI_OPCODE_DNEG].emit = emit_dneg;
	bld_base->op_actions[TGSI_OPCODE_DSEQ].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSGE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSLT].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSNE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].intr_name =
		HAVE_LLVM >= 0x0309 ? "llvm.amdgcn.rsq.f64" : "llvm.AMDGPU.rsq.f64";
	bld_base->op_actions[TGSI_OPCODE_DSQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DSQRT].intr_name = "llvm.sqrt.f64";
	bld_base->op_actions[TGSI_OPCODE_EX2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_EX2].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.exp2.f32" : "llvm.AMDIL.exp.";
	bld_base->op_actions[TGSI_OPCODE_FLR].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_FLR].intr_name = "llvm.floor.f32";
	bld_base->op_actions[TGSI_OPCODE_FMA].emit =
		bld_base->op_actions[TGSI_OPCODE_MAD].emit;
	bld_base->op_actions[TGSI_OPCODE_FRC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_F2I].emit = emit_f2i;
	bld_base->op_actions[TGSI_OPCODE_F2U].emit = emit_f2u;
	bld_base->op_actions[TGSI_OPCODE_FSEQ].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSGE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSLT].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSNE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_IABS].emit = emit_iabs;
	bld_base->op_actions[TGSI_OPCODE_IBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IBFE].intr_name = "llvm.AMDGPU.bfe.i32";
	bld_base->op_actions[TGSI_OPCODE_IDIV].emit = emit_idiv;
	bld_base->op_actions[TGSI_OPCODE_IMAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_IMIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_IMSB].emit = emit_imsb;
	bld_base->op_actions[TGSI_OPCODE_INEG].emit = emit_ineg;
	bld_base->op_actions[TGSI_OPCODE_ISHR].emit = emit_ishr;
	bld_base->op_actions[TGSI_OPCODE_ISGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_I2F].emit = emit_i2f;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].fetch_args = kill_if_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].emit = kil_emit;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].intr_name = "llvm.AMDGPU.kill";
	bld_base->op_actions[TGSI_OPCODE_KILL].emit = lp_build_tgsi_intrinsic;
	bld_base->op_actions[TGSI_OPCODE_KILL].intr_name = "llvm.AMDGPU.kilp";
	bld_base->op_actions[TGSI_OPCODE_LSB].emit = emit_lsb;
	bld_base->op_actions[TGSI_OPCODE_LG2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_LG2].intr_name = "llvm.log2.f32";
	bld_base->op_actions[TGSI_OPCODE_MAX].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MAX].intr_name = "llvm.maxnum.f32";
	bld_base->op_actions[TGSI_OPCODE_MIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_MIN].intr_name = "llvm.minnum.f32";
	bld_base->op_actions[TGSI_OPCODE_MOD].emit = emit_mod;
	bld_base->op_actions[TGSI_OPCODE_UMSB].emit = emit_umsb;
	bld_base->op_actions[TGSI_OPCODE_NOT].emit = emit_not;
	bld_base->op_actions[TGSI_OPCODE_OR].emit = emit_or;
	bld_base->op_actions[TGSI_OPCODE_PK2H].fetch_args = pk2h_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_PK2H].emit = emit_pk2h;
	bld_base->op_actions[TGSI_OPCODE_POPC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POPC].intr_name = "llvm.ctpop.i32";
	bld_base->op_actions[TGSI_OPCODE_POW].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POW].intr_name = "llvm.pow.f32";
	bld_base->op_actions[TGSI_OPCODE_ROUND].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_ROUND].intr_name = "llvm.rint.f32";
	bld_base->op_actions[TGSI_OPCODE_RSQ].emit = emit_rsq;
	bld_base->op_actions[TGSI_OPCODE_SGE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SEQ].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SHL].emit = emit_shl;
	bld_base->op_actions[TGSI_OPCODE_SLE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SLT].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SNE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SGT].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SIN].intr_name = "llvm.sin.f32";
	bld_base->op_actions[TGSI_OPCODE_SQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SQRT].intr_name = "llvm.sqrt.f32";
	bld_base->op_actions[TGSI_OPCODE_SSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].intr_name = "llvm.trunc.f32";
	bld_base->op_actions[TGSI_OPCODE_UADD].emit = emit_uadd;
	bld_base->op_actions[TGSI_OPCODE_UBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_UBFE].intr_name = "llvm.AMDGPU.bfe.u32";
	bld_base->op_actions[TGSI_OPCODE_UDIV].emit = emit_udiv;
	bld_base->op_actions[TGSI_OPCODE_UMAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_UMIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_UMOD].emit = emit_umod;
	bld_base->op_actions[TGSI_OPCODE_USEQ].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USHR].emit = emit_ushr;
	bld_base->op_actions[TGSI_OPCODE_USLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USNE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U2F].emit = emit_u2f;
	bld_base->op_actions[TGSI_OPCODE_XOR].emit = emit_xor;
	bld_base->op_actions[TGSI_OPCODE_UCMP].emit = emit_ucmp;
	bld_base->op_actions[TGSI_OPCODE_UP2H].fetch_args = up2h_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_UP2H].emit = emit_up2h;

	bld_base->op_actions[TGSI_OPCODE_I64MAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_I64MIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_U64MAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_U64MIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_I64ABS].emit = emit_iabs;
	bld_base->op_actions[TGSI_OPCODE_I64SSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_I64NEG].emit = emit_ineg;

	bld_base->op_actions[TGSI_OPCODE_U64SEQ].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U64SNE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U64SGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U64SLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_I64SGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_I64SLT].emit = emit_icmp;

	bld_base->op_actions[TGSI_OPCODE_U64ADD].emit = emit_uadd;
	bld_base->op_actions[TGSI_OPCODE_U64SHL].emit = emit_shl;
	bld_base->op_actions[TGSI_OPCODE_U64SHR].emit = emit_ushr;
	bld_base->op_actions[TGSI_OPCODE_I64SHR].emit = emit_ishr;

	bld_base->op_actions[TGSI_OPCODE_U64MOD].emit = emit_umod;
	bld_base->op_actions[TGSI_OPCODE_I64MOD].emit = emit_mod;
	bld_base->op_actions[TGSI_OPCODE_U64DIV].emit = emit_udiv;
	bld_base->op_actions[TGSI_OPCODE_I64DIV].emit = emit_idiv;
}

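/* Compute the cube-map face values (tc, sc, ma, face id) for a direction
 * vector.  With LLVM >= 3.9 the four scalar llvm.amdgcn.cube* intrinsics
 * are used and gathered into a vector; older LLVM only exposes the vector
 * llvm.AMDGPU.cube intrinsic.
 */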
static LLVMValueRef build_cube_intrinsic(struct gallivm_state *gallivm,
					 LLVMValueRef in[3])
{
	if (HAVE_LLVM >= 0x0309) {
		LLVMTypeRef f32 = LLVMTypeOf(in[0]);
		LLVMValueRef out[4];

		out[0] = lp_build_intrinsic(gallivm->builder, "llvm.amdgcn.cubetc",
					    f32, in, 3, LLVMReadNoneAttribute);
		out[1] = lp_build_intrinsic(gallivm->builder, "llvm.amdgcn.cubesc",
					    f32, in, 3, LLVMReadNoneAttribute);
		out[2] = lp_build_intrinsic(gallivm->builder, "llvm.amdgcn.cubema",
					    f32, in, 3, LLVMReadNoneAttribute);
		out[3] = lp_build_intrinsic(gallivm->builder, "llvm.amdgcn.cubeid",
					    f32, in, 3, LLVMReadNoneAttribute);

		return lp_build_gather_values(gallivm, out, 4);
	} else {
		LLVMValueRef c[4] = {
			in[0],
			in[1],
			in[2],
			LLVMGetUndef(LLVMTypeOf(in[0]))
		};
		LLVMValueRef vec = lp_build_gather_values(gallivm, c, 4);

		return lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.cube",
					  LLVMTypeOf(vec), &vec, 1,
					  LLVMReadNoneAttribute);
	}
}

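/* Convert a cube-map direction vector into per-face 2D coordinates:
 * scale the tc/sc results by 1/|ma| and add 1.5, then return them
 * together with the face id as (s, t, face).
 */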
static void si_llvm_cube_to_2d_coords(struct lp_build_tgsi_context *bld_base,
				      LLVMValueRef *in, LLVMValueRef *out)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMTypeRef type = bld_base->base.elem_type;
	LLVMValueRef coords[4];
	LLVMValueRef mad_args[3];
	LLVMValueRef v;
	unsigned i;

	v = build_cube_intrinsic(gallivm, in);

	for (i = 0; i < 4; ++i)
		coords[i] = LLVMBuildExtractElement(builder, v,
						    lp_build_const_int32(gallivm, i), "");

	coords[2] = lp_build_intrinsic(builder, "llvm.fabs.f32",
				       type, &coords[2], 1, LLVMReadNoneAttribute);
	coords[2] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_RCP, coords[2]);

	mad_args[1] = coords[2];
	mad_args[2] = LLVMConstReal(type, 1.5);

	mad_args[0] = coords[0];
	coords[0] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	mad_args[0] = coords[1];
	coords[1] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	/* apply xyz = yxw swizzle to coords */
	out[0] = coords[1];
	out[1] = coords[0];
	out[2] = coords[3];
}

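/* Rewrite the texture coordinates (and, for TXD, the derivatives) of a
 * cube or cube-array lookup in place: project them onto the selected
 * face, fold the array index into the face coordinate, and keep any
 * compare/LOD/bias value in coords[3].
 */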
void si_prepare_cube_coords(struct lp_build_tgsi_context *bld_base,
			    struct lp_build_emit_data *emit_data,
			    LLVMValueRef *coords_arg,
			    LLVMValueRef *derivs_arg)
{
	unsigned target = emit_data->inst->Texture.Texture;
	unsigned opcode = emit_data->inst->Instruction.Opcode;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef coords[4];
	unsigned i;

	si_llvm_cube_to_2d_coords(bld_base, coords_arg, coords);

	if (opcode == TGSI_OPCODE_TXD && derivs_arg) {
		LLVMValueRef derivs[4];
		int axis;

		/* Convert cube derivatives to 2D derivatives. */
		for (axis = 0; axis < 2; axis++) {
			LLVMValueRef shifted_cube_coords[4], shifted_coords[4];

			/* Shift the cube coordinates by the derivatives to get
			 * the cube coordinates of the "neighboring pixel".
			 */
			for (i = 0; i < 3; i++)
				shifted_cube_coords[i] =
					LLVMBuildFAdd(builder, coords_arg[i],
						      derivs_arg[axis*3+i], "");
			shifted_cube_coords[3] = LLVMGetUndef(bld_base->base.elem_type);

			/* Project the shifted cube coordinates onto the face. */
			si_llvm_cube_to_2d_coords(bld_base, shifted_cube_coords,
						  shifted_coords);

			/* Subtract both sets of 2D coordinates to get 2D derivatives.
			 * This won't work if the shifted coordinates ended up
			 * in a different face.
			 */
			for (i = 0; i < 2; i++)
				derivs[axis * 2 + i] =
					LLVMBuildFSub(builder, shifted_coords[i],
						      coords[i], "");
		}

		memcpy(derivs_arg, derivs, sizeof(derivs));
	}

	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		/* for cube arrays coord.z = coord.w(array_index) * 8 + face */
		/* coords_arg.w component - array_index for cube arrays */
		coords[2] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
			coords_arg[3], lp_build_const_float(gallivm, 8.0), coords[2]);
	}

	/* Preserve compare/lod/bias. Put it in coords.w. */
	if (opcode == TGSI_OPCODE_TEX2 ||
	    opcode == TGSI_OPCODE_TXB2 ||
	    opcode == TGSI_OPCODE_TXL2) {
		coords[3] = coords_arg[4];
	} else if (opcode == TGSI_OPCODE_TXB ||
		   opcode == TGSI_OPCODE_TXL ||
		   target == TGSI_TEXTURE_SHADOWCUBE) {
		coords[3] = coords_arg[3];
	}

	memcpy(coords_arg, coords, sizeof(coords));
}