radeon/llvm: Remove uses_temp_indirect_addressing() function
[mesa.git] / src / gallium / drivers / radeon / radeon_setup_tgsi_llvm.c
1 /*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors: Tom Stellard <thomas.stellard@amd.com>
24 *
25 */
26 #include "radeon_llvm.h"
27
28 #include "gallivm/lp_bld_const.h"
29 #include "gallivm/lp_bld_gather.h"
30 #include "gallivm/lp_bld_flow.h"
31 #include "gallivm/lp_bld_init.h"
32 #include "gallivm/lp_bld_intr.h"
33 #include "gallivm/lp_bld_misc.h"
34 #include "gallivm/lp_bld_swizzle.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "util/u_debug.h"
40
41 #include <llvm-c/Core.h>
42 #include <llvm-c/Transforms/Scalar.h>
43
44 static struct radeon_llvm_loop * get_current_loop(struct radeon_llvm_context * ctx)
45 {
46 return ctx->loop_depth > 0 ? ctx->loop + (ctx->loop_depth - 1) : NULL;
47 }
48
49 static struct radeon_llvm_branch * get_current_branch(
50 struct radeon_llvm_context * ctx)
51 {
52 return ctx->branch_depth > 0 ?
53 ctx->branch + (ctx->branch_depth - 1) : NULL;
54 }
55
/* Map a (register index, channel) pair onto a flat SoA slot index.
 * Every register occupies four consecutive slots, one per channel. */
unsigned radeon_llvm_reg_index_soa(unsigned index, unsigned chan)
{
	return index * 4 + chan;
}
60
61 static LLVMValueRef emit_swizzle(
62 struct lp_build_tgsi_context * bld_base,
63 LLVMValueRef value,
64 unsigned swizzle_x,
65 unsigned swizzle_y,
66 unsigned swizzle_z,
67 unsigned swizzle_w)
68 {
69 LLVMValueRef swizzles[4];
70 LLVMTypeRef i32t =
71 LLVMInt32TypeInContext(bld_base->base.gallivm->context);
72
73 swizzles[0] = LLVMConstInt(i32t, swizzle_x, 0);
74 swizzles[1] = LLVMConstInt(i32t, swizzle_y, 0);
75 swizzles[2] = LLVMConstInt(i32t, swizzle_z, 0);
76 swizzles[3] = LLVMConstInt(i32t, swizzle_w, 0);
77
78 return LLVMBuildShuffleVector(bld_base->base.gallivm->builder,
79 value,
80 LLVMGetUndef(LLVMTypeOf(value)),
81 LLVMConstVector(swizzles, 4), "");
82 }
83
84 static struct tgsi_declaration_range
85 get_array_range(struct lp_build_tgsi_context *bld_base,
86 unsigned File, const struct tgsi_ind_register *reg)
87 {
88 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
89
90 if (File != TGSI_FILE_TEMPORARY || reg->ArrayID == 0 ||
91 reg->ArrayID > bld_base->info->array_max[TGSI_FILE_TEMPORARY]) {
92 struct tgsi_declaration_range range;
93 range.First = 0;
94 range.Last = bld_base->info->file_max[File];
95 return range;
96 }
97
98 return ctx->arrays[reg->ArrayID - 1];
99 }
100
101 static LLVMValueRef
102 emit_array_index(
103 struct lp_build_tgsi_soa_context *bld,
104 const struct tgsi_ind_register *reg,
105 unsigned offset)
106 {
107 struct gallivm_state * gallivm = bld->bld_base.base.gallivm;
108
109 LLVMValueRef addr = LLVMBuildLoad(gallivm->builder, bld->addr[reg->Index][reg->Swizzle], "");
110 return LLVMBuildAdd(gallivm->builder, addr, lp_build_const_int32(gallivm, offset), "");
111 }
112
113 LLVMValueRef
114 radeon_llvm_emit_fetch_64bit(
115 struct lp_build_tgsi_context *bld_base,
116 enum tgsi_opcode_type type,
117 LLVMValueRef ptr,
118 LLVMValueRef ptr2)
119 {
120 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
121 LLVMValueRef result;
122
123 result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
124
125 result = LLVMBuildInsertElement(builder,
126 result,
127 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr),
128 bld_base->int_bld.zero, "");
129 result = LLVMBuildInsertElement(builder,
130 result,
131 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr2),
132 bld_base->int_bld.one, "");
133 return bitcast(bld_base, type, result);
134 }
135
136 static LLVMValueRef
137 emit_array_fetch(
138 struct lp_build_tgsi_context *bld_base,
139 unsigned File, enum tgsi_opcode_type type,
140 struct tgsi_declaration_range range,
141 unsigned swizzle)
142 {
143 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
144 struct gallivm_state * gallivm = bld->bld_base.base.gallivm;
145 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
146
147 unsigned i, size = range.Last - range.First + 1;
148 LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
149 LLVMValueRef result = LLVMGetUndef(vec);
150
151 struct tgsi_full_src_register tmp_reg = {};
152 tmp_reg.Register.File = File;
153
154 for (i = 0; i < size; ++i) {
155 tmp_reg.Register.Index = i + range.First;
156 LLVMValueRef temp = radeon_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
157 result = LLVMBuildInsertElement(builder, result, temp,
158 lp_build_const_int32(gallivm, i), "");
159 }
160 return result;
161 }
162
/**
 * Fetch one channel of a TGSI source register as an LLVM value, or —
 * when swizzle == ~0 — all four channels gathered into a vector.
 *
 * Handles immediates, inputs, temporaries and outputs; 64-bit types
 * consume two adjacent 32-bit channels (swizzle and swizzle + 1), and
 * indirect references are lowered to a fetch of the whole declared
 * range plus a dynamic extract.
 */
LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
				    const struct tgsi_full_src_register *reg,
				    enum tgsi_opcode_type type,
				    unsigned swizzle)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef result = NULL, ptr, ptr2;

	/* ~0 means "all channels": recurse per channel and gather. */
	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = radeon_llvm_emit_fetch(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	/* Indirect access: materialize the whole range as a vector and
	 * extract the dynamically computed element. */
	if (reg->Register.Indirect) {
		struct tgsi_declaration_range range = get_array_range(bld_base,
			reg->Register.File, &reg->Indirect);
		return LLVMBuildExtractElement(builder,
			emit_array_fetch(bld_base, reg->Register.File, type, range, swizzle),
			emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First),
			"");
	}

	switch(reg->Register.File) {
	case TGSI_FILE_IMMEDIATE: {
		LLVMTypeRef ctype = tgsi2llvmtype(bld_base, type);
		if (tgsi_type_is_64bit(type)) {
			/* Pack two adjacent immediate channels into an i32
			 * vector constant, then bitcast to the 64-bit type. */
			result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
			result = LLVMConstInsertElement(result,
							bld->immediates[reg->Register.Index][swizzle],
							bld_base->int_bld.zero);
			result = LLVMConstInsertElement(result,
							bld->immediates[reg->Register.Index][swizzle + 1],
							bld_base->int_bld.one);
			return LLVMConstBitCast(result, ctype);
		} else {
			return LLVMConstBitCast(bld->immediates[reg->Register.Index][swizzle], ctype);
		}
	}

	case TGSI_FILE_INPUT:
		/* Inputs are SSA values preloaded by the driver, not allocas. */
		result = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle)];
		if (tgsi_type_is_64bit(type)) {
			ptr = result;
			ptr2 = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle + 1)];
			return radeon_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
		}
		break;

	case TGSI_FILE_TEMPORARY:
		/* Out-of-range index (malformed shader): yield undef. */
		if (reg->Register.Index >= ctx->temps_count)
			return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
		ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
		if (tgsi_type_is_64bit(type)) {
			ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
			return radeon_llvm_emit_fetch_64bit(bld_base, type,
							    LLVMBuildLoad(builder, ptr, ""),
							    LLVMBuildLoad(builder, ptr2, ""));
		}
		result = LLVMBuildLoad(builder, ptr, "");
		break;

	case TGSI_FILE_OUTPUT:
		ptr = lp_get_output_ptr(bld, reg->Register.Index, swizzle);
		if (tgsi_type_is_64bit(type)) {
			ptr2 = lp_get_output_ptr(bld, reg->Register.Index, swizzle + 1);
			return radeon_llvm_emit_fetch_64bit(bld_base, type,
							    LLVMBuildLoad(builder, ptr, ""),
							    LLVMBuildLoad(builder, ptr2, ""));
		}
		result = LLVMBuildLoad(builder, ptr, "");
		break;

	default:
		/* Unsupported register file. */
		return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
	}

	return bitcast(bld_base, type, result);
}
248
249 static LLVMValueRef fetch_system_value(
250 struct lp_build_tgsi_context * bld_base,
251 const struct tgsi_full_src_register *reg,
252 enum tgsi_opcode_type type,
253 unsigned swizzle)
254 {
255 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
256 struct gallivm_state *gallivm = bld_base->base.gallivm;
257
258 LLVMValueRef cval = ctx->system_values[reg->Register.Index];
259 if (LLVMGetTypeKind(LLVMTypeOf(cval)) == LLVMVectorTypeKind) {
260 cval = LLVMBuildExtractElement(gallivm->builder, cval,
261 lp_build_const_int32(gallivm, swizzle), "");
262 }
263 return bitcast(bld_base, type, cval);
264 }
265
266 static LLVMValueRef si_build_alloca_undef(struct gallivm_state *gallivm,
267 LLVMTypeRef type,
268 const char *name)
269 {
270 LLVMValueRef ptr = lp_build_alloca(gallivm, type, name);
271 LLVMBuildStore(gallivm->builder, LLVMGetUndef(type), ptr);
272 return ptr;
273 }
274
/**
 * Handle a TGSI declaration: allocate per-channel stack slots for
 * address, temporary and output registers, record temporary-array
 * ranges for indirect addressing, and delegate inputs, system values
 * and memory regions to the driver-provided callbacks.
 */
static void emit_declaration(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_declaration *decl)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	unsigned first, last, i, idx;
	switch(decl->Declaration.File) {
	case TGSI_FILE_ADDRESS:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			unsigned chan;
			for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
				/* One integer alloca per address-register channel. */
				ctx->soa.addr[idx][chan] = si_build_alloca_undef(
					&ctx->gallivm,
					ctx->soa.bld_base.uint_bld.elem_type, "");
			}
		}
		break;
	}

	case TGSI_FILE_TEMPORARY:
		if (decl->Declaration.Array) {
			if (!ctx->arrays) {
				/* Lazily allocate the ArrayID -> range table.
				 * NOTE(review): MALLOC result is unchecked here,
				 * matching the surrounding code's OOM policy. */
				int size = bld_base->info->array_max[TGSI_FILE_TEMPORARY];
				ctx->arrays = MALLOC(sizeof(ctx->arrays[0]) * size);
			}

			/* ArrayIDs are 1-based; slot 0 holds ArrayID 1. */
			ctx->arrays[decl->Array.ArrayID - 1] = decl->Range;
		}
		first = decl->Range.First;
		last = decl->Range.Last;
		if (!ctx->temps_count) {
			/* Allocate slots for every temp in the shader up
			 * front on the first TEMP declaration. */
			ctx->temps_count = bld_base->info->file_max[TGSI_FILE_TEMPORARY] + 1;
			ctx->temps = MALLOC(TGSI_NUM_CHANNELS * ctx->temps_count * sizeof(LLVMValueRef));
		}
		for (idx = first; idx <= last; idx++) {
			for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
				ctx->temps[idx * TGSI_NUM_CHANNELS + i] =
					si_build_alloca_undef(bld_base->base.gallivm,
							      bld_base->base.vec_type,
							      "temp");
			}
		}
		break;

	case TGSI_FILE_INPUT:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			/* load_input is optional; skip when not provided. */
			if (ctx->load_input)
				ctx->load_input(ctx, idx, decl);
		}
	}
	break;

	case TGSI_FILE_SYSTEM_VALUE:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			ctx->load_system_value(ctx, idx, decl);
		}
	}
	break;

	case TGSI_FILE_OUTPUT:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			unsigned chan;
			assert(idx < RADEON_LLVM_MAX_OUTPUTS);
			for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
				/* One float alloca per output channel; the
				 * epilog reads these back for export. */
				ctx->soa.outputs[idx][chan] = si_build_alloca_undef(
					&ctx->gallivm,
					ctx->soa.bld_base.base.elem_type, "");
			}
		}
		break;
	}

	case TGSI_FILE_MEMORY:
		ctx->declare_memory_region(ctx, decl);
		break;

	default:
		/* Other files (constants, samplers, ...) need no local setup. */
		break;
	}
}
363
364 LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
365 LLVMValueRef value)
366 {
367 struct lp_build_emit_data clamp_emit_data;
368
369 memset(&clamp_emit_data, 0, sizeof(clamp_emit_data));
370 clamp_emit_data.arg_count = 3;
371 clamp_emit_data.args[0] = value;
372 clamp_emit_data.args[2] = bld_base->base.one;
373 clamp_emit_data.args[1] = bld_base->base.zero;
374
375 return lp_build_emit_llvm(bld_base, TGSI_OPCODE_CLAMP,
376 &clamp_emit_data);
377 }
378
/**
 * Store the per-channel results in dst[] into the instruction's first
 * destination register, honoring the destination write mask, optional
 * saturation, 64-bit destinations (which occupy even/odd channel
 * pairs) and indirect addressing.
 */
void radeon_llvm_emit_store(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_instruction * inst,
	const struct tgsi_opcode_info * info,
	LLVMValueRef dst[4])
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
	LLVMValueRef temp_ptr, temp_ptr2 = NULL;
	unsigned chan, chan_index;
	bool is_vec_store = false;
	enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);

	if (dst[0]) {
		LLVMTypeKind k = LLVMGetTypeKind(LLVMTypeOf(dst[0]));
		is_vec_store = (k == LLVMVectorTypeKind);
	}

	/* A vector in dst[0] carries all channels at once: split it into
	 * scalars and re-enter through the driver's emit_store hook. */
	if (is_vec_store) {
		LLVMValueRef values[4] = {};
		TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan) {
			LLVMValueRef index = lp_build_const_int32(gallivm, chan);
			values[chan] = LLVMBuildExtractElement(gallivm->builder,
							       dst[0], index, "");
		}
		bld_base->emit_store(bld_base, inst, info, values);
		return;
	}

	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
		LLVMValueRef value = dst[chan_index];

		/* 64-bit values occupy even/odd channel pairs; the odd
		 * channel is written together with its even partner. */
		if (tgsi_type_is_64bit(dtype) && (chan_index == 1 || chan_index == 3))
			continue;
		if (inst->Instruction.Saturate)
			value = radeon_llvm_saturate(bld_base, value);

		/* Address registers keep their native integer type. */
		if (reg->Register.File == TGSI_FILE_ADDRESS) {
			temp_ptr = bld->addr[reg->Register.Index][chan_index];
			LLVMBuildStore(builder, value, temp_ptr);
			continue;
		}

		/* The stack slots are float-typed, so bitcast 32-bit values. */
		if (!tgsi_type_is_64bit(dtype))
			value = bitcast(bld_base, TGSI_TYPE_FLOAT, value);

		if (reg->Register.Indirect) {
			/* Indirect store: fetch the whole range as a vector,
			 * insert the new value at the dynamic index, then
			 * write every element of the vector back. */
			struct tgsi_declaration_range range = get_array_range(bld_base,
				reg->Register.File, &reg->Indirect);

			unsigned i, size = range.Last - range.First + 1;
			LLVMValueRef array = LLVMBuildInsertElement(builder,
				emit_array_fetch(bld_base, reg->Register.File, TGSI_TYPE_FLOAT, range, chan_index),
				value,  emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First), "");

			for (i = 0; i < size; ++i) {
				switch(reg->Register.File) {
				case TGSI_FILE_OUTPUT:
					temp_ptr = bld->outputs[i + range.First][chan_index];
					break;

				case TGSI_FILE_TEMPORARY:
					/* Skip out-of-range slots (malformed shader). */
					if (range.First + i >= ctx->temps_count)
						continue;
					temp_ptr = ctx->temps[(i + range.First) * TGSI_NUM_CHANNELS + chan_index];
					break;

				default:
					return;
				}
				value = LLVMBuildExtractElement(builder, array,
								lp_build_const_int32(gallivm, i), "");
				LLVMBuildStore(builder, value, temp_ptr);
			}

		} else {
			switch(reg->Register.File) {
			case TGSI_FILE_OUTPUT:
				temp_ptr = bld->outputs[reg->Register.Index][chan_index];
				if (tgsi_type_is_64bit(dtype))
					temp_ptr2 = bld->outputs[reg->Register.Index][chan_index + 1];
				break;

			case TGSI_FILE_TEMPORARY:
				/* Skip out-of-range indices (malformed shader). */
				if (reg->Register.Index >= ctx->temps_count)
					continue;
				temp_ptr = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index];
				if (tgsi_type_is_64bit(dtype))
					temp_ptr2 = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index + 1];

				break;

			default:
				return;
			}
			if (!tgsi_type_is_64bit(dtype))
				LLVMBuildStore(builder, value, temp_ptr);
			else {
				/* Split the 64-bit value into two i32 halves
				 * and store them into the channel pair. */
				LLVMValueRef ptr = LLVMBuildBitCast(builder, value,
					LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), 2), "");
				LLVMValueRef val2;
				value = LLVMBuildExtractElement(builder, ptr,
								bld_base->uint_bld.zero, "");
				val2 = LLVMBuildExtractElement(builder, ptr,
							       bld_base->uint_bld.one, "");

				LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, value), temp_ptr);
				LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, val2), temp_ptr2);
			}
		}
	}
}
494
/**
 * BGNLOOP: create the loop-body ("LOOP") and loop-exit ("ENDLOOP")
 * basic blocks, branch into the body, position the builder there, and
 * push a record onto the loop stack so BRK/CONT/ENDLOOP can find
 * their branch targets.
 */
static void bgnloop_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBasicBlockRef loop_block;
	LLVMBasicBlockRef endloop_block;
	/* ENDLOOP is appended last; LOOP is inserted just before it so the
	 * blocks appear in source order in the function. */
	endloop_block = LLVMAppendBasicBlockInContext(gallivm->context,
						      ctx->main_fn, "ENDLOOP");
	loop_block = LLVMInsertBasicBlockInContext(gallivm->context,
						   endloop_block, "LOOP");
	LLVMBuildBr(gallivm->builder, loop_block);
	LLVMPositionBuilderAtEnd(gallivm->builder, loop_block);

	/* Grow the loop stack geometrically when it overflows. */
	if (++ctx->loop_depth > ctx->loop_depth_max) {
		unsigned new_max = ctx->loop_depth_max << 1;

		if (!new_max)
			new_max = RADEON_LLVM_INITIAL_CF_DEPTH;

		ctx->loop = REALLOC(ctx->loop, ctx->loop_depth_max *
				    sizeof(ctx->loop[0]),
				    new_max * sizeof(ctx->loop[0]));
		ctx->loop_depth_max = new_max;
	}

	ctx->loop[ctx->loop_depth - 1].loop_block = loop_block;
	ctx->loop[ctx->loop_depth - 1].endloop_block = endloop_block;
}
526
527 static void brk_emit(
528 const struct lp_build_tgsi_action * action,
529 struct lp_build_tgsi_context * bld_base,
530 struct lp_build_emit_data * emit_data)
531 {
532 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
533 struct gallivm_state * gallivm = bld_base->base.gallivm;
534 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
535
536 LLVMBuildBr(gallivm->builder, current_loop->endloop_block);
537 }
538
539 static void cont_emit(
540 const struct lp_build_tgsi_action * action,
541 struct lp_build_tgsi_context * bld_base,
542 struct lp_build_emit_data * emit_data)
543 {
544 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
545 struct gallivm_state * gallivm = bld_base->base.gallivm;
546 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
547
548 LLVMBuildBr(gallivm->builder, current_loop->loop_block);
549 }
550
/**
 * ELSE: terminate the THEN side with a branch to ENDIF (when it is not
 * already terminated) and reposition the builder at the start of the
 * ELSE block created by if_cond_emit().
 */
static void else_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
	LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);

	/* We need to add a terminator to the current block if the previous
	 * instruction was an ENDIF. Example:
	 * IF
	 *   [code]
	 *   IF
	 *     [code]
	 *   ELSE
	 *     [code]
	 *   ENDIF  <--
	 * ELSE     <--
	 *   [code]
	 * ENDIF
	 */

	if (current_block != current_branch->if_block) {
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	/* Terminate the IF block too if nothing has done so yet. */
	if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	current_branch->has_else = 1;
	LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
}
584
/**
 * ENDIF: make sure the current block and both the IF and ELSE blocks
 * are terminated with a branch to the ENDIF block, then continue
 * emitting there and pop the branch stack.
 */
static void endif_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
	LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);

	/* If we have consecutive ENDIF instructions, then the first ENDIF
	 * will not have a terminator, so we need to add one. */
	if (current_block != current_branch->if_block
	    && current_block != current_branch->else_block
	    && !LLVMGetBasicBlockTerminator(current_block)) {

		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	/* An empty or absent ELSE leaves else_block unterminated; a bare
	 * fallthrough to ENDIF is emitted for it here. */
	if (!LLVMGetBasicBlockTerminator(current_branch->else_block)) {
		LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}

	/* Likewise for an empty THEN side. */
	if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
		LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->if_block);
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}

	LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->endif_block);
	ctx->branch_depth--;
}
616
617 static void endloop_emit(
618 const struct lp_build_tgsi_action * action,
619 struct lp_build_tgsi_context * bld_base,
620 struct lp_build_emit_data * emit_data)
621 {
622 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
623 struct gallivm_state * gallivm = bld_base->base.gallivm;
624 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
625
626 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(gallivm->builder))) {
627 LLVMBuildBr(gallivm->builder, current_loop->loop_block);
628 }
629
630 LLVMPositionBuilderAtEnd(gallivm->builder, current_loop->endloop_block);
631 ctx->loop_depth--;
632 }
633
/**
 * Shared tail of IF/UIF: create the IF/ELSE/ENDIF basic blocks, emit
 * the conditional branch on the given i1 condition, position the
 * builder in the IF block, and push a record onto the branch stack
 * (grown on demand) for ELSE/ENDIF to consume.
 */
static void if_cond_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data,
	LLVMValueRef cond)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBasicBlockRef if_block, else_block, endif_block;

	/* ENDIF is appended last; IF and ELSE are inserted before it so
	 * the blocks appear in source order in the function. */
	endif_block = LLVMAppendBasicBlockInContext(gallivm->context,
						    ctx->main_fn, "ENDIF");
	if_block = LLVMInsertBasicBlockInContext(gallivm->context,
						 endif_block, "IF");
	else_block = LLVMInsertBasicBlockInContext(gallivm->context,
						   endif_block, "ELSE");
	LLVMBuildCondBr(gallivm->builder, cond, if_block, else_block);
	LLVMPositionBuilderAtEnd(gallivm->builder, if_block);

	/* Grow the branch stack geometrically when it overflows. */
	if (++ctx->branch_depth > ctx->branch_depth_max) {
		unsigned new_max = ctx->branch_depth_max << 1;

		if (!new_max)
			new_max = RADEON_LLVM_INITIAL_CF_DEPTH;

		ctx->branch = REALLOC(ctx->branch, ctx->branch_depth_max *
				      sizeof(ctx->branch[0]),
				      new_max * sizeof(ctx->branch[0]));
		ctx->branch_depth_max = new_max;
	}

	ctx->branch[ctx->branch_depth - 1].endif_block = endif_block;
	ctx->branch[ctx->branch_depth - 1].if_block = if_block;
	ctx->branch[ctx->branch_depth - 1].else_block = else_block;
	ctx->branch[ctx->branch_depth - 1].has_else = 0;
}
670
671 static void if_emit(
672 const struct lp_build_tgsi_action * action,
673 struct lp_build_tgsi_context * bld_base,
674 struct lp_build_emit_data * emit_data)
675 {
676 struct gallivm_state * gallivm = bld_base->base.gallivm;
677 LLVMValueRef cond;
678
679 cond = LLVMBuildFCmp(gallivm->builder, LLVMRealUNE,
680 emit_data->args[0],
681 bld_base->base.zero, "");
682
683 if_cond_emit(action, bld_base, emit_data, cond);
684 }
685
686 static void uif_emit(
687 const struct lp_build_tgsi_action * action,
688 struct lp_build_tgsi_context * bld_base,
689 struct lp_build_emit_data * emit_data)
690 {
691 struct gallivm_state * gallivm = bld_base->base.gallivm;
692 LLVMValueRef cond;
693
694 cond = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
695 bitcast(bld_base, TGSI_TYPE_UNSIGNED, emit_data->args[0]),
696 bld_base->int_bld.zero, "");
697
698 if_cond_emit(action, bld_base, emit_data, cond);
699 }
700
701 static void kill_if_fetch_args(
702 struct lp_build_tgsi_context * bld_base,
703 struct lp_build_emit_data * emit_data)
704 {
705 const struct tgsi_full_instruction * inst = emit_data->inst;
706 struct gallivm_state *gallivm = bld_base->base.gallivm;
707 LLVMBuilderRef builder = gallivm->builder;
708 unsigned i;
709 LLVMValueRef conds[TGSI_NUM_CHANNELS];
710
711 for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
712 LLVMValueRef value = lp_build_emit_fetch(bld_base, inst, 0, i);
713 conds[i] = LLVMBuildFCmp(builder, LLVMRealOLT, value,
714 bld_base->base.zero, "");
715 }
716
717 /* Or the conditions together */
718 for (i = TGSI_NUM_CHANNELS - 1; i > 0; i--) {
719 conds[i - 1] = LLVMBuildOr(builder, conds[i], conds[i - 1], "");
720 }
721
722 emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);
723 emit_data->arg_count = 1;
724 emit_data->args[0] = LLVMBuildSelect(builder, conds[0],
725 lp_build_const_float(gallivm, -1.0f),
726 bld_base->base.zero, "");
727 }
728
729 static void kil_emit(
730 const struct lp_build_tgsi_action * action,
731 struct lp_build_tgsi_context * bld_base,
732 struct lp_build_emit_data * emit_data)
733 {
734 unsigned i;
735 for (i = 0; i < emit_data->arg_count; i++) {
736 emit_data->output[i] = lp_build_intrinsic_unary(
737 bld_base->base.gallivm->builder,
738 action->intr_name,
739 emit_data->dst_type, emit_data->args[i]);
740 }
741 }
742
/**
 * Lower 3D cube-map coordinates to 2D face coordinates using the
 * llvm.AMDGPU.cube intrinsic, then normalize: out[0]/out[1] are the
 * in-face s/t coordinates and out[2] is the face index.
 */
static void radeon_llvm_cube_to_2d_coords(struct lp_build_tgsi_context *bld_base,
					  LLVMValueRef *in, LLVMValueRef *out)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMTypeRef type = bld_base->base.elem_type;
	LLVMValueRef coords[4];
	LLVMValueRef mad_args[3];
	LLVMValueRef v, cube_vec;
	unsigned i;

	cube_vec = lp_build_gather_values(bld_base->base.gallivm, in, 4);
	/* v = <s', t', m (major axis magnitude), face id> */
	v = lp_build_intrinsic(builder, "llvm.AMDGPU.cube", LLVMVectorType(type, 4),
			       &cube_vec, 1, LLVMReadNoneAttribute);

	for (i = 0; i < 4; ++i)
		coords[i] = LLVMBuildExtractElement(builder, v,
						    lp_build_const_int32(gallivm, i), "");

	/* coords[2] = 1 / |major axis| */
	coords[2] = lp_build_intrinsic(builder, "llvm.fabs.f32",
				       type, &coords[2], 1, LLVMReadNoneAttribute);
	coords[2] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_RCP, coords[2]);

	/* s/t = s'/|m| + 1.5 and t'/|m| + 1.5 (remap into [1,2] texel space). */
	mad_args[1] = coords[2];
	mad_args[2] = LLVMConstReal(type, 1.5);

	mad_args[0] = coords[0];
	coords[0] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	mad_args[0] = coords[1];
	coords[1] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	/* apply xyz = yxw swizzle to coords */
	out[0] = coords[1];
	out[1] = coords[0];
	out[2] = coords[3];
}
782
/**
 * Rewrite the coordinate (and, for TXD, derivative) arguments of a
 * cube-map texture instruction in place: project the 3D cube coords to
 * 2D face coords, convert cube derivatives to 2D derivatives, fold the
 * array index into coord.z for cube arrays, and preserve the
 * compare/LOD/bias value in coord.w.
 */
void radeon_llvm_emit_prepare_cube_coords(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data,
	LLVMValueRef *coords_arg,
	LLVMValueRef *derivs_arg)
{

	unsigned target = emit_data->inst->Texture.Texture;
	unsigned opcode = emit_data->inst->Instruction.Opcode;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef coords[4];
	unsigned i;

	radeon_llvm_cube_to_2d_coords(bld_base, coords_arg, coords);

	if (opcode == TGSI_OPCODE_TXD && derivs_arg) {
		LLVMValueRef derivs[4];
		int axis;

		/* Convert cube derivatives to 2D derivatives. */
		for (axis = 0; axis < 2; axis++) {
			LLVMValueRef shifted_cube_coords[4], shifted_coords[4];

			/* Shift the cube coordinates by the derivatives to get
			 * the cube coordinates of the "neighboring pixel".
			 */
			for (i = 0; i < 3; i++)
				shifted_cube_coords[i] =
					LLVMBuildFAdd(builder, coords_arg[i],
						      derivs_arg[axis*3+i], "");
			shifted_cube_coords[3] = LLVMGetUndef(bld_base->base.elem_type);

			/* Project the shifted cube coordinates onto the face. */
			radeon_llvm_cube_to_2d_coords(bld_base, shifted_cube_coords,
						      shifted_coords);

			/* Subtract both sets of 2D coordinates to get 2D derivatives.
			 * This won't work if the shifted coordinates ended up
			 * in a different face.
			 */
			for (i = 0; i < 2; i++)
				derivs[axis * 2 + i] =
					LLVMBuildFSub(builder, shifted_coords[i],
						      coords[i], "");
		}

		memcpy(derivs_arg, derivs, sizeof(derivs));
	}

	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		/* for cube arrays coord.z = coord.w(array_index) * 8 + face */
		/* coords_arg.w component - array_index for cube arrays */
		coords[2] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
			coords_arg[3], lp_build_const_float(gallivm, 8.0), coords[2]);
	}

	/* Preserve compare/lod/bias. Put it in coords.w. */
	if (opcode == TGSI_OPCODE_TEX2 ||
	    opcode == TGSI_OPCODE_TXB2 ||
	    opcode == TGSI_OPCODE_TXL2) {
		/* The 2-operand forms carry it in the second resource word. */
		coords[3] = coords_arg[4];
	} else if (opcode == TGSI_OPCODE_TXB ||
		   opcode == TGSI_OPCODE_TXL ||
		   target == TGSI_TEXTURE_SHADOWCUBE) {
		coords[3] = coords_arg[3];
	}

	memcpy(coords_arg, coords, sizeof(coords));
}
854
855 static void emit_icmp(
856 const struct lp_build_tgsi_action * action,
857 struct lp_build_tgsi_context * bld_base,
858 struct lp_build_emit_data * emit_data)
859 {
860 unsigned pred;
861 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
862 LLVMContextRef context = bld_base->base.gallivm->context;
863
864 switch (emit_data->inst->Instruction.Opcode) {
865 case TGSI_OPCODE_USEQ: pred = LLVMIntEQ; break;
866 case TGSI_OPCODE_USNE: pred = LLVMIntNE; break;
867 case TGSI_OPCODE_USGE: pred = LLVMIntUGE; break;
868 case TGSI_OPCODE_USLT: pred = LLVMIntULT; break;
869 case TGSI_OPCODE_ISGE: pred = LLVMIntSGE; break;
870 case TGSI_OPCODE_ISLT: pred = LLVMIntSLT; break;
871 default:
872 assert(!"unknown instruction");
873 pred = 0;
874 break;
875 }
876
877 LLVMValueRef v = LLVMBuildICmp(builder, pred,
878 emit_data->args[0], emit_data->args[1],"");
879
880 v = LLVMBuildSExtOrBitCast(builder, v,
881 LLVMInt32TypeInContext(context), "");
882
883 emit_data->output[emit_data->chan] = v;
884 }
885
886 static void emit_ucmp(
887 const struct lp_build_tgsi_action * action,
888 struct lp_build_tgsi_context * bld_base,
889 struct lp_build_emit_data * emit_data)
890 {
891 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
892
893 LLVMValueRef arg0 = LLVMBuildBitCast(builder, emit_data->args[0],
894 bld_base->uint_bld.elem_type, "");
895
896 LLVMValueRef v = LLVMBuildICmp(builder, LLVMIntNE, arg0,
897 bld_base->uint_bld.zero, "");
898
899 emit_data->output[emit_data->chan] =
900 LLVMBuildSelect(builder, v, emit_data->args[1], emit_data->args[2], "");
901 }
902
903 static void emit_cmp(const struct lp_build_tgsi_action *action,
904 struct lp_build_tgsi_context *bld_base,
905 struct lp_build_emit_data *emit_data)
906 {
907 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
908 LLVMValueRef cond, *args = emit_data->args;
909
910 cond = LLVMBuildFCmp(builder, LLVMRealOLT, args[0],
911 bld_base->base.zero, "");
912
913 emit_data->output[emit_data->chan] =
914 LLVMBuildSelect(builder, cond, args[1], args[2], "");
915 }
916
917 static void emit_set_cond(
918 const struct lp_build_tgsi_action *action,
919 struct lp_build_tgsi_context * bld_base,
920 struct lp_build_emit_data * emit_data)
921 {
922 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
923 LLVMRealPredicate pred;
924 LLVMValueRef cond;
925
926 /* Use ordered for everything but NE (which is usual for
927 * float comparisons)
928 */
929 switch (emit_data->inst->Instruction.Opcode) {
930 case TGSI_OPCODE_SGE: pred = LLVMRealOGE; break;
931 case TGSI_OPCODE_SEQ: pred = LLVMRealOEQ; break;
932 case TGSI_OPCODE_SLE: pred = LLVMRealOLE; break;
933 case TGSI_OPCODE_SLT: pred = LLVMRealOLT; break;
934 case TGSI_OPCODE_SNE: pred = LLVMRealUNE; break;
935 case TGSI_OPCODE_SGT: pred = LLVMRealOGT; break;
936 default: assert(!"unknown instruction"); pred = 0; break;
937 }
938
939 cond = LLVMBuildFCmp(builder,
940 pred, emit_data->args[0], emit_data->args[1], "");
941
942 emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
943 cond, bld_base->base.one, bld_base->base.zero, "");
944 }
945
946 static void emit_fcmp(
947 const struct lp_build_tgsi_action *action,
948 struct lp_build_tgsi_context * bld_base,
949 struct lp_build_emit_data * emit_data)
950 {
951 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
952 LLVMContextRef context = bld_base->base.gallivm->context;
953 LLVMRealPredicate pred;
954
955 /* Use ordered for everything but NE (which is usual for
956 * float comparisons)
957 */
958 switch (emit_data->inst->Instruction.Opcode) {
959 case TGSI_OPCODE_FSEQ: pred = LLVMRealOEQ; break;
960 case TGSI_OPCODE_FSGE: pred = LLVMRealOGE; break;
961 case TGSI_OPCODE_FSLT: pred = LLVMRealOLT; break;
962 case TGSI_OPCODE_FSNE: pred = LLVMRealUNE; break;
963 default: assert(!"unknown instruction"); pred = 0; break;
964 }
965
966 LLVMValueRef v = LLVMBuildFCmp(builder, pred,
967 emit_data->args[0], emit_data->args[1],"");
968
969 v = LLVMBuildSExtOrBitCast(builder, v,
970 LLVMInt32TypeInContext(context), "");
971
972 emit_data->output[emit_data->chan] = v;
973 }
974
975 static void emit_dcmp(
976 const struct lp_build_tgsi_action *action,
977 struct lp_build_tgsi_context * bld_base,
978 struct lp_build_emit_data * emit_data)
979 {
980 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
981 LLVMContextRef context = bld_base->base.gallivm->context;
982 LLVMRealPredicate pred;
983
984 /* Use ordered for everything but NE (which is usual for
985 * float comparisons)
986 */
987 switch (emit_data->inst->Instruction.Opcode) {
988 case TGSI_OPCODE_DSEQ: pred = LLVMRealOEQ; break;
989 case TGSI_OPCODE_DSGE: pred = LLVMRealOGE; break;
990 case TGSI_OPCODE_DSLT: pred = LLVMRealOLT; break;
991 case TGSI_OPCODE_DSNE: pred = LLVMRealUNE; break;
992 default: assert(!"unknown instruction"); pred = 0; break;
993 }
994
995 LLVMValueRef v = LLVMBuildFCmp(builder, pred,
996 emit_data->args[0], emit_data->args[1],"");
997
998 v = LLVMBuildSExtOrBitCast(builder, v,
999 LLVMInt32TypeInContext(context), "");
1000
1001 emit_data->output[emit_data->chan] = v;
1002 }
1003
1004 static void emit_not(
1005 const struct lp_build_tgsi_action * action,
1006 struct lp_build_tgsi_context * bld_base,
1007 struct lp_build_emit_data * emit_data)
1008 {
1009 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1010 LLVMValueRef v = bitcast(bld_base, TGSI_TYPE_UNSIGNED,
1011 emit_data->args[0]);
1012 emit_data->output[emit_data->chan] = LLVMBuildNot(builder, v, "");
1013 }
1014
1015 static void emit_arl(
1016 const struct lp_build_tgsi_action * action,
1017 struct lp_build_tgsi_context * bld_base,
1018 struct lp_build_emit_data * emit_data)
1019 {
1020 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1021 LLVMValueRef floor_index = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_FLR, emit_data->args[0]);
1022 emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
1023 floor_index, bld_base->base.int_elem_type , "");
1024 }
1025
1026 static void emit_and(
1027 const struct lp_build_tgsi_action * action,
1028 struct lp_build_tgsi_context * bld_base,
1029 struct lp_build_emit_data * emit_data)
1030 {
1031 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1032 emit_data->output[emit_data->chan] = LLVMBuildAnd(builder,
1033 emit_data->args[0], emit_data->args[1], "");
1034 }
1035
1036 static void emit_or(
1037 const struct lp_build_tgsi_action * action,
1038 struct lp_build_tgsi_context * bld_base,
1039 struct lp_build_emit_data * emit_data)
1040 {
1041 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1042 emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
1043 emit_data->args[0], emit_data->args[1], "");
1044 }
1045
1046 static void emit_uadd(
1047 const struct lp_build_tgsi_action * action,
1048 struct lp_build_tgsi_context * bld_base,
1049 struct lp_build_emit_data * emit_data)
1050 {
1051 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1052 emit_data->output[emit_data->chan] = LLVMBuildAdd(builder,
1053 emit_data->args[0], emit_data->args[1], "");
1054 }
1055
1056 static void emit_udiv(
1057 const struct lp_build_tgsi_action * action,
1058 struct lp_build_tgsi_context * bld_base,
1059 struct lp_build_emit_data * emit_data)
1060 {
1061 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1062 emit_data->output[emit_data->chan] = LLVMBuildUDiv(builder,
1063 emit_data->args[0], emit_data->args[1], "");
1064 }
1065
1066 static void emit_idiv(
1067 const struct lp_build_tgsi_action * action,
1068 struct lp_build_tgsi_context * bld_base,
1069 struct lp_build_emit_data * emit_data)
1070 {
1071 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1072 emit_data->output[emit_data->chan] = LLVMBuildSDiv(builder,
1073 emit_data->args[0], emit_data->args[1], "");
1074 }
1075
1076 static void emit_mod(
1077 const struct lp_build_tgsi_action * action,
1078 struct lp_build_tgsi_context * bld_base,
1079 struct lp_build_emit_data * emit_data)
1080 {
1081 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1082 emit_data->output[emit_data->chan] = LLVMBuildSRem(builder,
1083 emit_data->args[0], emit_data->args[1], "");
1084 }
1085
1086 static void emit_umod(
1087 const struct lp_build_tgsi_action * action,
1088 struct lp_build_tgsi_context * bld_base,
1089 struct lp_build_emit_data * emit_data)
1090 {
1091 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1092 emit_data->output[emit_data->chan] = LLVMBuildURem(builder,
1093 emit_data->args[0], emit_data->args[1], "");
1094 }
1095
1096 static void emit_shl(
1097 const struct lp_build_tgsi_action * action,
1098 struct lp_build_tgsi_context * bld_base,
1099 struct lp_build_emit_data * emit_data)
1100 {
1101 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1102 emit_data->output[emit_data->chan] = LLVMBuildShl(builder,
1103 emit_data->args[0], emit_data->args[1], "");
1104 }
1105
1106 static void emit_ushr(
1107 const struct lp_build_tgsi_action * action,
1108 struct lp_build_tgsi_context * bld_base,
1109 struct lp_build_emit_data * emit_data)
1110 {
1111 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1112 emit_data->output[emit_data->chan] = LLVMBuildLShr(builder,
1113 emit_data->args[0], emit_data->args[1], "");
1114 }
1115 static void emit_ishr(
1116 const struct lp_build_tgsi_action * action,
1117 struct lp_build_tgsi_context * bld_base,
1118 struct lp_build_emit_data * emit_data)
1119 {
1120 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1121 emit_data->output[emit_data->chan] = LLVMBuildAShr(builder,
1122 emit_data->args[0], emit_data->args[1], "");
1123 }
1124
1125 static void emit_xor(
1126 const struct lp_build_tgsi_action * action,
1127 struct lp_build_tgsi_context * bld_base,
1128 struct lp_build_emit_data * emit_data)
1129 {
1130 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1131 emit_data->output[emit_data->chan] = LLVMBuildXor(builder,
1132 emit_data->args[0], emit_data->args[1], "");
1133 }
1134
1135 static void emit_ssg(
1136 const struct lp_build_tgsi_action * action,
1137 struct lp_build_tgsi_context * bld_base,
1138 struct lp_build_emit_data * emit_data)
1139 {
1140 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1141
1142 LLVMValueRef cmp, val;
1143
1144 if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_ISSG) {
1145 cmp = LLVMBuildICmp(builder, LLVMIntSGT, emit_data->args[0], bld_base->int_bld.zero, "");
1146 val = LLVMBuildSelect(builder, cmp, bld_base->int_bld.one, emit_data->args[0], "");
1147 cmp = LLVMBuildICmp(builder, LLVMIntSGE, val, bld_base->int_bld.zero, "");
1148 val = LLVMBuildSelect(builder, cmp, val, LLVMConstInt(bld_base->int_bld.elem_type, -1, true), "");
1149 } else { // float SSG
1150 cmp = LLVMBuildFCmp(builder, LLVMRealOGT, emit_data->args[0], bld_base->base.zero, "");
1151 val = LLVMBuildSelect(builder, cmp, bld_base->base.one, emit_data->args[0], "");
1152 cmp = LLVMBuildFCmp(builder, LLVMRealOGE, val, bld_base->base.zero, "");
1153 val = LLVMBuildSelect(builder, cmp, val, LLVMConstReal(bld_base->base.elem_type, -1), "");
1154 }
1155
1156 emit_data->output[emit_data->chan] = val;
1157 }
1158
1159 static void emit_ineg(
1160 const struct lp_build_tgsi_action * action,
1161 struct lp_build_tgsi_context * bld_base,
1162 struct lp_build_emit_data * emit_data)
1163 {
1164 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1165 emit_data->output[emit_data->chan] = LLVMBuildNeg(builder,
1166 emit_data->args[0], "");
1167 }
1168
1169 static void emit_dneg(
1170 const struct lp_build_tgsi_action * action,
1171 struct lp_build_tgsi_context * bld_base,
1172 struct lp_build_emit_data * emit_data)
1173 {
1174 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1175 emit_data->output[emit_data->chan] = LLVMBuildFNeg(builder,
1176 emit_data->args[0], "");
1177 }
1178
1179 static void emit_frac(
1180 const struct lp_build_tgsi_action * action,
1181 struct lp_build_tgsi_context * bld_base,
1182 struct lp_build_emit_data * emit_data)
1183 {
1184 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1185 char *intr;
1186
1187 if (emit_data->info->opcode == TGSI_OPCODE_FRC)
1188 intr = "llvm.floor.f32";
1189 else if (emit_data->info->opcode == TGSI_OPCODE_DFRAC)
1190 intr = "llvm.floor.f64";
1191 else {
1192 assert(0);
1193 return;
1194 }
1195
1196 LLVMValueRef floor = lp_build_intrinsic(builder, intr, emit_data->dst_type,
1197 &emit_data->args[0], 1,
1198 LLVMReadNoneAttribute);
1199 emit_data->output[emit_data->chan] = LLVMBuildFSub(builder,
1200 emit_data->args[0], floor, "");
1201 }
1202
1203 static void emit_f2i(
1204 const struct lp_build_tgsi_action * action,
1205 struct lp_build_tgsi_context * bld_base,
1206 struct lp_build_emit_data * emit_data)
1207 {
1208 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1209 emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
1210 emit_data->args[0], bld_base->int_bld.elem_type, "");
1211 }
1212
1213 static void emit_f2u(
1214 const struct lp_build_tgsi_action * action,
1215 struct lp_build_tgsi_context * bld_base,
1216 struct lp_build_emit_data * emit_data)
1217 {
1218 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1219 emit_data->output[emit_data->chan] = LLVMBuildFPToUI(builder,
1220 emit_data->args[0], bld_base->uint_bld.elem_type, "");
1221 }
1222
1223 static void emit_i2f(
1224 const struct lp_build_tgsi_action * action,
1225 struct lp_build_tgsi_context * bld_base,
1226 struct lp_build_emit_data * emit_data)
1227 {
1228 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1229 emit_data->output[emit_data->chan] = LLVMBuildSIToFP(builder,
1230 emit_data->args[0], bld_base->base.elem_type, "");
1231 }
1232
1233 static void emit_u2f(
1234 const struct lp_build_tgsi_action * action,
1235 struct lp_build_tgsi_context * bld_base,
1236 struct lp_build_emit_data * emit_data)
1237 {
1238 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1239 emit_data->output[emit_data->chan] = LLVMBuildUIToFP(builder,
1240 emit_data->args[0], bld_base->base.elem_type, "");
1241 }
1242
1243 static void emit_immediate(struct lp_build_tgsi_context * bld_base,
1244 const struct tgsi_full_immediate *imm)
1245 {
1246 unsigned i;
1247 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
1248
1249 for (i = 0; i < 4; ++i) {
1250 ctx->soa.immediates[ctx->soa.num_immediates][i] =
1251 LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false );
1252 }
1253
1254 ctx->soa.num_immediates++;
1255 }
1256
1257 void
1258 build_tgsi_intrinsic_nomem(const struct lp_build_tgsi_action *action,
1259 struct lp_build_tgsi_context *bld_base,
1260 struct lp_build_emit_data *emit_data)
1261 {
1262 struct lp_build_context * base = &bld_base->base;
1263 emit_data->output[emit_data->chan] =
1264 lp_build_intrinsic(base->gallivm->builder, action->intr_name,
1265 emit_data->dst_type, emit_data->args,
1266 emit_data->arg_count, LLVMReadNoneAttribute);
1267 }
1268
1269 static void emit_bfi(const struct lp_build_tgsi_action * action,
1270 struct lp_build_tgsi_context * bld_base,
1271 struct lp_build_emit_data * emit_data)
1272 {
1273 struct gallivm_state *gallivm = bld_base->base.gallivm;
1274 LLVMBuilderRef builder = gallivm->builder;
1275 LLVMValueRef bfi_args[3];
1276
1277 // Calculate the bitmask: (((1 << src3) - 1) << src2
1278 bfi_args[0] = LLVMBuildShl(builder,
1279 LLVMBuildSub(builder,
1280 LLVMBuildShl(builder,
1281 bld_base->int_bld.one,
1282 emit_data->args[3], ""),
1283 bld_base->int_bld.one, ""),
1284 emit_data->args[2], "");
1285
1286 bfi_args[1] = LLVMBuildShl(builder, emit_data->args[1],
1287 emit_data->args[2], "");
1288
1289 bfi_args[2] = emit_data->args[0];
1290
1291 /* Calculate:
1292 * (arg0 & arg1) | (~arg0 & arg2) = arg2 ^ (arg0 & (arg1 ^ arg2)
1293 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
1294 */
1295 emit_data->output[emit_data->chan] =
1296 LLVMBuildXor(builder, bfi_args[2],
1297 LLVMBuildAnd(builder, bfi_args[0],
1298 LLVMBuildXor(builder, bfi_args[1], bfi_args[2],
1299 ""), ""), "");
1300 }
1301
1302 /* this is ffs in C */
1303 static void emit_lsb(const struct lp_build_tgsi_action * action,
1304 struct lp_build_tgsi_context * bld_base,
1305 struct lp_build_emit_data * emit_data)
1306 {
1307 struct gallivm_state *gallivm = bld_base->base.gallivm;
1308 LLVMValueRef args[2] = {
1309 emit_data->args[0],
1310
1311 /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
1312 * add special code to check for x=0. The reason is that
1313 * the LLVM behavior for x=0 is different from what we
1314 * need here.
1315 *
1316 * The hardware already implements the correct behavior.
1317 */
1318 lp_build_const_int32(gallivm, 1)
1319 };
1320
1321 emit_data->output[emit_data->chan] =
1322 lp_build_intrinsic(gallivm->builder, "llvm.cttz.i32",
1323 emit_data->dst_type, args, ARRAY_SIZE(args),
1324 LLVMReadNoneAttribute);
1325 }
1326
1327 /* Find the last bit set. */
1328 static void emit_umsb(const struct lp_build_tgsi_action * action,
1329 struct lp_build_tgsi_context * bld_base,
1330 struct lp_build_emit_data * emit_data)
1331 {
1332 struct gallivm_state *gallivm = bld_base->base.gallivm;
1333 LLVMBuilderRef builder = gallivm->builder;
1334 LLVMValueRef args[2] = {
1335 emit_data->args[0],
1336 /* Don't generate code for handling zero: */
1337 lp_build_const_int32(gallivm, 1)
1338 };
1339
1340 LLVMValueRef msb =
1341 lp_build_intrinsic(builder, "llvm.ctlz.i32",
1342 emit_data->dst_type, args, ARRAY_SIZE(args),
1343 LLVMReadNoneAttribute);
1344
1345 /* The HW returns the last bit index from MSB, but TGSI wants
1346 * the index from LSB. Invert it by doing "31 - msb". */
1347 msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
1348 msb, "");
1349
1350 /* Check for zero: */
1351 emit_data->output[emit_data->chan] =
1352 LLVMBuildSelect(builder,
1353 LLVMBuildICmp(builder, LLVMIntEQ, args[0],
1354 bld_base->uint_bld.zero, ""),
1355 lp_build_const_int32(gallivm, -1), msb, "");
1356 }
1357
1358 /* Find the last bit opposite of the sign bit. */
1359 static void emit_imsb(const struct lp_build_tgsi_action * action,
1360 struct lp_build_tgsi_context * bld_base,
1361 struct lp_build_emit_data * emit_data)
1362 {
1363 struct gallivm_state *gallivm = bld_base->base.gallivm;
1364 LLVMBuilderRef builder = gallivm->builder;
1365 LLVMValueRef arg = emit_data->args[0];
1366
1367 LLVMValueRef msb =
1368 lp_build_intrinsic(builder, "llvm.AMDGPU.flbit.i32",
1369 emit_data->dst_type, &arg, 1,
1370 LLVMReadNoneAttribute);
1371
1372 /* The HW returns the last bit index from MSB, but TGSI wants
1373 * the index from LSB. Invert it by doing "31 - msb". */
1374 msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
1375 msb, "");
1376
1377 /* If arg == 0 || arg == -1 (0xffffffff), return -1. */
1378 LLVMValueRef all_ones = lp_build_const_int32(gallivm, -1);
1379
1380 LLVMValueRef cond =
1381 LLVMBuildOr(builder,
1382 LLVMBuildICmp(builder, LLVMIntEQ, arg,
1383 bld_base->uint_bld.zero, ""),
1384 LLVMBuildICmp(builder, LLVMIntEQ, arg,
1385 all_ones, ""), "");
1386
1387 emit_data->output[emit_data->chan] =
1388 LLVMBuildSelect(builder, cond, all_ones, msb, "");
1389 }
1390
1391 static void emit_iabs(const struct lp_build_tgsi_action *action,
1392 struct lp_build_tgsi_context *bld_base,
1393 struct lp_build_emit_data *emit_data)
1394 {
1395 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1396
1397 emit_data->output[emit_data->chan] =
1398 lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_IMAX,
1399 emit_data->args[0],
1400 LLVMBuildNeg(builder,
1401 emit_data->args[0], ""));
1402 }
1403
1404 static void emit_minmax_int(const struct lp_build_tgsi_action *action,
1405 struct lp_build_tgsi_context *bld_base,
1406 struct lp_build_emit_data *emit_data)
1407 {
1408 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1409 LLVMIntPredicate op;
1410
1411 switch (emit_data->info->opcode) {
1412 default:
1413 assert(0);
1414 case TGSI_OPCODE_IMAX:
1415 op = LLVMIntSGT;
1416 break;
1417 case TGSI_OPCODE_IMIN:
1418 op = LLVMIntSLT;
1419 break;
1420 case TGSI_OPCODE_UMAX:
1421 op = LLVMIntUGT;
1422 break;
1423 case TGSI_OPCODE_UMIN:
1424 op = LLVMIntULT;
1425 break;
1426 }
1427
1428 emit_data->output[emit_data->chan] =
1429 LLVMBuildSelect(builder,
1430 LLVMBuildICmp(builder, op, emit_data->args[0],
1431 emit_data->args[1], ""),
1432 emit_data->args[0],
1433 emit_data->args[1], "");
1434 }
1435
1436 static void pk2h_fetch_args(struct lp_build_tgsi_context * bld_base,
1437 struct lp_build_emit_data * emit_data)
1438 {
1439 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
1440 0, TGSI_CHAN_X);
1441 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
1442 0, TGSI_CHAN_Y);
1443 }
1444
1445 static void emit_pk2h(const struct lp_build_tgsi_action *action,
1446 struct lp_build_tgsi_context *bld_base,
1447 struct lp_build_emit_data *emit_data)
1448 {
1449 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1450 LLVMContextRef context = bld_base->base.gallivm->context;
1451 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1452 LLVMTypeRef fp16, i16;
1453 LLVMValueRef const16, comp[2];
1454 unsigned i;
1455
1456 fp16 = LLVMHalfTypeInContext(context);
1457 i16 = LLVMInt16TypeInContext(context);
1458 const16 = lp_build_const_int32(uint_bld->gallivm, 16);
1459
1460 for (i = 0; i < 2; i++) {
1461 comp[i] = LLVMBuildFPTrunc(builder, emit_data->args[i], fp16, "");
1462 comp[i] = LLVMBuildBitCast(builder, comp[i], i16, "");
1463 comp[i] = LLVMBuildZExt(builder, comp[i], uint_bld->elem_type, "");
1464 }
1465
1466 comp[1] = LLVMBuildShl(builder, comp[1], const16, "");
1467 comp[0] = LLVMBuildOr(builder, comp[0], comp[1], "");
1468
1469 emit_data->output[emit_data->chan] = comp[0];
1470 }
1471
1472 static void up2h_fetch_args(struct lp_build_tgsi_context * bld_base,
1473 struct lp_build_emit_data * emit_data)
1474 {
1475 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
1476 0, TGSI_CHAN_X);
1477 }
1478
1479 static void emit_up2h(const struct lp_build_tgsi_action *action,
1480 struct lp_build_tgsi_context *bld_base,
1481 struct lp_build_emit_data *emit_data)
1482 {
1483 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1484 LLVMContextRef context = bld_base->base.gallivm->context;
1485 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1486 LLVMTypeRef fp16, i16;
1487 LLVMValueRef const16, input, val;
1488 unsigned i;
1489
1490 fp16 = LLVMHalfTypeInContext(context);
1491 i16 = LLVMInt16TypeInContext(context);
1492 const16 = lp_build_const_int32(uint_bld->gallivm, 16);
1493 input = emit_data->args[0];
1494
1495 for (i = 0; i < 2; i++) {
1496 val = i == 1 ? LLVMBuildLShr(builder, input, const16, "") : input;
1497 val = LLVMBuildTrunc(builder, val, i16, "");
1498 val = LLVMBuildBitCast(builder, val, fp16, "");
1499 emit_data->output[i] =
1500 LLVMBuildFPExt(builder, val, bld_base->base.elem_type, "");
1501 }
1502 }
1503
1504 static void emit_fdiv(const struct lp_build_tgsi_action *action,
1505 struct lp_build_tgsi_context *bld_base,
1506 struct lp_build_emit_data *emit_data)
1507 {
1508 struct radeon_llvm_context *ctx = radeon_llvm_context(bld_base);
1509
1510 emit_data->output[emit_data->chan] =
1511 LLVMBuildFDiv(bld_base->base.gallivm->builder,
1512 emit_data->args[0], emit_data->args[1], "");
1513
1514 /* Use v_rcp_f32 instead of precise division. */
1515 if (HAVE_LLVM >= 0x0309 &&
1516 !LLVMIsConstant(emit_data->output[emit_data->chan]))
1517 LLVMSetMetadata(emit_data->output[emit_data->chan],
1518 ctx->fpmath_md_kind, ctx->fpmath_md_2p5_ulp);
1519 }
1520
1521 /* 1/sqrt is translated to rsq for f32 if fp32 denormals are not enabled in
1522 * the target machine. f64 needs global unsafe math flags to get rsq. */
1523 static void emit_rsq(const struct lp_build_tgsi_action *action,
1524 struct lp_build_tgsi_context *bld_base,
1525 struct lp_build_emit_data *emit_data)
1526 {
1527 LLVMValueRef sqrt =
1528 lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_SQRT,
1529 emit_data->args[0]);
1530
1531 emit_data->output[emit_data->chan] =
1532 lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_DIV,
1533 bld_base->base.one, sqrt);
1534 }
1535
/* Initialize a radeon_llvm_context: create the LLVM context, module and
 * builder, set up the gallivm SoA build contexts, and register the TGSI
 * opcode actions used by the radeon backends.
 *
 * \param ctx     the context to initialize (its gallivm and soa state is
 *                fully overwritten)
 * \param triple  LLVM target triple stored on the new module
 */
void radeon_llvm_context_init(struct radeon_llvm_context * ctx, const char *triple)
{
	struct lp_type type;

	/* Initialize the gallivm object:
	 * We are only using the module, context, and builder fields of this struct.
	 * This should be enough for us to be able to pass our gallivm struct to the
	 * helper functions in the gallivm module.
	 */
	memset(&ctx->gallivm, 0, sizeof (ctx->gallivm));
	memset(&ctx->soa, 0, sizeof(ctx->soa));
	ctx->gallivm.context = LLVMContextCreate();
	ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
						ctx->gallivm.context);
	LLVMSetTarget(ctx->gallivm.module, triple);
	ctx->gallivm.builder = LLVMCreateBuilderInContext(ctx->gallivm.context);

	struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;

	/* Scalar 32-bit float type — one value per channel (SoA). */
	type.floating = true;
	type.fixed = false;
	type.sign = true;
	type.norm = false;
	type.width = 32;
	type.length = 1;

	/* Derive the uint/int/double build contexts from the float one. */
	lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
	lp_build_context_init(&ctx->soa.bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
	lp_build_context_init(&ctx->soa.bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
	{
		struct lp_type dbl_type;
		dbl_type = type;
		dbl_type.width *= 2;
		lp_build_context_init(&ctx->soa.bld_base.dbl_bld, &ctx->gallivm, dbl_type);
	}

	/* Generic translation hooks. */
	bld_base->soa = 1;
	bld_base->emit_store = radeon_llvm_emit_store;
	bld_base->emit_swizzle = emit_swizzle;
	bld_base->emit_declaration = emit_declaration;
	bld_base->emit_immediate = emit_immediate;

	bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;

	/* metadata allowing 2.5 ULP */
	ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->gallivm.context,
						       "fpmath", 6);
	LLVMValueRef arg = lp_build_const_float(&ctx->gallivm, 2.5);
	ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->gallivm.context,
						     &arg, 1);

	/* Allocate outputs */
	ctx->soa.outputs = ctx->outputs;

	/* Start from gallivm's default actions, then override with the
	 * radeon-specific emitters / intrinsic names below. */
	lp_set_default_actions(bld_base);

	bld_base->op_actions[TGSI_OPCODE_ABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_ABS].intr_name = "llvm.fabs.f32";
	bld_base->op_actions[TGSI_OPCODE_AND].emit = emit_and;
	bld_base->op_actions[TGSI_OPCODE_ARL].emit = emit_arl;
	bld_base->op_actions[TGSI_OPCODE_BFI].emit = emit_bfi;
	bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
	bld_base->op_actions[TGSI_OPCODE_BREV].emit = build_tgsi_intrinsic_nomem;
	/* Several intrinsics changed names across LLVM releases; pick by
	 * the LLVM version we were built against. */
	bld_base->op_actions[TGSI_OPCODE_BREV].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.bitreverse.i32" : "llvm.AMDGPU.brev";
	bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
	bld_base->op_actions[TGSI_OPCODE_CEIL].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CEIL].intr_name = "llvm.ceil.f32";
	bld_base->op_actions[TGSI_OPCODE_CLAMP].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CLAMP].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.AMDGPU.clamp." : "llvm.AMDIL.clamp.";
	bld_base->op_actions[TGSI_OPCODE_CMP].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
	bld_base->op_actions[TGSI_OPCODE_COS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_COS].intr_name = "llvm.cos.f32";
	bld_base->op_actions[TGSI_OPCODE_DABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DABS].intr_name = "llvm.fabs.f64";
	bld_base->op_actions[TGSI_OPCODE_DFMA].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DFMA].intr_name = "llvm.fma.f64";
	bld_base->op_actions[TGSI_OPCODE_DFRAC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_DIV].emit = emit_fdiv;
	bld_base->op_actions[TGSI_OPCODE_DNEG].emit = emit_dneg;
	bld_base->op_actions[TGSI_OPCODE_DSEQ].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSGE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSLT].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSNE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].intr_name = "llvm.AMDGPU.rsq.f64";
	bld_base->op_actions[TGSI_OPCODE_DSQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DSQRT].intr_name = "llvm.sqrt.f64";
	bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
	bld_base->op_actions[TGSI_OPCODE_EX2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_EX2].intr_name =
		HAVE_LLVM >= 0x0308 ? "llvm.exp2.f32" : "llvm.AMDIL.exp.";
	bld_base->op_actions[TGSI_OPCODE_FLR].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_FLR].intr_name = "llvm.floor.f32";
	bld_base->op_actions[TGSI_OPCODE_FMA].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_FMA].intr_name = "llvm.fma.f32";
	bld_base->op_actions[TGSI_OPCODE_FRC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_F2I].emit = emit_f2i;
	bld_base->op_actions[TGSI_OPCODE_F2U].emit = emit_f2u;
	bld_base->op_actions[TGSI_OPCODE_FSEQ].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSGE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSLT].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSNE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_IABS].emit = emit_iabs;
	bld_base->op_actions[TGSI_OPCODE_IBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IBFE].intr_name = "llvm.AMDGPU.bfe.i32";
	bld_base->op_actions[TGSI_OPCODE_IDIV].emit = emit_idiv;
	bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
	bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
	bld_base->op_actions[TGSI_OPCODE_IMAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_IMIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_IMSB].emit = emit_imsb;
	bld_base->op_actions[TGSI_OPCODE_INEG].emit = emit_ineg;
	bld_base->op_actions[TGSI_OPCODE_ISHR].emit = emit_ishr;
	bld_base->op_actions[TGSI_OPCODE_ISGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_I2F].emit = emit_i2f;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].fetch_args = kill_if_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].emit = kil_emit;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].intr_name = "llvm.AMDGPU.kill";
	bld_base->op_actions[TGSI_OPCODE_KILL].emit = lp_build_tgsi_intrinsic;
	bld_base->op_actions[TGSI_OPCODE_KILL].intr_name = "llvm.AMDGPU.kilp";
	bld_base->op_actions[TGSI_OPCODE_LSB].emit = emit_lsb;
	bld_base->op_actions[TGSI_OPCODE_LG2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_LG2].intr_name = "llvm.log2.f32";
	bld_base->op_actions[TGSI_OPCODE_MOD].emit = emit_mod;
	bld_base->op_actions[TGSI_OPCODE_UMSB].emit = emit_umsb;
	bld_base->op_actions[TGSI_OPCODE_NOT].emit = emit_not;
	bld_base->op_actions[TGSI_OPCODE_OR].emit = emit_or;
	bld_base->op_actions[TGSI_OPCODE_PK2H].fetch_args = pk2h_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_PK2H].emit = emit_pk2h;
	bld_base->op_actions[TGSI_OPCODE_POPC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POPC].intr_name = "llvm.ctpop.i32";
	bld_base->op_actions[TGSI_OPCODE_POW].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POW].intr_name = "llvm.pow.f32";
	bld_base->op_actions[TGSI_OPCODE_ROUND].emit = build_tgsi_intrinsic_nomem;
	/* NOTE(review): ROUND maps to llvm.rint (round per current mode)
	 * rather than llvm.round — presumably intentional for the HW's
	 * round-to-nearest-even; confirm before changing. */
	bld_base->op_actions[TGSI_OPCODE_ROUND].intr_name = "llvm.rint.f32";
	bld_base->op_actions[TGSI_OPCODE_RSQ].emit = emit_rsq;
	bld_base->op_actions[TGSI_OPCODE_SGE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SEQ].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SHL].emit = emit_shl;
	bld_base->op_actions[TGSI_OPCODE_SLE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SLT].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SNE].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SGT].emit = emit_set_cond;
	bld_base->op_actions[TGSI_OPCODE_SIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SIN].intr_name = "llvm.sin.f32";
	bld_base->op_actions[TGSI_OPCODE_SQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SQRT].intr_name = "llvm.sqrt.f32";
	bld_base->op_actions[TGSI_OPCODE_SSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].intr_name = "llvm.trunc.f32";
	bld_base->op_actions[TGSI_OPCODE_UADD].emit = emit_uadd;
	bld_base->op_actions[TGSI_OPCODE_UBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_UBFE].intr_name = "llvm.AMDGPU.bfe.u32";
	bld_base->op_actions[TGSI_OPCODE_UDIV].emit = emit_udiv;
	bld_base->op_actions[TGSI_OPCODE_UMAX].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_UMIN].emit = emit_minmax_int;
	bld_base->op_actions[TGSI_OPCODE_UMOD].emit = emit_umod;
	bld_base->op_actions[TGSI_OPCODE_USEQ].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USHR].emit = emit_ushr;
	bld_base->op_actions[TGSI_OPCODE_USLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USNE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U2F].emit = emit_u2f;
	bld_base->op_actions[TGSI_OPCODE_XOR].emit = emit_xor;
	bld_base->op_actions[TGSI_OPCODE_UCMP].emit = emit_ucmp;
	bld_base->op_actions[TGSI_OPCODE_UP2H].fetch_args = up2h_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_UP2H].emit = emit_up2h;
}
1715
1716 void radeon_llvm_create_func(struct radeon_llvm_context * ctx,
1717 LLVMTypeRef *return_types, unsigned num_return_elems,
1718 LLVMTypeRef *ParamTypes, unsigned ParamCount)
1719 {
1720 LLVMTypeRef main_fn_type, ret_type;
1721 LLVMBasicBlockRef main_fn_body;
1722
1723 if (num_return_elems)
1724 ret_type = LLVMStructTypeInContext(ctx->gallivm.context,
1725 return_types,
1726 num_return_elems, true);
1727 else
1728 ret_type = LLVMVoidTypeInContext(ctx->gallivm.context);
1729
1730 /* Setup the function */
1731 ctx->return_type = ret_type;
1732 main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
1733 ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, "main", main_fn_type);
1734 main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
1735 ctx->main_fn, "main_body");
1736 LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
1737 }
1738
1739 void radeon_llvm_finalize_module(struct radeon_llvm_context * ctx)
1740 {
1741 struct gallivm_state * gallivm = ctx->soa.bld_base.base.gallivm;
1742 const char *triple = LLVMGetTarget(gallivm->module);
1743 LLVMTargetLibraryInfoRef target_library_info;
1744
1745 /* Create the pass manager */
1746 gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(
1747 gallivm->module);
1748
1749 target_library_info = gallivm_create_target_library_info(triple);
1750 LLVMAddTargetLibraryInfo(target_library_info, gallivm->passmgr);
1751
1752 /* This pass should eliminate all the load and store instructions */
1753 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
1754
1755 /* Add some optimization passes */
1756 LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
1757 LLVMAddLICMPass(gallivm->passmgr);
1758 LLVMAddAggressiveDCEPass(gallivm->passmgr);
1759 LLVMAddCFGSimplificationPass(gallivm->passmgr);
1760 LLVMAddInstructionCombiningPass(gallivm->passmgr);
1761
1762 /* Run the pass */
1763 LLVMRunFunctionPassManager(gallivm->passmgr, ctx->main_fn);
1764
1765 LLVMDisposeBuilder(gallivm->builder);
1766 LLVMDisposePassManager(gallivm->passmgr);
1767 gallivm_dispose_target_library_info(target_library_info);
1768 }
1769
1770 void radeon_llvm_dispose(struct radeon_llvm_context * ctx)
1771 {
1772 LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
1773 LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);
1774 FREE(ctx->arrays);
1775 ctx->arrays = NULL;
1776 FREE(ctx->temps);
1777 ctx->temps = NULL;
1778 ctx->temps_count = 0;
1779 FREE(ctx->loop);
1780 ctx->loop = NULL;
1781 ctx->loop_depth_max = 0;
1782 FREE(ctx->branch);
1783 ctx->branch = NULL;
1784 ctx->branch_depth_max = 0;
1785 }