gallium/radeon: add basic code for setting shader return values
[mesa.git] src/gallium/drivers/radeon/radeon_setup_tgsi_llvm.c
1 /*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors: Tom Stellard <thomas.stellard@amd.com>
24 *
25 */
26 #include "radeon_llvm.h"
27
28 #include "gallivm/lp_bld_const.h"
29 #include "gallivm/lp_bld_gather.h"
30 #include "gallivm/lp_bld_flow.h"
31 #include "gallivm/lp_bld_init.h"
32 #include "gallivm/lp_bld_intr.h"
33 #include "gallivm/lp_bld_misc.h"
34 #include "gallivm/lp_bld_swizzle.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "util/u_debug.h"
40
41 #include <llvm-c/Core.h>
42 #include <llvm-c/Transforms/Scalar.h>
43
44 static struct radeon_llvm_loop * get_current_loop(struct radeon_llvm_context * ctx)
45 {
46 return ctx->loop_depth > 0 ? ctx->loop + (ctx->loop_depth - 1) : NULL;
47 }
48
49 static struct radeon_llvm_branch * get_current_branch(
50 struct radeon_llvm_context * ctx)
51 {
52 return ctx->branch_depth > 0 ?
53 ctx->branch + (ctx->branch_depth - 1) : NULL;
54 }
55
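/* Flatten a (TGSI register index, channel) pair into the SoA index used by
 * the per-channel temp/input arrays: four channels per register. */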
56 unsigned radeon_llvm_reg_index_soa(unsigned index, unsigned chan)
57 {
58 return (index * 4) + chan;
59 }
60
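/* Apply a TGSI swizzle by building an LLVM shufflevector whose mask is the
 * four channel selectors. */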
61 static LLVMValueRef emit_swizzle(
62 struct lp_build_tgsi_context * bld_base,
63 LLVMValueRef value,
64 unsigned swizzle_x,
65 unsigned swizzle_y,
66 unsigned swizzle_z,
67 unsigned swizzle_w)
68 {
69 LLVMValueRef swizzles[4];
70 LLVMTypeRef i32t =
71 LLVMInt32TypeInContext(bld_base->base.gallivm->context);
72
73 swizzles[0] = LLVMConstInt(i32t, swizzle_x, 0);
74 swizzles[1] = LLVMConstInt(i32t, swizzle_y, 0);
75 swizzles[2] = LLVMConstInt(i32t, swizzle_z, 0);
76 swizzles[3] = LLVMConstInt(i32t, swizzle_w, 0);
77
78 return LLVMBuildShuffleVector(bld_base->base.gallivm->builder,
79 value,
80 LLVMGetUndef(LLVMTypeOf(value)),
81 LLVMConstVector(swizzles, 4), "");
82 }
83
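/* Return the declared range for an indirectly addressed register. Known
 * temporary arrays use the range recorded at declaration time; everything
 * else falls back to the whole register file. */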
84 static struct tgsi_declaration_range
85 get_array_range(struct lp_build_tgsi_context *bld_base,
86 unsigned File, const struct tgsi_ind_register *reg)
87 {
88 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
89
90 if (File != TGSI_FILE_TEMPORARY || reg->ArrayID == 0 ||
91 reg->ArrayID > bld_base->info->array_max[TGSI_FILE_TEMPORARY]) {
92 struct tgsi_declaration_range range;
93 range.First = 0;
94 range.Last = bld_base->info->file_max[File];
95 return range;
96 }
97
98 return ctx->arrays[reg->ArrayID - 1];
99 }
100
101 static LLVMValueRef
102 emit_array_index(
103 struct lp_build_tgsi_soa_context *bld,
104 const struct tgsi_ind_register *reg,
105 unsigned offset)
106 {
107 struct gallivm_state * gallivm = bld->bld_base.base.gallivm;
108
109 LLVMValueRef addr = LLVMBuildLoad(gallivm->builder, bld->addr[reg->Index][reg->Swizzle], "");
110 return LLVMBuildAdd(gallivm->builder, addr, lp_build_const_int32(gallivm, offset), "");
111 }
112
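/* Pack two 32-bit channel values into a <2 x i32> vector and bitcast the
 * pair to a 64-bit double. */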
113 LLVMValueRef
114 radeon_llvm_emit_fetch_double(
115 struct lp_build_tgsi_context *bld_base,
116 LLVMValueRef ptr,
117 LLVMValueRef ptr2)
118 {
119 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
120 LLVMValueRef result;
121
122 result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
123
124 result = LLVMBuildInsertElement(builder,
125 result,
126 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr),
127 bld_base->int_bld.zero, "");
128 result = LLVMBuildInsertElement(builder,
129 result,
130 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr2),
131 bld_base->int_bld.one, "");
132 return bitcast(bld_base, TGSI_TYPE_DOUBLE, result);
133 }
134
135 static LLVMValueRef
136 emit_array_fetch(
137 struct lp_build_tgsi_context *bld_base,
138 unsigned File, enum tgsi_opcode_type type,
139 struct tgsi_declaration_range range,
140 unsigned swizzle)
141 {
142 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
143 struct gallivm_state * gallivm = bld->bld_base.base.gallivm;
144 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
145
146 unsigned i, size = range.Last - range.First + 1;
147 LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
148 LLVMValueRef result = LLVMGetUndef(vec);
149
150 struct tgsi_full_src_register tmp_reg = {};
151 tmp_reg.Register.File = File;
152
153 for (i = 0; i < size; ++i) {
154 tmp_reg.Register.Index = i + range.First;
155 LLVMValueRef temp = radeon_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
156 result = LLVMBuildInsertElement(builder, result, temp,
157 lp_build_const_int32(gallivm, i), "");
158 }
159 return result;
160 }
161
162 static bool uses_temp_indirect_addressing(
163 struct lp_build_tgsi_context *bld_base)
164 {
165 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
166 return (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY));
167 }
168
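/* Fetch one channel of a source operand. A swizzle of ~0 fetches all four
 * channels and gathers them into a vector; indirect operands fetch the whole
 * declared range and extract the addressed element. */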
169 LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
170 const struct tgsi_full_src_register *reg,
171 enum tgsi_opcode_type type,
172 unsigned swizzle)
173 {
174 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
175 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
176 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
177 LLVMValueRef result = NULL, ptr, ptr2;
178
179 if (swizzle == ~0) {
180 LLVMValueRef values[TGSI_NUM_CHANNELS];
181 unsigned chan;
182 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
183 values[chan] = radeon_llvm_emit_fetch(bld_base, reg, type, chan);
184 }
185 return lp_build_gather_values(bld_base->base.gallivm, values,
186 TGSI_NUM_CHANNELS);
187 }
188
189 if (reg->Register.Indirect) {
190 struct tgsi_declaration_range range = get_array_range(bld_base,
191 reg->Register.File, &reg->Indirect);
192 return LLVMBuildExtractElement(builder,
193 emit_array_fetch(bld_base, reg->Register.File, type, range, swizzle),
194 emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First),
195 "");
196 }
197
198 switch(reg->Register.File) {
199 case TGSI_FILE_IMMEDIATE: {
200 LLVMTypeRef ctype = tgsi2llvmtype(bld_base, type);
201 if (type == TGSI_TYPE_DOUBLE) {
202 result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
203 result = LLVMConstInsertElement(result,
204 bld->immediates[reg->Register.Index][swizzle],
205 bld_base->int_bld.zero);
206 result = LLVMConstInsertElement(result,
207 bld->immediates[reg->Register.Index][swizzle + 1],
208 bld_base->int_bld.one);
209 return LLVMConstBitCast(result, ctype);
210 } else {
211 return LLVMConstBitCast(bld->immediates[reg->Register.Index][swizzle], ctype);
212 }
213 }
214
215 case TGSI_FILE_INPUT:
216 result = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle)];
217 if (type == TGSI_TYPE_DOUBLE) {
218 ptr = result;
219 ptr2 = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle + 1)];
220 return radeon_llvm_emit_fetch_double(bld_base, ptr, ptr2);
221 }
222 break;
223
224 case TGSI_FILE_TEMPORARY:
225 if (reg->Register.Index >= ctx->temps_count)
226 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
227 if (uses_temp_indirect_addressing(bld_base)) {
228 ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle);
229 break;
230 }
231 ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
232 if (type == TGSI_TYPE_DOUBLE) {
233 ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
234 return radeon_llvm_emit_fetch_double(bld_base,
235 LLVMBuildLoad(builder, ptr, ""),
236 LLVMBuildLoad(builder, ptr2, ""));
237 }
238 result = LLVMBuildLoad(builder, ptr, "");
239 break;
240
241 case TGSI_FILE_OUTPUT:
242 ptr = lp_get_output_ptr(bld, reg->Register.Index, swizzle);
243 if (type == TGSI_TYPE_DOUBLE) {
244 ptr2 = lp_get_output_ptr(bld, reg->Register.Index, swizzle + 1);
245 return radeon_llvm_emit_fetch_double(bld_base,
246 LLVMBuildLoad(builder, ptr, ""),
247 LLVMBuildLoad(builder, ptr2, ""));
248 }
249 result = LLVMBuildLoad(builder, ptr, "");
250 break;
251
252 default:
253 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
254 }
255
256 return bitcast(bld_base, type, result);
257 }
258
259 static LLVMValueRef fetch_system_value(
260 struct lp_build_tgsi_context * bld_base,
261 const struct tgsi_full_src_register *reg,
262 enum tgsi_opcode_type type,
263 unsigned swizzle)
264 {
265 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
266 struct gallivm_state *gallivm = bld_base->base.gallivm;
267
268 LLVMValueRef cval = ctx->system_values[reg->Register.Index];
269 if (LLVMGetTypeKind(LLVMTypeOf(cval)) == LLVMVectorTypeKind) {
270 cval = LLVMBuildExtractElement(gallivm->builder, cval,
271 lp_build_const_int32(gallivm, swizzle), "");
272 }
273 return bitcast(bld_base, type, cval);
274 }
275
276 static LLVMValueRef si_build_alloca_undef(struct gallivm_state *gallivm,
277 LLVMTypeRef type,
278 const char *name)
279 {
280 LLVMValueRef ptr = lp_build_alloca(gallivm, type, name);
281 LLVMBuildStore(gallivm->builder, LLVMGetUndef(type), ptr);
282 return ptr;
283 }
284
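/* Allocate storage for declared registers: allocas for address registers,
 * temporaries and outputs, driver callbacks for inputs and system values.
 * Array declarations record their range for later indirect addressing. */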
285 static void emit_declaration(
286 struct lp_build_tgsi_context * bld_base,
287 const struct tgsi_full_declaration *decl)
288 {
289 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
290 unsigned first, last, i, idx;
291 switch(decl->Declaration.File) {
292 case TGSI_FILE_ADDRESS:
293 {
294 unsigned idx;
295 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
296 unsigned chan;
297 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
298 ctx->soa.addr[idx][chan] = si_build_alloca_undef(
299 &ctx->gallivm,
300 ctx->soa.bld_base.uint_bld.elem_type, "");
301 }
302 }
303 break;
304 }
305
306 case TGSI_FILE_TEMPORARY:
307 if (decl->Declaration.Array) {
308 if (!ctx->arrays) {
309 int size = bld_base->info->array_max[TGSI_FILE_TEMPORARY];
310 ctx->arrays = MALLOC(sizeof(ctx->arrays[0]) * size);
311 }
312
313 ctx->arrays[decl->Array.ArrayID - 1] = decl->Range;
314 }
315 if (uses_temp_indirect_addressing(bld_base)) {
316 lp_emit_declaration_soa(bld_base, decl);
317 break;
318 }
319 first = decl->Range.First;
320 last = decl->Range.Last;
321 if (!ctx->temps_count) {
322 ctx->temps_count = bld_base->info->file_max[TGSI_FILE_TEMPORARY] + 1;
323 ctx->temps = MALLOC(TGSI_NUM_CHANNELS * ctx->temps_count * sizeof(LLVMValueRef));
324 }
325 for (idx = first; idx <= last; idx++) {
326 for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
327 ctx->temps[idx * TGSI_NUM_CHANNELS + i] =
328 si_build_alloca_undef(bld_base->base.gallivm,
329 bld_base->base.vec_type,
330 "temp");
331 }
332 }
333 break;
334
335 case TGSI_FILE_INPUT:
336 {
337 unsigned idx;
338 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
339 if (ctx->load_input)
340 ctx->load_input(ctx, idx, decl);
341 }
342 }
343 break;
344
345 case TGSI_FILE_SYSTEM_VALUE:
346 {
347 unsigned idx;
348 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
349 ctx->load_system_value(ctx, idx, decl);
350 }
351 }
352 break;
353
354 case TGSI_FILE_OUTPUT:
355 {
356 unsigned idx;
357 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
358 unsigned chan;
359 assert(idx < RADEON_LLVM_MAX_OUTPUTS);
360 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
361 ctx->soa.outputs[idx][chan] = si_build_alloca_undef(
362 &ctx->gallivm,
363 ctx->soa.bld_base.base.elem_type, "");
364 }
365 }
366
367 ctx->output_reg_count = MAX2(ctx->output_reg_count,
368 decl->Range.Last + 1);
369 break;
370 }
371
372 default:
373 break;
374 }
375 }
376
377 LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
378 LLVMValueRef value)
379 {
380 struct lp_build_emit_data clamp_emit_data;
381
382 memset(&clamp_emit_data, 0, sizeof(clamp_emit_data));
383 clamp_emit_data.arg_count = 3;
384 clamp_emit_data.args[0] = value;
385 clamp_emit_data.args[2] = bld_base->base.one;
386 clamp_emit_data.args[1] = bld_base->base.zero;
387
388 return lp_build_emit_llvm(bld_base, TGSI_OPCODE_CLAMP,
389 &clamp_emit_data);
390 }
391
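/* Store per-channel instruction results to the destination register,
 * handling saturation, the address file, indirect destinations
 * (read-modify-write of the whole array range) and doubles, which are
 * written as two 32-bit stores. */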
392 void radeon_llvm_emit_store(
393 struct lp_build_tgsi_context * bld_base,
394 const struct tgsi_full_instruction * inst,
395 const struct tgsi_opcode_info * info,
396 LLVMValueRef dst[4])
397 {
398 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
399 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
400 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
401 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
402 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
403 LLVMValueRef temp_ptr, temp_ptr2 = NULL;
404 unsigned chan, chan_index;
405 boolean is_vec_store = FALSE;
406 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
407
408 if (dst[0]) {
409 LLVMTypeKind k = LLVMGetTypeKind(LLVMTypeOf(dst[0]));
410 is_vec_store = (k == LLVMVectorTypeKind);
411 }
412
413 if (is_vec_store) {
414 LLVMValueRef values[4] = {};
415 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan) {
416 LLVMValueRef index = lp_build_const_int32(gallivm, chan);
417 values[chan] = LLVMBuildExtractElement(gallivm->builder,
418 dst[0], index, "");
419 }
420 bld_base->emit_store(bld_base, inst, info, values);
421 return;
422 }
423
424 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
425 LLVMValueRef value = dst[chan_index];
426
427 if (dtype == TGSI_TYPE_DOUBLE && (chan_index == 1 || chan_index == 3))
428 continue;
429 if (inst->Instruction.Saturate)
430 value = radeon_llvm_saturate(bld_base, value);
431
432 if (reg->Register.File == TGSI_FILE_ADDRESS) {
433 temp_ptr = bld->addr[reg->Register.Index][chan_index];
434 LLVMBuildStore(builder, value, temp_ptr);
435 continue;
436 }
437
438 if (dtype != TGSI_TYPE_DOUBLE)
439 value = bitcast(bld_base, TGSI_TYPE_FLOAT, value);
440
441 if (reg->Register.Indirect) {
442 struct tgsi_declaration_range range = get_array_range(bld_base,
443 reg->Register.File, &reg->Indirect);
444
445 unsigned i, size = range.Last - range.First + 1;
446 LLVMValueRef array = LLVMBuildInsertElement(builder,
447 emit_array_fetch(bld_base, reg->Register.File, TGSI_TYPE_FLOAT, range, chan_index),
448 value, emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First), "");
449
450 for (i = 0; i < size; ++i) {
451 switch(reg->Register.File) {
452 case TGSI_FILE_OUTPUT:
453 temp_ptr = bld->outputs[i + range.First][chan_index];
454 break;
455
456 case TGSI_FILE_TEMPORARY:
457 if (range.First + i >= ctx->temps_count)
458 continue;
459 if (uses_temp_indirect_addressing(bld_base))
460 temp_ptr = lp_get_temp_ptr_soa(bld, i + range.First, chan_index);
461 else
462 temp_ptr = ctx->temps[(i + range.First) * TGSI_NUM_CHANNELS + chan_index];
463 break;
464
465 default:
466 return;
467 }
468 value = LLVMBuildExtractElement(builder, array,
469 lp_build_const_int32(gallivm, i), "");
470 LLVMBuildStore(builder, value, temp_ptr);
471 }
472
473 } else {
474 switch(reg->Register.File) {
475 case TGSI_FILE_OUTPUT:
476 temp_ptr = bld->outputs[reg->Register.Index][chan_index];
477 if (dtype == TGSI_TYPE_DOUBLE)
478 temp_ptr2 = bld->outputs[reg->Register.Index][chan_index + 1];
479 break;
480
481 case TGSI_FILE_TEMPORARY:
482 if (reg->Register.Index >= ctx->temps_count)
483 continue;
484 if (uses_temp_indirect_addressing(bld_base)) {
485 temp_ptr = NULL;
486 break;
487 }
488 temp_ptr = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index];
489 if (dtype == TGSI_TYPE_DOUBLE)
490 temp_ptr2 = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index + 1];
491
492 break;
493
494 default:
495 return;
496 }
497 if (dtype != TGSI_TYPE_DOUBLE)
498 LLVMBuildStore(builder, value, temp_ptr);
499 else {
500 LLVMValueRef ptr = LLVMBuildBitCast(builder, value,
501 LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), 2), "");
502 LLVMValueRef val2;
503 value = LLVMBuildExtractElement(builder, ptr,
504 bld_base->uint_bld.zero, "");
505 val2 = LLVMBuildExtractElement(builder, ptr,
506 bld_base->uint_bld.one, "");
507
508 LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, value), temp_ptr);
509 LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, val2), temp_ptr2);
510 }
511 }
512 }
513 }
514
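/* BGNLOOP: create the LOOP and ENDLOOP blocks, branch into the loop body and
 * push the pair onto the loop stack, growing it if needed. */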
515 static void bgnloop_emit(
516 const struct lp_build_tgsi_action * action,
517 struct lp_build_tgsi_context * bld_base,
518 struct lp_build_emit_data * emit_data)
519 {
520 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
521 struct gallivm_state * gallivm = bld_base->base.gallivm;
522 LLVMBasicBlockRef loop_block;
523 LLVMBasicBlockRef endloop_block;
524 endloop_block = LLVMAppendBasicBlockInContext(gallivm->context,
525 ctx->main_fn, "ENDLOOP");
526 loop_block = LLVMInsertBasicBlockInContext(gallivm->context,
527 endloop_block, "LOOP");
528 LLVMBuildBr(gallivm->builder, loop_block);
529 LLVMPositionBuilderAtEnd(gallivm->builder, loop_block);
530
531 if (++ctx->loop_depth > ctx->loop_depth_max) {
532 unsigned new_max = ctx->loop_depth_max << 1;
533
534 if (!new_max)
535 new_max = RADEON_LLVM_INITIAL_CF_DEPTH;
536
537 ctx->loop = REALLOC(ctx->loop, ctx->loop_depth_max *
538 sizeof(ctx->loop[0]),
539 new_max * sizeof(ctx->loop[0]));
540 ctx->loop_depth_max = new_max;
541 }
542
543 ctx->loop[ctx->loop_depth - 1].loop_block = loop_block;
544 ctx->loop[ctx->loop_depth - 1].endloop_block = endloop_block;
545 }
546
547 static void brk_emit(
548 const struct lp_build_tgsi_action * action,
549 struct lp_build_tgsi_context * bld_base,
550 struct lp_build_emit_data * emit_data)
551 {
552 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
553 struct gallivm_state * gallivm = bld_base->base.gallivm;
554 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
555
556 LLVMBuildBr(gallivm->builder, current_loop->endloop_block);
557 }
558
559 static void cont_emit(
560 const struct lp_build_tgsi_action * action,
561 struct lp_build_tgsi_context * bld_base,
562 struct lp_build_emit_data * emit_data)
563 {
564 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
565 struct gallivm_state * gallivm = bld_base->base.gallivm;
566 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
567
568 LLVMBuildBr(gallivm->builder, current_loop->loop_block);
569 }
570
571 static void else_emit(
572 const struct lp_build_tgsi_action * action,
573 struct lp_build_tgsi_context * bld_base,
574 struct lp_build_emit_data * emit_data)
575 {
576 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
577 struct gallivm_state * gallivm = bld_base->base.gallivm;
578 struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
579 LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);
580
581 /* We need to add a terminator to the current block if the previous
582      * instruction was an ENDIF. Example:
583 * IF
584 * [code]
585 * IF
586 * [code]
587 * ELSE
588 * [code]
589 * ENDIF <--
590      * ELSE <--
591 * [code]
592 * ENDIF
593 */
594
595 if (current_block != current_branch->if_block) {
596 LLVMBuildBr(gallivm->builder, current_branch->endif_block);
597 }
598 if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
599 LLVMBuildBr(gallivm->builder, current_branch->endif_block);
600 }
601 current_branch->has_else = 1;
602 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
603 }
604
605 static void endif_emit(
606 const struct lp_build_tgsi_action * action,
607 struct lp_build_tgsi_context * bld_base,
608 struct lp_build_emit_data * emit_data)
609 {
610 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
611 struct gallivm_state * gallivm = bld_base->base.gallivm;
612 struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
613 LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);
614
615 /* If we have consecutive ENDIF instructions, then the first ENDIF
616 * will not have a terminator, so we need to add one. */
617 if (current_block != current_branch->if_block
618 && current_block != current_branch->else_block
619 && !LLVMGetBasicBlockTerminator(current_block)) {
620
621 LLVMBuildBr(gallivm->builder, current_branch->endif_block);
622 }
623 if (!LLVMGetBasicBlockTerminator(current_branch->else_block)) {
624 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
625 LLVMBuildBr(gallivm->builder, current_branch->endif_block);
626 }
627
628 if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
629 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->if_block);
630 LLVMBuildBr(gallivm->builder, current_branch->endif_block);
631 }
632
633 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->endif_block);
634 ctx->branch_depth--;
635 }
636
637 static void endloop_emit(
638 const struct lp_build_tgsi_action * action,
639 struct lp_build_tgsi_context * bld_base,
640 struct lp_build_emit_data * emit_data)
641 {
642 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
643 struct gallivm_state * gallivm = bld_base->base.gallivm;
644 struct radeon_llvm_loop * current_loop = get_current_loop(ctx);
645
646 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(gallivm->builder))) {
647 LLVMBuildBr(gallivm->builder, current_loop->loop_block);
648 }
649
650 LLVMPositionBuilderAtEnd(gallivm->builder, current_loop->endloop_block);
651 ctx->loop_depth--;
652 }
653
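/* Shared IF/UIF helper: create IF, ELSE and ENDIF blocks, emit the
 * conditional branch and push the branch state, growing the stack if needed. */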
654 static void if_cond_emit(
655 const struct lp_build_tgsi_action * action,
656 struct lp_build_tgsi_context * bld_base,
657 struct lp_build_emit_data * emit_data,
658 LLVMValueRef cond)
659 {
660 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
661 struct gallivm_state * gallivm = bld_base->base.gallivm;
662 LLVMBasicBlockRef if_block, else_block, endif_block;
663
664 endif_block = LLVMAppendBasicBlockInContext(gallivm->context,
665 ctx->main_fn, "ENDIF");
666 if_block = LLVMInsertBasicBlockInContext(gallivm->context,
667 endif_block, "IF");
668 else_block = LLVMInsertBasicBlockInContext(gallivm->context,
669 endif_block, "ELSE");
670 LLVMBuildCondBr(gallivm->builder, cond, if_block, else_block);
671 LLVMPositionBuilderAtEnd(gallivm->builder, if_block);
672
673 if (++ctx->branch_depth > ctx->branch_depth_max) {
674 unsigned new_max = ctx->branch_depth_max << 1;
675
676 if (!new_max)
677 new_max = RADEON_LLVM_INITIAL_CF_DEPTH;
678
679 ctx->branch = REALLOC(ctx->branch, ctx->branch_depth_max *
680 sizeof(ctx->branch[0]),
681 new_max * sizeof(ctx->branch[0]));
682 ctx->branch_depth_max = new_max;
683 }
684
685 ctx->branch[ctx->branch_depth - 1].endif_block = endif_block;
686 ctx->branch[ctx->branch_depth - 1].if_block = if_block;
687 ctx->branch[ctx->branch_depth - 1].else_block = else_block;
688 ctx->branch[ctx->branch_depth - 1].has_else = 0;
689 }
690
691 static void if_emit(
692 const struct lp_build_tgsi_action * action,
693 struct lp_build_tgsi_context * bld_base,
694 struct lp_build_emit_data * emit_data)
695 {
696 struct gallivm_state * gallivm = bld_base->base.gallivm;
697 LLVMValueRef cond;
698
699 cond = LLVMBuildFCmp(gallivm->builder, LLVMRealUNE,
700 emit_data->args[0],
701 bld_base->base.zero, "");
702
703 if_cond_emit(action, bld_base, emit_data, cond);
704 }
705
706 static void uif_emit(
707 const struct lp_build_tgsi_action * action,
708 struct lp_build_tgsi_context * bld_base,
709 struct lp_build_emit_data * emit_data)
710 {
711 struct gallivm_state * gallivm = bld_base->base.gallivm;
712 LLVMValueRef cond;
713
714 cond = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
715 bitcast(bld_base, TGSI_TYPE_UNSIGNED, emit_data->args[0]),
716 bld_base->int_bld.zero, "");
717
718 if_cond_emit(action, bld_base, emit_data, cond);
719 }
720
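/* KILL_IF: OR together per-channel "source < 0" tests, then select -1.0 when
 * any channel is negative and 0.0 otherwise as the argument for the kill
 * intrinsic. */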
721 static void kill_if_fetch_args(
722 struct lp_build_tgsi_context * bld_base,
723 struct lp_build_emit_data * emit_data)
724 {
725 const struct tgsi_full_instruction * inst = emit_data->inst;
726 struct gallivm_state *gallivm = bld_base->base.gallivm;
727 LLVMBuilderRef builder = gallivm->builder;
728 unsigned i;
729 LLVMValueRef conds[TGSI_NUM_CHANNELS];
730
731 for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
732 LLVMValueRef value = lp_build_emit_fetch(bld_base, inst, 0, i);
733 conds[i] = LLVMBuildFCmp(builder, LLVMRealOLT, value,
734 bld_base->base.zero, "");
735 }
736
737 /* Or the conditions together */
738 for (i = TGSI_NUM_CHANNELS - 1; i > 0; i--) {
739 conds[i - 1] = LLVMBuildOr(builder, conds[i], conds[i - 1], "");
740 }
741
742 emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);
743 emit_data->arg_count = 1;
744 emit_data->args[0] = LLVMBuildSelect(builder, conds[0],
745 lp_build_const_float(gallivm, -1.0f),
746 bld_base->base.zero, "");
747 }
748
749 static void kil_emit(
750 const struct lp_build_tgsi_action * action,
751 struct lp_build_tgsi_context * bld_base,
752 struct lp_build_emit_data * emit_data)
753 {
754 unsigned i;
755 for (i = 0; i < emit_data->arg_count; i++) {
756 emit_data->output[i] = lp_build_intrinsic_unary(
757 bld_base->base.gallivm->builder,
758 action->intr_name,
759 emit_data->dst_type, emit_data->args[i]);
760 }
761 }
762
763 static void radeon_llvm_cube_to_2d_coords(struct lp_build_tgsi_context *bld_base,
764 LLVMValueRef *in, LLVMValueRef *out)
765 {
766 struct gallivm_state * gallivm = bld_base->base.gallivm;
767 LLVMBuilderRef builder = gallivm->builder;
768 LLVMTypeRef type = bld_base->base.elem_type;
769 LLVMValueRef coords[4];
770 LLVMValueRef mad_args[3];
771 LLVMValueRef v, cube_vec;
772 unsigned i;
773
774 cube_vec = lp_build_gather_values(bld_base->base.gallivm, in, 4);
775 v = lp_build_intrinsic(builder, "llvm.AMDGPU.cube", LLVMVectorType(type, 4),
776 &cube_vec, 1, LLVMReadNoneAttribute);
777
778 for (i = 0; i < 4; ++i)
779 coords[i] = LLVMBuildExtractElement(builder, v,
780 lp_build_const_int32(gallivm, i), "");
781
782 coords[2] = lp_build_intrinsic(builder, "llvm.fabs.f32",
783 type, &coords[2], 1, LLVMReadNoneAttribute);
784 coords[2] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_RCP, coords[2]);
785
786 mad_args[1] = coords[2];
787 mad_args[2] = LLVMConstReal(type, 1.5);
788
789 mad_args[0] = coords[0];
790 coords[0] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
791 mad_args[0], mad_args[1], mad_args[2]);
792
793 mad_args[0] = coords[1];
794 coords[1] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
795 mad_args[0], mad_args[1], mad_args[2]);
796
797 	/* apply xyz = yxw swizzle to coords */
798 out[0] = coords[1];
799 out[1] = coords[0];
800 out[2] = coords[3];
801 }
802
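/* Rewrite cube texture coordinates (and, for TXD, the derivatives) into 2D
 * face coordinates, folding the array index into Z for cube arrays and
 * preserving any compare/LOD/bias value in W. */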
803 void radeon_llvm_emit_prepare_cube_coords(
804 struct lp_build_tgsi_context * bld_base,
805 struct lp_build_emit_data * emit_data,
806 LLVMValueRef *coords_arg,
807 LLVMValueRef *derivs_arg)
808 {
809
810 unsigned target = emit_data->inst->Texture.Texture;
811 unsigned opcode = emit_data->inst->Instruction.Opcode;
812 struct gallivm_state * gallivm = bld_base->base.gallivm;
813 LLVMBuilderRef builder = gallivm->builder;
814 LLVMValueRef coords[4];
815 unsigned i;
816
817 radeon_llvm_cube_to_2d_coords(bld_base, coords_arg, coords);
818
819 if (opcode == TGSI_OPCODE_TXD && derivs_arg) {
820 LLVMValueRef derivs[4];
821 int axis;
822
823 /* Convert cube derivatives to 2D derivatives. */
824 for (axis = 0; axis < 2; axis++) {
825 LLVMValueRef shifted_cube_coords[4], shifted_coords[4];
826
827 /* Shift the cube coordinates by the derivatives to get
828 * the cube coordinates of the "neighboring pixel".
829 */
830 for (i = 0; i < 3; i++)
831 shifted_cube_coords[i] =
832 LLVMBuildFAdd(builder, coords_arg[i],
833 derivs_arg[axis*3+i], "");
834 shifted_cube_coords[3] = LLVMGetUndef(bld_base->base.elem_type);
835
836 /* Project the shifted cube coordinates onto the face. */
837 radeon_llvm_cube_to_2d_coords(bld_base, shifted_cube_coords,
838 shifted_coords);
839
840 /* Subtract both sets of 2D coordinates to get 2D derivatives.
841 * This won't work if the shifted coordinates ended up
842 * in a different face.
843 */
844 for (i = 0; i < 2; i++)
845 derivs[axis * 2 + i] =
846 LLVMBuildFSub(builder, shifted_coords[i],
847 coords[i], "");
848 }
849
850 memcpy(derivs_arg, derivs, sizeof(derivs));
851 }
852
853 if (target == TGSI_TEXTURE_CUBE_ARRAY ||
854 target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
855 /* for cube arrays coord.z = coord.w(array_index) * 8 + face */
856 /* coords_arg.w component - array_index for cube arrays */
857 coords[2] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
858 coords_arg[3], lp_build_const_float(gallivm, 8.0), coords[2]);
859 }
860
861 /* Preserve compare/lod/bias. Put it in coords.w. */
862 if (opcode == TGSI_OPCODE_TEX2 ||
863 opcode == TGSI_OPCODE_TXB2 ||
864 opcode == TGSI_OPCODE_TXL2) {
865 coords[3] = coords_arg[4];
866 } else if (opcode == TGSI_OPCODE_TXB ||
867 opcode == TGSI_OPCODE_TXL ||
868 target == TGSI_TEXTURE_SHADOWCUBE) {
869 coords[3] = coords_arg[3];
870 }
871
872 memcpy(coords_arg, coords, sizeof(coords));
873 }
874
875 static void emit_icmp(
876 const struct lp_build_tgsi_action * action,
877 struct lp_build_tgsi_context * bld_base,
878 struct lp_build_emit_data * emit_data)
879 {
880 unsigned pred;
881 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
882 LLVMContextRef context = bld_base->base.gallivm->context;
883
884 switch (emit_data->inst->Instruction.Opcode) {
885 case TGSI_OPCODE_USEQ: pred = LLVMIntEQ; break;
886 case TGSI_OPCODE_USNE: pred = LLVMIntNE; break;
887 case TGSI_OPCODE_USGE: pred = LLVMIntUGE; break;
888 case TGSI_OPCODE_USLT: pred = LLVMIntULT; break;
889 case TGSI_OPCODE_ISGE: pred = LLVMIntSGE; break;
890 case TGSI_OPCODE_ISLT: pred = LLVMIntSLT; break;
891 default:
892 assert(!"unknown instruction");
893 pred = 0;
894 break;
895 }
896
897 LLVMValueRef v = LLVMBuildICmp(builder, pred,
898 emit_data->args[0], emit_data->args[1],"");
899
900 v = LLVMBuildSExtOrBitCast(builder, v,
901 LLVMInt32TypeInContext(context), "");
902
903 emit_data->output[emit_data->chan] = v;
904 }
905
906 static void emit_ucmp(
907 const struct lp_build_tgsi_action * action,
908 struct lp_build_tgsi_context * bld_base,
909 struct lp_build_emit_data * emit_data)
910 {
911 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
912
913 LLVMValueRef arg0 = LLVMBuildBitCast(builder, emit_data->args[0],
914 bld_base->uint_bld.elem_type, "");
915
916 LLVMValueRef v = LLVMBuildICmp(builder, LLVMIntNE, arg0,
917 bld_base->uint_bld.zero, "");
918
919 emit_data->output[emit_data->chan] =
920 LLVMBuildSelect(builder, v, emit_data->args[1], emit_data->args[2], "");
921 }
922
923 static void emit_cmp(const struct lp_build_tgsi_action *action,
924 struct lp_build_tgsi_context *bld_base,
925 struct lp_build_emit_data *emit_data)
926 {
927 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
928 LLVMValueRef cond, *args = emit_data->args;
929
930 cond = LLVMBuildFCmp(builder, LLVMRealOLT, args[0],
931 bld_base->base.zero, "");
932
933 emit_data->output[emit_data->chan] =
934 LLVMBuildSelect(builder, cond, args[1], args[2], "");
935 }
936
937 static void emit_set_cond(
938 const struct lp_build_tgsi_action *action,
939 struct lp_build_tgsi_context * bld_base,
940 struct lp_build_emit_data * emit_data)
941 {
942 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
943 LLVMRealPredicate pred;
944 LLVMValueRef cond;
945
946 	/* Use ordered comparisons for everything except NE; unordered NE
947 	 * is the usual convention for float comparisons.
948 	 */
949 switch (emit_data->inst->Instruction.Opcode) {
950 case TGSI_OPCODE_SGE: pred = LLVMRealOGE; break;
951 case TGSI_OPCODE_SEQ: pred = LLVMRealOEQ; break;
952 case TGSI_OPCODE_SLE: pred = LLVMRealOLE; break;
953 case TGSI_OPCODE_SLT: pred = LLVMRealOLT; break;
954 case TGSI_OPCODE_SNE: pred = LLVMRealUNE; break;
955 case TGSI_OPCODE_SGT: pred = LLVMRealOGT; break;
956 default: assert(!"unknown instruction"); pred = 0; break;
957 }
958
959 cond = LLVMBuildFCmp(builder,
960 pred, emit_data->args[0], emit_data->args[1], "");
961
962 emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
963 cond, bld_base->base.one, bld_base->base.zero, "");
964 }
965
966 static void emit_fcmp(
967 const struct lp_build_tgsi_action *action,
968 struct lp_build_tgsi_context * bld_base,
969 struct lp_build_emit_data * emit_data)
970 {
971 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
972 LLVMContextRef context = bld_base->base.gallivm->context;
973 LLVMRealPredicate pred;
974
975 	/* Use ordered comparisons for everything except NE; unordered NE
976 	 * is the usual convention for float comparisons.
977 	 */
978 switch (emit_data->inst->Instruction.Opcode) {
979 case TGSI_OPCODE_FSEQ: pred = LLVMRealOEQ; break;
980 case TGSI_OPCODE_FSGE: pred = LLVMRealOGE; break;
981 case TGSI_OPCODE_FSLT: pred = LLVMRealOLT; break;
982 case TGSI_OPCODE_FSNE: pred = LLVMRealUNE; break;
983 default: assert(!"unknown instruction"); pred = 0; break;
984 }
985
986 LLVMValueRef v = LLVMBuildFCmp(builder, pred,
987 emit_data->args[0], emit_data->args[1],"");
988
989 v = LLVMBuildSExtOrBitCast(builder, v,
990 LLVMInt32TypeInContext(context), "");
991
992 emit_data->output[emit_data->chan] = v;
993 }
994
995 static void emit_dcmp(
996 const struct lp_build_tgsi_action *action,
997 struct lp_build_tgsi_context * bld_base,
998 struct lp_build_emit_data * emit_data)
999 {
1000 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1001 LLVMContextRef context = bld_base->base.gallivm->context;
1002 LLVMRealPredicate pred;
1003
1004 	/* Use ordered comparisons for everything except NE; unordered NE
1005 	 * is the usual convention for float comparisons.
1006 	 */
1007 switch (emit_data->inst->Instruction.Opcode) {
1008 case TGSI_OPCODE_DSEQ: pred = LLVMRealOEQ; break;
1009 case TGSI_OPCODE_DSGE: pred = LLVMRealOGE; break;
1010 case TGSI_OPCODE_DSLT: pred = LLVMRealOLT; break;
1011 case TGSI_OPCODE_DSNE: pred = LLVMRealUNE; break;
1012 default: assert(!"unknown instruction"); pred = 0; break;
1013 }
1014
1015 LLVMValueRef v = LLVMBuildFCmp(builder, pred,
1016 emit_data->args[0], emit_data->args[1],"");
1017
1018 v = LLVMBuildSExtOrBitCast(builder, v,
1019 LLVMInt32TypeInContext(context), "");
1020
1021 emit_data->output[emit_data->chan] = v;
1022 }
1023
1024 static void emit_not(
1025 const struct lp_build_tgsi_action * action,
1026 struct lp_build_tgsi_context * bld_base,
1027 struct lp_build_emit_data * emit_data)
1028 {
1029 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1030 LLVMValueRef v = bitcast(bld_base, TGSI_TYPE_UNSIGNED,
1031 emit_data->args[0]);
1032 emit_data->output[emit_data->chan] = LLVMBuildNot(builder, v, "");
1033 }
1034
1035 static void emit_arl(
1036 const struct lp_build_tgsi_action * action,
1037 struct lp_build_tgsi_context * bld_base,
1038 struct lp_build_emit_data * emit_data)
1039 {
1040 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1041 LLVMValueRef floor_index = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_FLR, emit_data->args[0]);
1042 emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
1043 floor_index, bld_base->base.int_elem_type , "");
1044 }
1045
1046 static void emit_and(
1047 const struct lp_build_tgsi_action * action,
1048 struct lp_build_tgsi_context * bld_base,
1049 struct lp_build_emit_data * emit_data)
1050 {
1051 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1052 emit_data->output[emit_data->chan] = LLVMBuildAnd(builder,
1053 emit_data->args[0], emit_data->args[1], "");
1054 }
1055
1056 static void emit_or(
1057 const struct lp_build_tgsi_action * action,
1058 struct lp_build_tgsi_context * bld_base,
1059 struct lp_build_emit_data * emit_data)
1060 {
1061 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1062 emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
1063 emit_data->args[0], emit_data->args[1], "");
1064 }
1065
1066 static void emit_uadd(
1067 const struct lp_build_tgsi_action * action,
1068 struct lp_build_tgsi_context * bld_base,
1069 struct lp_build_emit_data * emit_data)
1070 {
1071 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1072 emit_data->output[emit_data->chan] = LLVMBuildAdd(builder,
1073 emit_data->args[0], emit_data->args[1], "");
1074 }
1075
1076 static void emit_udiv(
1077 const struct lp_build_tgsi_action * action,
1078 struct lp_build_tgsi_context * bld_base,
1079 struct lp_build_emit_data * emit_data)
1080 {
1081 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1082 emit_data->output[emit_data->chan] = LLVMBuildUDiv(builder,
1083 emit_data->args[0], emit_data->args[1], "");
1084 }
1085
1086 static void emit_idiv(
1087 const struct lp_build_tgsi_action * action,
1088 struct lp_build_tgsi_context * bld_base,
1089 struct lp_build_emit_data * emit_data)
1090 {
1091 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1092 emit_data->output[emit_data->chan] = LLVMBuildSDiv(builder,
1093 emit_data->args[0], emit_data->args[1], "");
1094 }
1095
1096 static void emit_mod(
1097 const struct lp_build_tgsi_action * action,
1098 struct lp_build_tgsi_context * bld_base,
1099 struct lp_build_emit_data * emit_data)
1100 {
1101 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1102 emit_data->output[emit_data->chan] = LLVMBuildSRem(builder,
1103 emit_data->args[0], emit_data->args[1], "");
1104 }
1105
1106 static void emit_umod(
1107 const struct lp_build_tgsi_action * action,
1108 struct lp_build_tgsi_context * bld_base,
1109 struct lp_build_emit_data * emit_data)
1110 {
1111 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1112 emit_data->output[emit_data->chan] = LLVMBuildURem(builder,
1113 emit_data->args[0], emit_data->args[1], "");
1114 }
1115
1116 static void emit_shl(
1117 const struct lp_build_tgsi_action * action,
1118 struct lp_build_tgsi_context * bld_base,
1119 struct lp_build_emit_data * emit_data)
1120 {
1121 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1122 emit_data->output[emit_data->chan] = LLVMBuildShl(builder,
1123 emit_data->args[0], emit_data->args[1], "");
1124 }
1125
1126 static void emit_ushr(
1127 const struct lp_build_tgsi_action * action,
1128 struct lp_build_tgsi_context * bld_base,
1129 struct lp_build_emit_data * emit_data)
1130 {
1131 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1132 emit_data->output[emit_data->chan] = LLVMBuildLShr(builder,
1133 emit_data->args[0], emit_data->args[1], "");
1134 }
1135 static void emit_ishr(
1136 const struct lp_build_tgsi_action * action,
1137 struct lp_build_tgsi_context * bld_base,
1138 struct lp_build_emit_data * emit_data)
1139 {
1140 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1141 emit_data->output[emit_data->chan] = LLVMBuildAShr(builder,
1142 emit_data->args[0], emit_data->args[1], "");
1143 }
1144
1145 static void emit_xor(
1146 const struct lp_build_tgsi_action * action,
1147 struct lp_build_tgsi_context * bld_base,
1148 struct lp_build_emit_data * emit_data)
1149 {
1150 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1151 emit_data->output[emit_data->chan] = LLVMBuildXor(builder,
1152 emit_data->args[0], emit_data->args[1], "");
1153 }
1154
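/* SSG/ISSG: compute the sign of the operand (-1, 0 or 1) with two
 * compare+select pairs. */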
1155 static void emit_ssg(
1156 const struct lp_build_tgsi_action * action,
1157 struct lp_build_tgsi_context * bld_base,
1158 struct lp_build_emit_data * emit_data)
1159 {
1160 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1161
1162 LLVMValueRef cmp, val;
1163
1164 if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_ISSG) {
1165 cmp = LLVMBuildICmp(builder, LLVMIntSGT, emit_data->args[0], bld_base->int_bld.zero, "");
1166 val = LLVMBuildSelect(builder, cmp, bld_base->int_bld.one, emit_data->args[0], "");
1167 cmp = LLVMBuildICmp(builder, LLVMIntSGE, val, bld_base->int_bld.zero, "");
1168 val = LLVMBuildSelect(builder, cmp, val, LLVMConstInt(bld_base->int_bld.elem_type, -1, true), "");
1169 } else { // float SSG
1170 cmp = LLVMBuildFCmp(builder, LLVMRealOGT, emit_data->args[0], bld_base->base.zero, "");
1171 val = LLVMBuildSelect(builder, cmp, bld_base->base.one, emit_data->args[0], "");
1172 cmp = LLVMBuildFCmp(builder, LLVMRealOGE, val, bld_base->base.zero, "");
1173 val = LLVMBuildSelect(builder, cmp, val, LLVMConstReal(bld_base->base.elem_type, -1), "");
1174 }
1175
1176 emit_data->output[emit_data->chan] = val;
1177 }
1178
1179 static void emit_ineg(
1180 const struct lp_build_tgsi_action * action,
1181 struct lp_build_tgsi_context * bld_base,
1182 struct lp_build_emit_data * emit_data)
1183 {
1184 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1185 emit_data->output[emit_data->chan] = LLVMBuildNeg(builder,
1186 emit_data->args[0], "");
1187 }
1188
1189 static void emit_dneg(
1190 const struct lp_build_tgsi_action * action,
1191 struct lp_build_tgsi_context * bld_base,
1192 struct lp_build_emit_data * emit_data)
1193 {
1194 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1195 emit_data->output[emit_data->chan] = LLVMBuildFNeg(builder,
1196 emit_data->args[0], "");
1197 }
1198
1199 static void emit_frac(
1200 const struct lp_build_tgsi_action * action,
1201 struct lp_build_tgsi_context * bld_base,
1202 struct lp_build_emit_data * emit_data)
1203 {
1204 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1205 char *intr;
1206
1207 if (emit_data->info->opcode == TGSI_OPCODE_FRC)
1208 intr = "llvm.floor.f32";
1209 else if (emit_data->info->opcode == TGSI_OPCODE_DFRAC)
1210 intr = "llvm.floor.f64";
1211 else {
1212 assert(0);
1213 return;
1214 }
1215
1216 LLVMValueRef floor = lp_build_intrinsic(builder, intr, emit_data->dst_type,
1217 &emit_data->args[0], 1,
1218 LLVMReadNoneAttribute);
1219 emit_data->output[emit_data->chan] = LLVMBuildFSub(builder,
1220 emit_data->args[0], floor, "");
1221 }
1222
1223 static void emit_f2i(
1224 const struct lp_build_tgsi_action * action,
1225 struct lp_build_tgsi_context * bld_base,
1226 struct lp_build_emit_data * emit_data)
1227 {
1228 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1229 emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
1230 emit_data->args[0], bld_base->int_bld.elem_type, "");
1231 }
1232
1233 static void emit_f2u(
1234 const struct lp_build_tgsi_action * action,
1235 struct lp_build_tgsi_context * bld_base,
1236 struct lp_build_emit_data * emit_data)
1237 {
1238 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1239 emit_data->output[emit_data->chan] = LLVMBuildFPToUI(builder,
1240 emit_data->args[0], bld_base->uint_bld.elem_type, "");
1241 }
1242
1243 static void emit_i2f(
1244 const struct lp_build_tgsi_action * action,
1245 struct lp_build_tgsi_context * bld_base,
1246 struct lp_build_emit_data * emit_data)
1247 {
1248 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1249 emit_data->output[emit_data->chan] = LLVMBuildSIToFP(builder,
1250 emit_data->args[0], bld_base->base.elem_type, "");
1251 }
1252
1253 static void emit_u2f(
1254 const struct lp_build_tgsi_action * action,
1255 struct lp_build_tgsi_context * bld_base,
1256 struct lp_build_emit_data * emit_data)
1257 {
1258 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1259 emit_data->output[emit_data->chan] = LLVMBuildUIToFP(builder,
1260 emit_data->args[0], bld_base->base.elem_type, "");
1261 }
1262
1263 static void emit_immediate(struct lp_build_tgsi_context * bld_base,
1264 const struct tgsi_full_immediate *imm)
1265 {
1266 unsigned i;
1267 struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
1268
1269 for (i = 0; i < 4; ++i) {
1270 ctx->soa.immediates[ctx->soa.num_immediates][i] =
1271 LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false );
1272 }
1273
1274 ctx->soa.num_immediates++;
1275 }
1276
1277 void
1278 build_tgsi_intrinsic_nomem(const struct lp_build_tgsi_action *action,
1279 struct lp_build_tgsi_context *bld_base,
1280 struct lp_build_emit_data *emit_data)
1281 {
1282 struct lp_build_context * base = &bld_base->base;
1283 emit_data->output[emit_data->chan] =
1284 lp_build_intrinsic(base->gallivm->builder, action->intr_name,
1285 emit_data->dst_type, emit_data->args,
1286 emit_data->arg_count, LLVMReadNoneAttribute);
1287 }
1288
1289 static void emit_bfi(const struct lp_build_tgsi_action * action,
1290 struct lp_build_tgsi_context * bld_base,
1291 struct lp_build_emit_data * emit_data)
1292 {
1293 struct gallivm_state *gallivm = bld_base->base.gallivm;
1294 LLVMBuilderRef builder = gallivm->builder;
1295 LLVMValueRef bfi_args[3];
1296
1297 	// Calculate the bitmask: ((1 << src3) - 1) << src2
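// e.g. src3 = 4 (width) and src2 = 8 (offset): ((1 << 4) - 1) << 8 = 0x00000f00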
1298 bfi_args[0] = LLVMBuildShl(builder,
1299 LLVMBuildSub(builder,
1300 LLVMBuildShl(builder,
1301 bld_base->int_bld.one,
1302 emit_data->args[3], ""),
1303 bld_base->int_bld.one, ""),
1304 emit_data->args[2], "");
1305
1306 bfi_args[1] = LLVMBuildShl(builder, emit_data->args[1],
1307 emit_data->args[2], "");
1308
1309 bfi_args[2] = emit_data->args[0];
1310
1311 /* Calculate:
1312 	 * (arg0 & arg1) | (~arg0 & arg2) = arg2 ^ (arg0 & (arg1 ^ arg2))
1313 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
1314 */
1315 emit_data->output[emit_data->chan] =
1316 LLVMBuildXor(builder, bfi_args[2],
1317 LLVMBuildAnd(builder, bfi_args[0],
1318 LLVMBuildXor(builder, bfi_args[1], bfi_args[2],
1319 ""), ""), "");
1320 }
1321
1322 /* this is ffs in C */
1323 static void emit_lsb(const struct lp_build_tgsi_action * action,
1324 struct lp_build_tgsi_context * bld_base,
1325 struct lp_build_emit_data * emit_data)
1326 {
1327 struct gallivm_state *gallivm = bld_base->base.gallivm;
1328 LLVMValueRef args[2] = {
1329 emit_data->args[0],
1330
1331 /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
1332 * add special code to check for x=0. The reason is that
1333 * the LLVM behavior for x=0 is different from what we
1334 * need here.
1335 *
1336 * The hardware already implements the correct behavior.
1337 */
1338 lp_build_const_int32(gallivm, 1)
1339 };
1340
1341 emit_data->output[emit_data->chan] =
1342 lp_build_intrinsic(gallivm->builder, "llvm.cttz.i32",
1343 emit_data->dst_type, args, Elements(args),
1344 LLVMReadNoneAttribute);
1345 }
1346
1347 /* Find the last bit set. */
1348 static void emit_umsb(const struct lp_build_tgsi_action * action,
1349 struct lp_build_tgsi_context * bld_base,
1350 struct lp_build_emit_data * emit_data)
1351 {
1352 struct gallivm_state *gallivm = bld_base->base.gallivm;
1353 LLVMBuilderRef builder = gallivm->builder;
1354 LLVMValueRef args[2] = {
1355 emit_data->args[0],
1356 /* Don't generate code for handling zero: */
1357 lp_build_const_int32(gallivm, 1)
1358 };
1359
1360 LLVMValueRef msb =
1361 lp_build_intrinsic(builder, "llvm.ctlz.i32",
1362 emit_data->dst_type, args, Elements(args),
1363 LLVMReadNoneAttribute);
1364
1365 /* The HW returns the last bit index from MSB, but TGSI wants
1366 * the index from LSB. Invert it by doing "31 - msb". */
1367 msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
1368 msb, "");
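/* e.g. arg = 0x00010000: ctlz returns 15, so the reported index is 31 - 15 = 16. */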
1369
1370 /* Check for zero: */
1371 emit_data->output[emit_data->chan] =
1372 LLVMBuildSelect(builder,
1373 LLVMBuildICmp(builder, LLVMIntEQ, args[0],
1374 bld_base->uint_bld.zero, ""),
1375 lp_build_const_int32(gallivm, -1), msb, "");
1376 }
1377
1378 /* Find the last bit opposite of the sign bit. */
1379 static void emit_imsb(const struct lp_build_tgsi_action * action,
1380 struct lp_build_tgsi_context * bld_base,
1381 struct lp_build_emit_data * emit_data)
1382 {
1383 struct gallivm_state *gallivm = bld_base->base.gallivm;
1384 LLVMBuilderRef builder = gallivm->builder;
1385 LLVMValueRef arg = emit_data->args[0];
1386
1387 LLVMValueRef msb =
1388 lp_build_intrinsic(builder, "llvm.AMDGPU.flbit.i32",
1389 emit_data->dst_type, &arg, 1,
1390 LLVMReadNoneAttribute);
1391
1392 /* The HW returns the last bit index from MSB, but TGSI wants
1393 * the index from LSB. Invert it by doing "31 - msb". */
1394 msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
1395 msb, "");
1396
1397 /* If arg == 0 || arg == -1 (0xffffffff), return -1. */
1398 LLVMValueRef all_ones = lp_build_const_int32(gallivm, -1);
1399
1400 LLVMValueRef cond =
1401 LLVMBuildOr(builder,
1402 LLVMBuildICmp(builder, LLVMIntEQ, arg,
1403 bld_base->uint_bld.zero, ""),
1404 LLVMBuildICmp(builder, LLVMIntEQ, arg,
1405 all_ones, ""), "");
1406
1407 emit_data->output[emit_data->chan] =
1408 LLVMBuildSelect(builder, cond, all_ones, msb, "");
1409 }
1410
1411 static void emit_iabs(const struct lp_build_tgsi_action *action,
1412 struct lp_build_tgsi_context *bld_base,
1413 struct lp_build_emit_data *emit_data)
1414 {
1415 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1416
1417 emit_data->output[emit_data->chan] =
1418 lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_IMAX,
1419 emit_data->args[0],
1420 LLVMBuildNeg(builder,
1421 emit_data->args[0], ""));
1422 }
1423
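/* IMIN/IMAX/UMIN/UMAX: pick the integer comparison predicate from the opcode
 * and select between the two operands. */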
1424 static void emit_minmax_int(const struct lp_build_tgsi_action *action,
1425 struct lp_build_tgsi_context *bld_base,
1426 struct lp_build_emit_data *emit_data)
1427 {
1428 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1429 LLVMIntPredicate op;
1430
1431 switch (emit_data->info->opcode) {
1432 default:
1433 assert(0);
1434 case TGSI_OPCODE_IMAX:
1435 op = LLVMIntSGT;
1436 break;
1437 case TGSI_OPCODE_IMIN:
1438 op = LLVMIntSLT;
1439 break;
1440 case TGSI_OPCODE_UMAX:
1441 op = LLVMIntUGT;
1442 break;
1443 case TGSI_OPCODE_UMIN:
1444 op = LLVMIntULT;
1445 break;
1446 }
1447
1448 emit_data->output[emit_data->chan] =
1449 LLVMBuildSelect(builder,
1450 LLVMBuildICmp(builder, op, emit_data->args[0],
1451 emit_data->args[1], ""),
1452 emit_data->args[0],
1453 emit_data->args[1], "");
1454 }
1455
1456 static void pk2h_fetch_args(struct lp_build_tgsi_context * bld_base,
1457 struct lp_build_emit_data * emit_data)
1458 {
1459 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
1460 0, TGSI_CHAN_X);
1461 emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
1462 0, TGSI_CHAN_Y);
1463 }
1464
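/* PK2H: truncate two floats to half precision and pack them into the low and
 * high 16 bits of a 32-bit result. */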
1465 static void emit_pk2h(const struct lp_build_tgsi_action *action,
1466 struct lp_build_tgsi_context *bld_base,
1467 struct lp_build_emit_data *emit_data)
1468 {
1469 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1470 LLVMContextRef context = bld_base->base.gallivm->context;
1471 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1472 LLVMTypeRef fp16, i16;
1473 LLVMValueRef const16, comp[2];
1474 unsigned i;
1475
1476 fp16 = LLVMHalfTypeInContext(context);
1477 i16 = LLVMInt16TypeInContext(context);
1478 const16 = lp_build_const_int32(uint_bld->gallivm, 16);
1479
1480 for (i = 0; i < 2; i++) {
1481 comp[i] = LLVMBuildFPTrunc(builder, emit_data->args[i], fp16, "");
1482 comp[i] = LLVMBuildBitCast(builder, comp[i], i16, "");
1483 comp[i] = LLVMBuildZExt(builder, comp[i], uint_bld->elem_type, "");
1484 }
1485
1486 comp[1] = LLVMBuildShl(builder, comp[1], const16, "");
1487 comp[0] = LLVMBuildOr(builder, comp[0], comp[1], "");
1488
1489 emit_data->output[emit_data->chan] = comp[0];
1490 }
1491
1492 static void up2h_fetch_args(struct lp_build_tgsi_context * bld_base,
1493 struct lp_build_emit_data * emit_data)
1494 {
1495 emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
1496 0, TGSI_CHAN_X);
1497 }
1498
1499 static void emit_up2h(const struct lp_build_tgsi_action *action,
1500 struct lp_build_tgsi_context *bld_base,
1501 struct lp_build_emit_data *emit_data)
1502 {
1503 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
1504 LLVMContextRef context = bld_base->base.gallivm->context;
1505 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1506 LLVMTypeRef fp16, i16;
1507 LLVMValueRef const16, input, val;
1508 unsigned i;
1509
1510 fp16 = LLVMHalfTypeInContext(context);
1511 i16 = LLVMInt16TypeInContext(context);
1512 const16 = lp_build_const_int32(uint_bld->gallivm, 16);
1513 input = emit_data->args[0];
1514
1515 for (i = 0; i < 2; i++) {
1516 val = i == 1 ? LLVMBuildLShr(builder, input, const16, "") : input;
1517 val = LLVMBuildTrunc(builder, val, i16, "");
1518 val = LLVMBuildBitCast(builder, val, fp16, "");
1519 emit_data->output[i] =
1520 LLVMBuildFPExt(builder, val, bld_base->base.elem_type, "");
1521 }
1522 }
1523
1524 void radeon_llvm_context_init(struct radeon_llvm_context * ctx, const char *triple)
1525 {
1526 struct lp_type type;
1527
1528 /* Initialize the gallivm object:
1529 * We are only using the module, context, and builder fields of this struct.
1530 * This should be enough for us to be able to pass our gallivm struct to the
1531 * helper functions in the gallivm module.
1532 */
1533 memset(&ctx->gallivm, 0, sizeof (ctx->gallivm));
1534 memset(&ctx->soa, 0, sizeof(ctx->soa));
1535 ctx->gallivm.context = LLVMContextCreate();
1536 ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
1537 ctx->gallivm.context);
1538 LLVMSetTarget(ctx->gallivm.module,
1539
1540 #if HAVE_LLVM < 0x0306
1541 "r600--");
1542 #else
1543 triple);
1544 #endif
1545 ctx->gallivm.builder = LLVMCreateBuilderInContext(ctx->gallivm.context);
1546
1547 struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;
1548
1549 type.floating = TRUE;
1550 type.fixed = FALSE;
1551 type.sign = TRUE;
1552 type.norm = FALSE;
1553 type.width = 32;
1554 type.length = 1;
1555
1556 lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
1557 lp_build_context_init(&ctx->soa.bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
1558 lp_build_context_init(&ctx->soa.bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
1559 {
1560 struct lp_type dbl_type;
1561 dbl_type = type;
1562 dbl_type.width *= 2;
1563 lp_build_context_init(&ctx->soa.bld_base.dbl_bld, &ctx->gallivm, dbl_type);
1564 }
1565
1566 bld_base->soa = 1;
1567 bld_base->emit_store = radeon_llvm_emit_store;
1568 bld_base->emit_swizzle = emit_swizzle;
1569 bld_base->emit_declaration = emit_declaration;
1570 bld_base->emit_immediate = emit_immediate;
1571
1572 bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = radeon_llvm_emit_fetch;
1573 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = radeon_llvm_emit_fetch;
1574 bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = radeon_llvm_emit_fetch;
1575 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = radeon_llvm_emit_fetch;
1576 bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;
1577
1578 /* Allocate outputs */
1579 ctx->soa.outputs = ctx->outputs;
1580
1581 lp_set_default_actions(bld_base);
1582
1583 bld_base->op_actions[TGSI_OPCODE_ABS].emit = build_tgsi_intrinsic_nomem;
1584 bld_base->op_actions[TGSI_OPCODE_ABS].intr_name = "llvm.fabs.f32";
1585 bld_base->op_actions[TGSI_OPCODE_AND].emit = emit_and;
1586 bld_base->op_actions[TGSI_OPCODE_ARL].emit = emit_arl;
1587 bld_base->op_actions[TGSI_OPCODE_BFI].emit = emit_bfi;
1588 bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
1589 bld_base->op_actions[TGSI_OPCODE_BREV].emit = build_tgsi_intrinsic_nomem;
1590 bld_base->op_actions[TGSI_OPCODE_BREV].intr_name =
1591 HAVE_LLVM >= 0x0308 ? "llvm.bitreverse.i32" : "llvm.AMDGPU.brev";
1592 bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
1593 bld_base->op_actions[TGSI_OPCODE_CEIL].emit = build_tgsi_intrinsic_nomem;
1594 bld_base->op_actions[TGSI_OPCODE_CEIL].intr_name = "llvm.ceil.f32";
1595 bld_base->op_actions[TGSI_OPCODE_CLAMP].emit = build_tgsi_intrinsic_nomem;
1596 bld_base->op_actions[TGSI_OPCODE_CLAMP].intr_name =
1597 HAVE_LLVM >= 0x0308 ? "llvm.AMDGPU.clamp." : "llvm.AMDIL.clamp.";
1598 bld_base->op_actions[TGSI_OPCODE_CMP].emit = emit_cmp;
1599 bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
1600 bld_base->op_actions[TGSI_OPCODE_COS].emit = build_tgsi_intrinsic_nomem;
1601 bld_base->op_actions[TGSI_OPCODE_COS].intr_name = "llvm.cos.f32";
1602 bld_base->op_actions[TGSI_OPCODE_DABS].emit = build_tgsi_intrinsic_nomem;
1603 bld_base->op_actions[TGSI_OPCODE_DABS].intr_name = "llvm.fabs.f64";
1604 bld_base->op_actions[TGSI_OPCODE_DFMA].emit = build_tgsi_intrinsic_nomem;
1605 bld_base->op_actions[TGSI_OPCODE_DFMA].intr_name = "llvm.fma.f64";
1606 bld_base->op_actions[TGSI_OPCODE_DFRAC].emit = emit_frac;
1607 bld_base->op_actions[TGSI_OPCODE_DNEG].emit = emit_dneg;
1608 bld_base->op_actions[TGSI_OPCODE_DSEQ].emit = emit_dcmp;
1609 bld_base->op_actions[TGSI_OPCODE_DSGE].emit = emit_dcmp;
1610 bld_base->op_actions[TGSI_OPCODE_DSLT].emit = emit_dcmp;
1611 bld_base->op_actions[TGSI_OPCODE_DSNE].emit = emit_dcmp;
1612 bld_base->op_actions[TGSI_OPCODE_DRSQ].emit = build_tgsi_intrinsic_nomem;
1613 bld_base->op_actions[TGSI_OPCODE_DRSQ].intr_name = "llvm.AMDGPU.rsq.f64";
1614 bld_base->op_actions[TGSI_OPCODE_DSQRT].emit = build_tgsi_intrinsic_nomem;
1615 bld_base->op_actions[TGSI_OPCODE_DSQRT].intr_name = "llvm.sqrt.f64";
1616 bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
1617 bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
1618 bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
1619 bld_base->op_actions[TGSI_OPCODE_EX2].emit = build_tgsi_intrinsic_nomem;
1620 bld_base->op_actions[TGSI_OPCODE_EX2].intr_name =
1621 HAVE_LLVM >= 0x0308 ? "llvm.exp2.f32" : "llvm.AMDIL.exp.";
1622 bld_base->op_actions[TGSI_OPCODE_FLR].emit = build_tgsi_intrinsic_nomem;
1623 bld_base->op_actions[TGSI_OPCODE_FLR].intr_name = "llvm.floor.f32";
1624 bld_base->op_actions[TGSI_OPCODE_FMA].emit = build_tgsi_intrinsic_nomem;
1625 bld_base->op_actions[TGSI_OPCODE_FMA].intr_name = "llvm.fma.f32";
1626 bld_base->op_actions[TGSI_OPCODE_FRC].emit = emit_frac;
1627 bld_base->op_actions[TGSI_OPCODE_F2I].emit = emit_f2i;
1628 bld_base->op_actions[TGSI_OPCODE_F2U].emit = emit_f2u;
1629 bld_base->op_actions[TGSI_OPCODE_FSEQ].emit = emit_fcmp;
1630 bld_base->op_actions[TGSI_OPCODE_FSGE].emit = emit_fcmp;
1631 bld_base->op_actions[TGSI_OPCODE_FSLT].emit = emit_fcmp;
1632 bld_base->op_actions[TGSI_OPCODE_FSNE].emit = emit_fcmp;
1633 bld_base->op_actions[TGSI_OPCODE_IABS].emit = emit_iabs;
1634 bld_base->op_actions[TGSI_OPCODE_IBFE].emit = build_tgsi_intrinsic_nomem;
1635 bld_base->op_actions[TGSI_OPCODE_IBFE].intr_name = "llvm.AMDGPU.bfe.i32";
1636 bld_base->op_actions[TGSI_OPCODE_IDIV].emit = emit_idiv;
1637 bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
1638 bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
1639 bld_base->op_actions[TGSI_OPCODE_IMAX].emit = emit_minmax_int;
1640 bld_base->op_actions[TGSI_OPCODE_IMIN].emit = emit_minmax_int;
1641 bld_base->op_actions[TGSI_OPCODE_IMSB].emit = emit_imsb;
1642 bld_base->op_actions[TGSI_OPCODE_INEG].emit = emit_ineg;
1643 bld_base->op_actions[TGSI_OPCODE_ISHR].emit = emit_ishr;
1644 bld_base->op_actions[TGSI_OPCODE_ISGE].emit = emit_icmp;
1645 bld_base->op_actions[TGSI_OPCODE_ISLT].emit = emit_icmp;
1646 bld_base->op_actions[TGSI_OPCODE_ISSG].emit = emit_ssg;
1647 bld_base->op_actions[TGSI_OPCODE_I2F].emit = emit_i2f;
1648 bld_base->op_actions[TGSI_OPCODE_KILL_IF].fetch_args = kill_if_fetch_args;
1649 bld_base->op_actions[TGSI_OPCODE_KILL_IF].emit = kil_emit;
1650 bld_base->op_actions[TGSI_OPCODE_KILL_IF].intr_name = "llvm.AMDGPU.kill";
1651 bld_base->op_actions[TGSI_OPCODE_KILL].emit = lp_build_tgsi_intrinsic;
1652 bld_base->op_actions[TGSI_OPCODE_KILL].intr_name = "llvm.AMDGPU.kilp";
1653 bld_base->op_actions[TGSI_OPCODE_LSB].emit = emit_lsb;
1654 bld_base->op_actions[TGSI_OPCODE_LG2].emit = build_tgsi_intrinsic_nomem;
1655 bld_base->op_actions[TGSI_OPCODE_LG2].intr_name = "llvm.log2.f32";
1656 bld_base->op_actions[TGSI_OPCODE_MOD].emit = emit_mod;
1657 bld_base->op_actions[TGSI_OPCODE_UMSB].emit = emit_umsb;
1658 bld_base->op_actions[TGSI_OPCODE_NOT].emit = emit_not;
1659 bld_base->op_actions[TGSI_OPCODE_OR].emit = emit_or;
1660 bld_base->op_actions[TGSI_OPCODE_PK2H].fetch_args = pk2h_fetch_args;
1661 bld_base->op_actions[TGSI_OPCODE_PK2H].emit = emit_pk2h;
1662 bld_base->op_actions[TGSI_OPCODE_POPC].emit = build_tgsi_intrinsic_nomem;
1663 bld_base->op_actions[TGSI_OPCODE_POPC].intr_name = "llvm.ctpop.i32";
1664 bld_base->op_actions[TGSI_OPCODE_POW].emit = build_tgsi_intrinsic_nomem;
1665 bld_base->op_actions[TGSI_OPCODE_POW].intr_name = "llvm.pow.f32";
1666 bld_base->op_actions[TGSI_OPCODE_ROUND].emit = build_tgsi_intrinsic_nomem;
1667 bld_base->op_actions[TGSI_OPCODE_ROUND].intr_name = "llvm.rint.f32";
1668 bld_base->op_actions[TGSI_OPCODE_RSQ].emit = build_tgsi_intrinsic_nomem;
1669 bld_base->op_actions[TGSI_OPCODE_RSQ].intr_name = "llvm.AMDGPU.rsq.clamped.f32";
1670 bld_base->op_actions[TGSI_OPCODE_SGE].emit = emit_set_cond;
1671 bld_base->op_actions[TGSI_OPCODE_SEQ].emit = emit_set_cond;
1672 bld_base->op_actions[TGSI_OPCODE_SHL].emit = emit_shl;
1673 bld_base->op_actions[TGSI_OPCODE_SLE].emit = emit_set_cond;
1674 bld_base->op_actions[TGSI_OPCODE_SLT].emit = emit_set_cond;
1675 bld_base->op_actions[TGSI_OPCODE_SNE].emit = emit_set_cond;
1676 bld_base->op_actions[TGSI_OPCODE_SGT].emit = emit_set_cond;
1677 bld_base->op_actions[TGSI_OPCODE_SIN].emit = build_tgsi_intrinsic_nomem;
1678 bld_base->op_actions[TGSI_OPCODE_SIN].intr_name = "llvm.sin.f32";
1679 bld_base->op_actions[TGSI_OPCODE_SQRT].emit = build_tgsi_intrinsic_nomem;
1680 bld_base->op_actions[TGSI_OPCODE_SQRT].intr_name = "llvm.sqrt.f32";
1681 bld_base->op_actions[TGSI_OPCODE_SSG].emit = emit_ssg;
1682 bld_base->op_actions[TGSI_OPCODE_TRUNC].emit = build_tgsi_intrinsic_nomem;
1683 bld_base->op_actions[TGSI_OPCODE_TRUNC].intr_name = "llvm.trunc.f32";
1684 bld_base->op_actions[TGSI_OPCODE_UADD].emit = emit_uadd;
1685 bld_base->op_actions[TGSI_OPCODE_UBFE].emit = build_tgsi_intrinsic_nomem;
1686 bld_base->op_actions[TGSI_OPCODE_UBFE].intr_name = "llvm.AMDGPU.bfe.u32";
1687 bld_base->op_actions[TGSI_OPCODE_UDIV].emit = emit_udiv;
1688 bld_base->op_actions[TGSI_OPCODE_UMAX].emit = emit_minmax_int;
1689 bld_base->op_actions[TGSI_OPCODE_UMIN].emit = emit_minmax_int;
1690 bld_base->op_actions[TGSI_OPCODE_UMOD].emit = emit_umod;
1691 bld_base->op_actions[TGSI_OPCODE_USEQ].emit = emit_icmp;
1692 bld_base->op_actions[TGSI_OPCODE_USGE].emit = emit_icmp;
1693 bld_base->op_actions[TGSI_OPCODE_USHR].emit = emit_ushr;
1694 bld_base->op_actions[TGSI_OPCODE_USLT].emit = emit_icmp;
1695 bld_base->op_actions[TGSI_OPCODE_USNE].emit = emit_icmp;
1696 bld_base->op_actions[TGSI_OPCODE_U2F].emit = emit_u2f;
1697 bld_base->op_actions[TGSI_OPCODE_XOR].emit = emit_xor;
1698 bld_base->op_actions[TGSI_OPCODE_UCMP].emit = emit_ucmp;
1699 bld_base->op_actions[TGSI_OPCODE_UP2H].fetch_args = up2h_fetch_args;
1700 bld_base->op_actions[TGSI_OPCODE_UP2H].emit = emit_up2h;
1701 }
1702
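/* Create the shader's "main" LLVM function and position the builder at the
 * start of its body.
 *
 * When num_return_elems is non-zero, the return values are packed into an
 * LLVM struct type (stored in ctx->return_type), so the shader can return
 * several values from the one function; otherwise the function returns void.
 *
 * Illustrative sketch only, not part of this file (the field names follow
 * the context initialized above):
 *
 *	LLVMTypeRef i32 = ctx->soa.bld_base.uint_bld.elem_type;
 *	LLVMTypeRef ret[2] = { i32, i32 };
 *	LLVMTypeRef params[1] = { i32 };
 *	radeon_llvm_create_func(ctx, ret, 2, params, 1);
 */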
1703 void radeon_llvm_create_func(struct radeon_llvm_context * ctx,
1704 LLVMTypeRef *return_types, unsigned num_return_elems,
1705 LLVMTypeRef *ParamTypes, unsigned ParamCount)
1706 {
1707 LLVMTypeRef main_fn_type, ret_type;
1708 LLVMBasicBlockRef main_fn_body;
1709
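/* Pack any return values into a single (packed) struct type; with no
 * return elements the function simply returns void. */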
1710 if (num_return_elems)
1711 ret_type = LLVMStructTypeInContext(ctx->gallivm.context,
1712 return_types,
1713 num_return_elems, true);
1714 else
1715 ret_type = LLVMVoidTypeInContext(ctx->gallivm.context);
1716
1717 /* Set up the main function. */
1718 ctx->return_type = ret_type;
1719 main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
1720 ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, "main", main_fn_type);
1721 main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
1722 ctx->main_fn, "main_body");
1723 LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
1724 }
1725
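/* Run a small per-function optimization pipeline over the main function
 * (mem2reg, scalar replacement of aggregates, LICM, aggressive DCE, CFG
 * simplification, instruction combining), then release the builder and the
 * pass manager.
 *
 * Expected call order, sketched for illustration only:
 *
 *	radeon_llvm_create_func(ctx, ret_types, num_rets, param_types, num_params);
 *	... translate the shader and emit its return ...
 *	radeon_llvm_finalize_module(ctx);
 *	... hand the module to the compiler backend ...
 *	radeon_llvm_dispose(ctx);
 */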
1726 void radeon_llvm_finalize_module(struct radeon_llvm_context * ctx)
1727 {
1728 struct gallivm_state * gallivm = ctx->soa.bld_base.base.gallivm;
1729 const char *triple = LLVMGetTarget(gallivm->module);
1730 LLVMTargetLibraryInfoRef target_library_info;
1731
1732 /* Create the pass manager */
1733 gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(
1734 gallivm->module);
1735
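/* Attach target library info for the module's triple so the passes know
 * which library calls they may assume exist. */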
1736 target_library_info = gallivm_create_target_library_info(triple);
1737 LLVMAddTargetLibraryInfo(target_library_info, gallivm->passmgr);
1738
1739 /* mem2reg: this should eliminate all loads and stores to the alloca'd temporaries. */
1740 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
1741
1742 /* Add some optimization passes */
1743 LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
1744 LLVMAddLICMPass(gallivm->passmgr);
1745 LLVMAddAggressiveDCEPass(gallivm->passmgr);
1746 LLVMAddCFGSimplificationPass(gallivm->passmgr);
1747 LLVMAddInstructionCombiningPass(gallivm->passmgr);
1748
1749 /* Run the passes over the shader's main function. */
1750 LLVMRunFunctionPassManager(gallivm->passmgr, ctx->main_fn);
1751
1752 LLVMDisposeBuilder(gallivm->builder);
1753 LLVMDisposePassManager(gallivm->passmgr);
1754 gallivm_dispose_target_library_info(target_library_info);
1755 }
1756
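/* Free everything owned by the context: the LLVM module and LLVMContext
 * created in radeon_llvm_context_init(), plus the temporary, array, loop
 * and branch bookkeeping built up during translation. */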
1757 void radeon_llvm_dispose(struct radeon_llvm_context * ctx)
1758 {
1759 LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
1760 LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);
1761 FREE(ctx->arrays);
1762 ctx->arrays = NULL;
1763 FREE(ctx->temps);
1764 ctx->temps = NULL;
1765 ctx->temps_count = 0;
1766 FREE(ctx->loop);
1767 ctx->loop = NULL;
1768 ctx->loop_depth_max = 0;
1769 FREE(ctx->branch);
1770 ctx->branch = NULL;
1771 ctx->branch_depth_max = 0;
1772 }