src/gallium/drivers/radeon/radeon_setup_tgsi_llvm.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Tom Stellard <thomas.stellard@amd.com>
 *
 */
#include "radeon_llvm.h"

#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_swizzle.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_parse.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_debug.h"

#include <llvm-c/Core.h>
#include <llvm-c/Transforms/Scalar.h>

static struct radeon_llvm_loop * get_current_loop(struct radeon_llvm_context * ctx)
{
	return ctx->loop_depth > 0 ? ctx->loop + (ctx->loop_depth - 1) : NULL;
}

static struct radeon_llvm_branch * get_current_branch(
	struct radeon_llvm_context * ctx)
{
	return ctx->branch_depth > 0 ?
		ctx->branch + (ctx->branch_depth - 1) : NULL;
}

unsigned radeon_llvm_reg_index_soa(unsigned index, unsigned chan)
{
	return (index * 4) + chan;
}

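/* Apply a TGSI swizzle to an LLVM value by shuffling it with an undef
 * vector, using the four channel indices as the shuffle mask. */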
static LLVMValueRef emit_swizzle(
	struct lp_build_tgsi_context * bld_base,
	LLVMValueRef value,
	unsigned swizzle_x,
	unsigned swizzle_y,
	unsigned swizzle_z,
	unsigned swizzle_w)
{
	LLVMValueRef swizzles[4];
	LLVMTypeRef i32t =
		LLVMInt32TypeInContext(bld_base->base.gallivm->context);

	swizzles[0] = LLVMConstInt(i32t, swizzle_x, 0);
	swizzles[1] = LLVMConstInt(i32t, swizzle_y, 0);
	swizzles[2] = LLVMConstInt(i32t, swizzle_z, 0);
	swizzles[3] = LLVMConstInt(i32t, swizzle_w, 0);

	return LLVMBuildShuffleVector(bld_base->base.gallivm->builder,
		value,
		LLVMGetUndef(LLVMTypeOf(value)),
		LLVMConstVector(swizzles, 4), "");
}

static struct tgsi_declaration_range
get_array_range(struct lp_build_tgsi_context *bld_base,
		unsigned File, const struct tgsi_ind_register *reg)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);

	if (File != TGSI_FILE_TEMPORARY || reg->ArrayID == 0 ||
	    reg->ArrayID > bld_base->info->array_max[TGSI_FILE_TEMPORARY]) {
		struct tgsi_declaration_range range;
		range.First = 0;
		range.Last = bld_base->info->file_max[File];
		return range;
	}

	return ctx->arrays[reg->ArrayID - 1];
}

static LLVMValueRef
emit_array_index(
	struct lp_build_tgsi_soa_context *bld,
	const struct tgsi_ind_register *reg,
	unsigned offset)
{
	struct gallivm_state * gallivm = bld->bld_base.base.gallivm;

	LLVMValueRef addr = LLVMBuildLoad(gallivm->builder, bld->addr[reg->Index][reg->Swizzle], "");
	return LLVMBuildAdd(gallivm->builder, addr, lp_build_const_int32(gallivm, offset), "");
}

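/* Pack the two 32-bit halves of a double-precision channel pair into a
 * <2 x i32> vector and bitcast the result to the double type. */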
LLVMValueRef
radeon_llvm_emit_fetch_double(
	struct lp_build_tgsi_context *bld_base,
	LLVMValueRef ptr,
	LLVMValueRef ptr2)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef result;

	result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));

	result = LLVMBuildInsertElement(builder,
					result,
					bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr),
					bld_base->int_bld.zero, "");
	result = LLVMBuildInsertElement(builder,
					result,
					bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr2),
					bld_base->int_bld.one, "");
	return bitcast(bld_base, TGSI_TYPE_DOUBLE, result);
}

static LLVMValueRef
emit_array_fetch(
	struct lp_build_tgsi_context *bld_base,
	unsigned File, enum tgsi_opcode_type type,
	struct tgsi_declaration_range range,
	unsigned swizzle)
{
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	struct gallivm_state * gallivm = bld->bld_base.base.gallivm;
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	unsigned i, size = range.Last - range.First + 1;
	LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
	LLVMValueRef result = LLVMGetUndef(vec);

	struct tgsi_full_src_register tmp_reg = {};
	tmp_reg.Register.File = File;

	for (i = 0; i < size; ++i) {
		tmp_reg.Register.Index = i + range.First;
		LLVMValueRef temp = radeon_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
		result = LLVMBuildInsertElement(builder, result, temp,
						lp_build_const_int32(gallivm, i), "");
	}
	return result;
}

static bool uses_temp_indirect_addressing(
	struct lp_build_tgsi_context *bld_base)
{
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	return (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY));
}

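/* Generic source operand fetch. A swizzle of ~0 gathers all four channels
 * into one vector, indirect operands are resolved by fetching the declared
 * range and extracting the addressed element, and direct operands are read
 * from the immediate, input, temporary or output storage of their file. */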
LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
				    const struct tgsi_full_src_register *reg,
				    enum tgsi_opcode_type type,
				    unsigned swizzle)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef result = NULL, ptr, ptr2;

	if (swizzle == ~0) {
		LLVMValueRef values[TGSI_NUM_CHANNELS];
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			values[chan] = radeon_llvm_emit_fetch(bld_base, reg, type, chan);
		}
		return lp_build_gather_values(bld_base->base.gallivm, values,
					      TGSI_NUM_CHANNELS);
	}

	if (reg->Register.Indirect) {
		struct tgsi_declaration_range range = get_array_range(bld_base,
			reg->Register.File, &reg->Indirect);
		return LLVMBuildExtractElement(builder,
			emit_array_fetch(bld_base, reg->Register.File, type, range, swizzle),
			emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First),
			"");
	}

	switch(reg->Register.File) {
	case TGSI_FILE_IMMEDIATE: {
		LLVMTypeRef ctype = tgsi2llvmtype(bld_base, type);
		if (type == TGSI_TYPE_DOUBLE) {
			result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
			result = LLVMConstInsertElement(result,
							bld->immediates[reg->Register.Index][swizzle],
							bld_base->int_bld.zero);
			result = LLVMConstInsertElement(result,
							bld->immediates[reg->Register.Index][swizzle + 1],
							bld_base->int_bld.one);
			return LLVMConstBitCast(result, ctype);
		} else {
			return LLVMConstBitCast(bld->immediates[reg->Register.Index][swizzle], ctype);
		}
	}

	case TGSI_FILE_INPUT:
		result = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle)];
		if (type == TGSI_TYPE_DOUBLE) {
			ptr = result;
			ptr2 = ctx->inputs[radeon_llvm_reg_index_soa(reg->Register.Index, swizzle + 1)];
			return radeon_llvm_emit_fetch_double(bld_base, ptr, ptr2);
		}
		break;

	case TGSI_FILE_TEMPORARY:
		if (reg->Register.Index >= ctx->temps_count)
			return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
		if (uses_temp_indirect_addressing(bld_base)) {
			ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle);
			break;
		}
		ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
		if (type == TGSI_TYPE_DOUBLE) {
			ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
			return radeon_llvm_emit_fetch_double(bld_base,
							     LLVMBuildLoad(builder, ptr, ""),
							     LLVMBuildLoad(builder, ptr2, ""));
		}
		result = LLVMBuildLoad(builder, ptr, "");
		break;

	case TGSI_FILE_OUTPUT:
		ptr = lp_get_output_ptr(bld, reg->Register.Index, swizzle);
		if (type == TGSI_TYPE_DOUBLE) {
			ptr2 = lp_get_output_ptr(bld, reg->Register.Index, swizzle + 1);
			return radeon_llvm_emit_fetch_double(bld_base,
							     LLVMBuildLoad(builder, ptr, ""),
							     LLVMBuildLoad(builder, ptr2, ""));
		}
		result = LLVMBuildLoad(builder, ptr, "");
		break;

	default:
		return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
	}

	return bitcast(bld_base, type, result);
}

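/* Fetch a system value preloaded by the driver; vector values are narrowed
 * to the requested channel before the final bitcast. */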
static LLVMValueRef fetch_system_value(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state *gallivm = bld_base->base.gallivm;

	LLVMValueRef cval = ctx->system_values[reg->Register.Index];
	if (LLVMGetTypeKind(LLVMTypeOf(cval)) == LLVMVectorTypeKind) {
		cval = LLVMBuildExtractElement(gallivm->builder, cval,
					       lp_build_const_int32(gallivm, swizzle), "");
	}
	return bitcast(bld_base, type, cval);
}

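/* Allocate storage for declared registers: allocas for address, temporary
 * and output registers, driver callbacks for inputs and system values. */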
static void emit_declaration(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_declaration *decl)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	unsigned first, last, i, idx;
	switch(decl->Declaration.File) {
	case TGSI_FILE_ADDRESS:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			unsigned chan;
			for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
				ctx->soa.addr[idx][chan] = lp_build_alloca(
					&ctx->gallivm,
					ctx->soa.bld_base.uint_bld.elem_type, "");
			}
		}
		break;
	}

	case TGSI_FILE_TEMPORARY:
		if (decl->Declaration.Array) {
			if (!ctx->arrays) {
				int size = bld_base->info->array_max[TGSI_FILE_TEMPORARY];
				ctx->arrays = MALLOC(sizeof(ctx->arrays[0]) * size);
			}

			ctx->arrays[decl->Array.ArrayID - 1] = decl->Range;
		}
		if (uses_temp_indirect_addressing(bld_base)) {
			lp_emit_declaration_soa(bld_base, decl);
			break;
		}
		first = decl->Range.First;
		last = decl->Range.Last;
		if (!ctx->temps_count) {
			ctx->temps_count = bld_base->info->file_max[TGSI_FILE_TEMPORARY] + 1;
			ctx->temps = MALLOC(TGSI_NUM_CHANNELS * ctx->temps_count * sizeof(LLVMValueRef));
		}
		for (idx = first; idx <= last; idx++) {
			for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
				ctx->temps[idx * TGSI_NUM_CHANNELS + i] =
					lp_build_alloca(bld_base->base.gallivm, bld_base->base.vec_type,
							"temp");
			}
		}
		break;

	case TGSI_FILE_INPUT:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			if (ctx->load_input)
				ctx->load_input(ctx, idx, decl);
		}
	}
	break;

	case TGSI_FILE_SYSTEM_VALUE:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			ctx->load_system_value(ctx, idx, decl);
		}
	}
	break;

	case TGSI_FILE_OUTPUT:
	{
		unsigned idx;
		for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
			unsigned chan;
			assert(idx < RADEON_LLVM_MAX_OUTPUTS);
			for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
				ctx->soa.outputs[idx][chan] = lp_build_alloca(&ctx->gallivm,
					ctx->soa.bld_base.base.elem_type, "");
			}
		}

		ctx->output_reg_count = MAX2(ctx->output_reg_count,
					     decl->Range.Last + 1);
		break;
	}

	default:
		break;
	}
}

LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
				  LLVMValueRef value)
{
	struct lp_build_emit_data clamp_emit_data;

	memset(&clamp_emit_data, 0, sizeof(clamp_emit_data));
	clamp_emit_data.arg_count = 3;
	clamp_emit_data.args[0] = value;
	clamp_emit_data.args[2] = bld_base->base.one;
	clamp_emit_data.args[1] = bld_base->base.zero;

	return lp_build_emit_llvm(bld_base, TGSI_OPCODE_CLAMP,
				  &clamp_emit_data);
}

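/* Generic destination store. Handles saturation, double-precision channel
 * pairs, and indirect destinations (by inserting into the fetched array and
 * storing every element back), as well as plain direct stores. */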
void radeon_llvm_emit_store(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_instruction * inst,
	const struct tgsi_opcode_info * info,
	LLVMValueRef dst[4])
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
	struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
	const struct tgsi_full_dst_register *reg = &inst->Dst[0];
	LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
	LLVMValueRef temp_ptr, temp_ptr2 = NULL;
	unsigned chan, chan_index;
	boolean is_vec_store = FALSE;
	enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);

	if (dst[0]) {
		LLVMTypeKind k = LLVMGetTypeKind(LLVMTypeOf(dst[0]));
		is_vec_store = (k == LLVMVectorTypeKind);
	}

	if (is_vec_store) {
		LLVMValueRef values[4] = {};
		TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan) {
			LLVMValueRef index = lp_build_const_int32(gallivm, chan);
			values[chan] = LLVMBuildExtractElement(gallivm->builder,
							       dst[0], index, "");
		}
		bld_base->emit_store(bld_base, inst, info, values);
		return;
	}

	TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
		LLVMValueRef value = dst[chan_index];

		if (dtype == TGSI_TYPE_DOUBLE && (chan_index == 1 || chan_index == 3))
			continue;
		if (inst->Instruction.Saturate)
			value = radeon_llvm_saturate(bld_base, value);

		if (reg->Register.File == TGSI_FILE_ADDRESS) {
			temp_ptr = bld->addr[reg->Register.Index][chan_index];
			LLVMBuildStore(builder, value, temp_ptr);
			continue;
		}

		if (dtype != TGSI_TYPE_DOUBLE)
			value = bitcast(bld_base, TGSI_TYPE_FLOAT, value);

		if (reg->Register.Indirect) {
			struct tgsi_declaration_range range = get_array_range(bld_base,
				reg->Register.File, &reg->Indirect);

			unsigned i, size = range.Last - range.First + 1;
			LLVMValueRef array = LLVMBuildInsertElement(builder,
				emit_array_fetch(bld_base, reg->Register.File, TGSI_TYPE_FLOAT, range, chan_index),
				value, emit_array_index(bld, &reg->Indirect, reg->Register.Index - range.First), "");

			for (i = 0; i < size; ++i) {
				switch(reg->Register.File) {
				case TGSI_FILE_OUTPUT:
					temp_ptr = bld->outputs[i + range.First][chan_index];
					break;

				case TGSI_FILE_TEMPORARY:
					if (range.First + i >= ctx->temps_count)
						continue;
					if (uses_temp_indirect_addressing(bld_base))
						temp_ptr = lp_get_temp_ptr_soa(bld, i + range.First, chan_index);
					else
						temp_ptr = ctx->temps[(i + range.First) * TGSI_NUM_CHANNELS + chan_index];
					break;

				default:
					return;
				}
				value = LLVMBuildExtractElement(builder, array,
								lp_build_const_int32(gallivm, i), "");
				LLVMBuildStore(builder, value, temp_ptr);
			}

		} else {
			switch(reg->Register.File) {
			case TGSI_FILE_OUTPUT:
				temp_ptr = bld->outputs[reg->Register.Index][chan_index];
				if (dtype == TGSI_TYPE_DOUBLE)
					temp_ptr2 = bld->outputs[reg->Register.Index][chan_index + 1];
				break;

			case TGSI_FILE_TEMPORARY:
				if (reg->Register.Index >= ctx->temps_count)
					continue;
				if (uses_temp_indirect_addressing(bld_base)) {
					temp_ptr = NULL;
					break;
				}
				temp_ptr = ctx->temps[TGSI_NUM_CHANNELS * reg->Register.Index + chan_index];
				if (dtype == TGSI_TYPE_DOUBLE)
					temp_ptr2 = ctx->temps[TGSI_NUM_CHANNELS * reg->Register.Index + chan_index + 1];

				break;

			default:
				return;
			}
			if (dtype != TGSI_TYPE_DOUBLE)
				LLVMBuildStore(builder, value, temp_ptr);
			else {
				LLVMValueRef ptr = LLVMBuildBitCast(builder, value,
					LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), 2), "");
				LLVMValueRef val2;
				value = LLVMBuildExtractElement(builder, ptr,
								bld_base->uint_bld.zero, "");
				val2 = LLVMBuildExtractElement(builder, ptr,
							       bld_base->uint_bld.one, "");

				LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, value), temp_ptr);
				LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, val2), temp_ptr2);
			}
		}
	}
}

static void bgnloop_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBasicBlockRef loop_block;
	LLVMBasicBlockRef endloop_block;
	endloop_block = LLVMAppendBasicBlockInContext(gallivm->context,
						      ctx->main_fn, "ENDLOOP");
	loop_block = LLVMInsertBasicBlockInContext(gallivm->context,
						   endloop_block, "LOOP");
	LLVMBuildBr(gallivm->builder, loop_block);
	LLVMPositionBuilderAtEnd(gallivm->builder, loop_block);

	if (++ctx->loop_depth > ctx->loop_depth_max) {
		unsigned new_max = ctx->loop_depth_max << 1;

		if (!new_max)
			new_max = RADEON_LLVM_INITIAL_CF_DEPTH;

		ctx->loop = REALLOC(ctx->loop, ctx->loop_depth_max *
				    sizeof(ctx->loop[0]),
				    new_max * sizeof(ctx->loop[0]));
		ctx->loop_depth_max = new_max;
	}

	ctx->loop[ctx->loop_depth - 1].loop_block = loop_block;
	ctx->loop[ctx->loop_depth - 1].endloop_block = endloop_block;
}

static void brk_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_loop * current_loop = get_current_loop(ctx);

	LLVMBuildBr(gallivm->builder, current_loop->endloop_block);
}

static void cont_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_loop * current_loop = get_current_loop(ctx);

	LLVMBuildBr(gallivm->builder, current_loop->loop_block);
}

static void else_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
	LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);

	/* We need to add a terminator to the current block if the previous
	 * instruction was an ENDIF. Example:
	 * IF
	 *   [code]
	 *   IF
	 *     [code]
	 *   ELSE
	 *     [code]
	 *   ENDIF <--
	 * ELSE <--
	 *   [code]
	 * ENDIF
	 */

	if (current_block != current_branch->if_block) {
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	current_branch->has_else = 1;
	LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
}

static void endif_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_branch * current_branch = get_current_branch(ctx);
	LLVMBasicBlockRef current_block = LLVMGetInsertBlock(gallivm->builder);

	/* If we have consecutive ENDIF instructions, then the first ENDIF
	 * will not have a terminator, so we need to add one. */
	if (current_block != current_branch->if_block
	    && current_block != current_branch->else_block
	    && !LLVMGetBasicBlockTerminator(current_block)) {

		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}
	if (!LLVMGetBasicBlockTerminator(current_branch->else_block)) {
		LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->else_block);
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}

	if (!LLVMGetBasicBlockTerminator(current_branch->if_block)) {
		LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->if_block);
		LLVMBuildBr(gallivm->builder, current_branch->endif_block);
	}

	LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->endif_block);
	ctx->branch_depth--;
}

static void endloop_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	struct radeon_llvm_loop * current_loop = get_current_loop(ctx);

	if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(gallivm->builder))) {
		LLVMBuildBr(gallivm->builder, current_loop->loop_block);
	}

	LLVMPositionBuilderAtEnd(gallivm->builder, current_loop->endloop_block);
	ctx->loop_depth--;
}

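/* Shared IF/UIF lowering: create the IF, ELSE and ENDIF blocks, emit the
 * conditional branch and push a new entry onto the branch stack, growing
 * the stack on demand. */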
static void if_cond_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data,
	LLVMValueRef cond)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBasicBlockRef if_block, else_block, endif_block;

	endif_block = LLVMAppendBasicBlockInContext(gallivm->context,
						    ctx->main_fn, "ENDIF");
	if_block = LLVMInsertBasicBlockInContext(gallivm->context,
						 endif_block, "IF");
	else_block = LLVMInsertBasicBlockInContext(gallivm->context,
						   endif_block, "ELSE");
	LLVMBuildCondBr(gallivm->builder, cond, if_block, else_block);
	LLVMPositionBuilderAtEnd(gallivm->builder, if_block);

	if (++ctx->branch_depth > ctx->branch_depth_max) {
		unsigned new_max = ctx->branch_depth_max << 1;

		if (!new_max)
			new_max = RADEON_LLVM_INITIAL_CF_DEPTH;

		ctx->branch = REALLOC(ctx->branch, ctx->branch_depth_max *
				      sizeof(ctx->branch[0]),
				      new_max * sizeof(ctx->branch[0]));
		ctx->branch_depth_max = new_max;
	}

	ctx->branch[ctx->branch_depth - 1].endif_block = endif_block;
	ctx->branch[ctx->branch_depth - 1].if_block = if_block;
	ctx->branch[ctx->branch_depth - 1].else_block = else_block;
	ctx->branch[ctx->branch_depth - 1].has_else = 0;
}

static void if_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMValueRef cond;

	cond = LLVMBuildFCmp(gallivm->builder, LLVMRealUNE,
			     emit_data->args[0],
			     bld_base->base.zero, "");

	if_cond_emit(action, bld_base, emit_data, cond);
}

static void uif_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMValueRef cond;

	cond = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
			     bitcast(bld_base, TGSI_TYPE_UNSIGNED, emit_data->args[0]),
			     bld_base->int_bld.zero, "");

	if_cond_emit(action, bld_base, emit_data, cond);
}

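/* KILL_IF argument setup: test each source channel for being negative, OR
 * the per-channel results together and pass a single -1.0 (discard) or 0.0
 * value to the kill intrinsic. */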
static void kill_if_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	const struct tgsi_full_instruction * inst = emit_data->inst;
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	unsigned i;
	LLVMValueRef conds[TGSI_NUM_CHANNELS];

	for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
		LLVMValueRef value = lp_build_emit_fetch(bld_base, inst, 0, i);
		conds[i] = LLVMBuildFCmp(builder, LLVMRealOLT, value,
					 bld_base->base.zero, "");
	}

	/* Or the conditions together */
	for (i = TGSI_NUM_CHANNELS - 1; i > 0; i--) {
		conds[i - 1] = LLVMBuildOr(builder, conds[i], conds[i - 1], "");
	}

	emit_data->dst_type = LLVMVoidTypeInContext(gallivm->context);
	emit_data->arg_count = 1;
	emit_data->args[0] = LLVMBuildSelect(builder, conds[0],
					     lp_build_const_float(gallivm, -1.0f),
					     bld_base->base.zero, "");
}

static void kil_emit(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	unsigned i;
	for (i = 0; i < emit_data->arg_count; i++) {
		emit_data->output[i] = lp_build_intrinsic_unary(
			bld_base->base.gallivm->builder,
			action->intr_name,
			emit_data->dst_type, emit_data->args[i]);
	}
}

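/* Convert cube map coordinates to 2D face coordinates: run the coordinates
 * through the llvm.AMDGPU.cube intrinsic, scale the two face coordinates by
 * the reciprocal of the major axis plus a 1.5 bias, and return them together
 * with the intrinsic's fourth component (the face index) in yxw order. */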
static void radeon_llvm_cube_to_2d_coords(struct lp_build_tgsi_context *bld_base,
					  LLVMValueRef *in, LLVMValueRef *out)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMTypeRef type = bld_base->base.elem_type;
	LLVMValueRef coords[4];
	LLVMValueRef mad_args[3];
	LLVMValueRef v, cube_vec;
	unsigned i;

	cube_vec = lp_build_gather_values(bld_base->base.gallivm, in, 4);
	v = lp_build_intrinsic(builder, "llvm.AMDGPU.cube", LLVMVectorType(type, 4),
			       &cube_vec, 1, LLVMReadNoneAttribute);

	for (i = 0; i < 4; ++i)
		coords[i] = LLVMBuildExtractElement(builder, v,
						    lp_build_const_int32(gallivm, i), "");

	coords[2] = lp_build_intrinsic(builder, "llvm.fabs.f32",
				       type, &coords[2], 1, LLVMReadNoneAttribute);
	coords[2] = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_RCP, coords[2]);

	mad_args[1] = coords[2];
	mad_args[2] = LLVMConstReal(type, 1.5);

	mad_args[0] = coords[0];
	coords[0] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	mad_args[0] = coords[1];
	coords[1] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
					       mad_args[0], mad_args[1], mad_args[2]);

	/* apply xyz = yxw swizzle to coords */
	out[0] = coords[1];
	out[1] = coords[0];
	out[2] = coords[3];
}

void radeon_llvm_emit_prepare_cube_coords(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data,
	LLVMValueRef *coords_arg,
	LLVMValueRef *derivs_arg)
{
	unsigned target = emit_data->inst->Texture.Texture;
	unsigned opcode = emit_data->inst->Instruction.Opcode;
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef coords[4];
	unsigned i;

	radeon_llvm_cube_to_2d_coords(bld_base, coords_arg, coords);

	if (opcode == TGSI_OPCODE_TXD && derivs_arg) {
		LLVMValueRef derivs[4];
		int axis;

		/* Convert cube derivatives to 2D derivatives. */
		for (axis = 0; axis < 2; axis++) {
			LLVMValueRef shifted_cube_coords[4], shifted_coords[4];

			/* Shift the cube coordinates by the derivatives to get
			 * the cube coordinates of the "neighboring pixel".
			 */
			for (i = 0; i < 3; i++)
				shifted_cube_coords[i] =
					LLVMBuildFAdd(builder, coords_arg[i],
						      derivs_arg[axis*3+i], "");
			shifted_cube_coords[3] = LLVMGetUndef(bld_base->base.elem_type);

			/* Project the shifted cube coordinates onto the face. */
			radeon_llvm_cube_to_2d_coords(bld_base, shifted_cube_coords,
						      shifted_coords);

			/* Subtract both sets of 2D coordinates to get 2D derivatives.
			 * This won't work if the shifted coordinates ended up
			 * in a different face.
			 */
			for (i = 0; i < 2; i++)
				derivs[axis * 2 + i] =
					LLVMBuildFSub(builder, shifted_coords[i],
						      coords[i], "");
		}

		memcpy(derivs_arg, derivs, sizeof(derivs));
	}

	if (target == TGSI_TEXTURE_CUBE_ARRAY ||
	    target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
		/* for cube arrays coord.z = coord.w(array_index) * 8 + face */
		/* coords_arg.w component - array_index for cube arrays */
		coords[2] = lp_build_emit_llvm_ternary(bld_base, TGSI_OPCODE_MAD,
			coords_arg[3], lp_build_const_float(gallivm, 8.0), coords[2]);
	}

	/* Preserve compare/lod/bias. Put it in coords.w. */
	if (opcode == TGSI_OPCODE_TEX2 ||
	    opcode == TGSI_OPCODE_TXB2 ||
	    opcode == TGSI_OPCODE_TXL2) {
		coords[3] = coords_arg[4];
	} else if (opcode == TGSI_OPCODE_TXB ||
		   opcode == TGSI_OPCODE_TXL ||
		   target == TGSI_TEXTURE_SHADOWCUBE) {
		coords[3] = coords_arg[3];
	}

	memcpy(coords_arg, coords, sizeof(coords));
}

static void emit_icmp(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	unsigned pred;
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;

	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_USEQ: pred = LLVMIntEQ; break;
	case TGSI_OPCODE_USNE: pred = LLVMIntNE; break;
	case TGSI_OPCODE_USGE: pred = LLVMIntUGE; break;
	case TGSI_OPCODE_USLT: pred = LLVMIntULT; break;
	case TGSI_OPCODE_ISGE: pred = LLVMIntSGE; break;
	case TGSI_OPCODE_ISLT: pred = LLVMIntSLT; break;
	default:
		assert(!"unknown instruction");
		pred = 0;
		break;
	}

	LLVMValueRef v = LLVMBuildICmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

static void emit_ucmp(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	LLVMValueRef arg0 = LLVMBuildBitCast(builder, emit_data->args[0],
					     bld_base->uint_bld.elem_type, "");

	LLVMValueRef v = LLVMBuildICmp(builder, LLVMIntNE, arg0,
				       bld_base->uint_bld.zero, "");

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder, v, emit_data->args[1], emit_data->args[2], "");
}

static void emit_cmp(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMRealPredicate pred;
	LLVMValueRef cond;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_SGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_SEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_SLE: pred = LLVMRealOLE; break;
	case TGSI_OPCODE_SLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_SNE: pred = LLVMRealUNE; break;
	case TGSI_OPCODE_SGT: pred = LLVMRealOGT; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	cond = LLVMBuildFCmp(builder,
			     pred, emit_data->args[0], emit_data->args[1], "");

	emit_data->output[emit_data->chan] = LLVMBuildSelect(builder,
		cond, bld_base->base.one, bld_base->base.zero, "");
}

static void emit_fcmp(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	LLVMRealPredicate pred;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_FSEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_FSGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_FSLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_FSNE: pred = LLVMRealUNE; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	LLVMValueRef v = LLVMBuildFCmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

static void emit_dcmp(
	const struct lp_build_tgsi_action *action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMContextRef context = bld_base->base.gallivm->context;
	LLVMRealPredicate pred;

	/* Use ordered for everything but NE (which is usual for
	 * float comparisons)
	 */
	switch (emit_data->inst->Instruction.Opcode) {
	case TGSI_OPCODE_DSEQ: pred = LLVMRealOEQ; break;
	case TGSI_OPCODE_DSGE: pred = LLVMRealOGE; break;
	case TGSI_OPCODE_DSLT: pred = LLVMRealOLT; break;
	case TGSI_OPCODE_DSNE: pred = LLVMRealUNE; break;
	default: assert(!"unknown instruction"); pred = 0; break;
	}

	LLVMValueRef v = LLVMBuildFCmp(builder, pred,
				       emit_data->args[0], emit_data->args[1], "");

	v = LLVMBuildSExtOrBitCast(builder, v,
				   LLVMInt32TypeInContext(context), "");

	emit_data->output[emit_data->chan] = v;
}

static void emit_not(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef v = bitcast(bld_base, TGSI_TYPE_UNSIGNED,
				 emit_data->args[0]);
	emit_data->output[emit_data->chan] = LLVMBuildNot(builder, v, "");
}

static void emit_arl(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	LLVMValueRef floor_index = lp_build_emit_llvm_unary(bld_base, TGSI_OPCODE_FLR, emit_data->args[0]);
	emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
		floor_index, bld_base->base.int_elem_type, "");
}

static void emit_and(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAnd(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_or(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_uadd(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAdd(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_udiv(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildUDiv(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_idiv(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSDiv(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_mod(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSRem(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_umod(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildURem(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_shl(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildShl(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_ushr(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildLShr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_ishr(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAShr(builder,
		emit_data->args[0], emit_data->args[1], "");
}

static void emit_xor(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildXor(builder,
		emit_data->args[0], emit_data->args[1], "");
}

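/* SSG/ISSG: compute the sign of the operand (-1, 0 or 1) with two
 * compare-and-select steps, using integer or float operations depending on
 * the opcode. */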
static void emit_ssg(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;

	LLVMValueRef cmp, val;

	if (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_ISSG) {
		cmp = LLVMBuildICmp(builder, LLVMIntSGT, emit_data->args[0], bld_base->int_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, bld_base->int_bld.one, emit_data->args[0], "");
		cmp = LLVMBuildICmp(builder, LLVMIntSGE, val, bld_base->int_bld.zero, "");
		val = LLVMBuildSelect(builder, cmp, val, LLVMConstInt(bld_base->int_bld.elem_type, -1, true), "");
	} else { // float SSG
		cmp = LLVMBuildFCmp(builder, LLVMRealOGT, emit_data->args[0], bld_base->base.zero, "");
		val = LLVMBuildSelect(builder, cmp, bld_base->base.one, emit_data->args[0], "");
		cmp = LLVMBuildFCmp(builder, LLVMRealOGE, val, bld_base->base.zero, "");
		val = LLVMBuildSelect(builder, cmp, val, LLVMConstReal(bld_base->base.elem_type, -1), "");
	}

	emit_data->output[emit_data->chan] = val;
}

static void emit_ineg(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildNeg(builder,
		emit_data->args[0], "");
}

static void emit_dneg(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFNeg(builder,
		emit_data->args[0], "");
}

static void emit_frac(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	char *intr;

	if (emit_data->info->opcode == TGSI_OPCODE_FRC)
		intr = "llvm.floor.f32";
	else if (emit_data->info->opcode == TGSI_OPCODE_DFRAC)
		intr = "llvm.floor.f64";
	else {
		assert(0);
		return;
	}

	LLVMValueRef floor = lp_build_intrinsic(builder, intr, emit_data->dst_type,
						&emit_data->args[0], 1,
						LLVMReadNoneAttribute);
	emit_data->output[emit_data->chan] = LLVMBuildFSub(builder,
		emit_data->args[0], floor, "");
}

static void emit_f2i(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFPToSI(builder,
		emit_data->args[0], bld_base->int_bld.elem_type, "");
}

static void emit_f2u(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildFPToUI(builder,
		emit_data->args[0], bld_base->uint_bld.elem_type, "");
}

static void emit_i2f(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildSIToFP(builder,
		emit_data->args[0], bld_base->base.elem_type, "");
}

static void emit_u2f(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildUIToFP(builder,
		emit_data->args[0], bld_base->base.elem_type, "");
}

static void emit_immediate(struct lp_build_tgsi_context * bld_base,
			   const struct tgsi_full_immediate *imm)
{
	unsigned i;
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);

	for (i = 0; i < 4; ++i) {
		ctx->soa.immediates[ctx->soa.num_immediates][i] =
			LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false);
	}

	ctx->soa.num_immediates++;
}

void
build_tgsi_intrinsic_nomem(const struct lp_build_tgsi_action *action,
			   struct lp_build_tgsi_context *bld_base,
			   struct lp_build_emit_data *emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(base->gallivm->builder, action->intr_name,
				   emit_data->dst_type, emit_data->args,
				   emit_data->arg_count, LLVMReadNoneAttribute);
}

static void emit_bfi(const struct lp_build_tgsi_action * action,
		     struct lp_build_tgsi_context * bld_base,
		     struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef bfi_args[3];

	// Calculate the bitmask: (((1 << src3) - 1) << src2)
	bfi_args[0] = LLVMBuildShl(builder,
				   LLVMBuildSub(builder,
						LLVMBuildShl(builder,
							     bld_base->int_bld.one,
							     emit_data->args[3], ""),
						bld_base->int_bld.one, ""),
				   emit_data->args[2], "");

	bfi_args[1] = LLVMBuildShl(builder, emit_data->args[1],
				   emit_data->args[2], "");

	bfi_args[2] = emit_data->args[0];

	/* Calculate:
	 * (arg0 & arg1) | (~arg0 & arg2) = arg2 ^ (arg0 & (arg1 ^ arg2))
	 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
	 */
	emit_data->output[emit_data->chan] =
		LLVMBuildXor(builder, bfi_args[2],
			     LLVMBuildAnd(builder, bfi_args[0],
					  LLVMBuildXor(builder, bfi_args[1], bfi_args[2],
						       ""), ""), "");
}

/* this is ffs in C */
static void emit_lsb(const struct lp_build_tgsi_action * action,
		     struct lp_build_tgsi_context * bld_base,
		     struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMValueRef args[2] = {
		emit_data->args[0],

		/* The value of 1 means that ffs(x=0) = undef, so LLVM won't
		 * add special code to check for x=0. The reason is that
		 * the LLVM behavior for x=0 is different from what we
		 * need here.
		 *
		 * The hardware already implements the correct behavior.
		 */
		lp_build_const_int32(gallivm, 1)
	};

	emit_data->output[emit_data->chan] =
		lp_build_intrinsic(gallivm->builder, "llvm.cttz.i32",
				   emit_data->dst_type, args, Elements(args),
				   LLVMReadNoneAttribute);
}

/* Find the most significant set bit. */
static void emit_umsb(const struct lp_build_tgsi_action * action,
		      struct lp_build_tgsi_context * bld_base,
		      struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef args[2] = {
		emit_data->args[0],
		/* Don't generate code for handling zero: */
		lp_build_const_int32(gallivm, 1)
	};

	LLVMValueRef msb =
		lp_build_intrinsic(builder, "llvm.ctlz.i32",
				   emit_data->dst_type, args, Elements(args),
				   LLVMReadNoneAttribute);

	/* The HW returns the last bit index from MSB, but TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
			   msb, "");

	/* Check for zero: */
	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder,
				LLVMBuildICmp(builder, LLVMIntEQ, args[0],
					      bld_base->uint_bld.zero, ""),
				lp_build_const_int32(gallivm, -1), msb, "");
}

/* Find the most significant bit opposite of the sign bit. */
static void emit_imsb(const struct lp_build_tgsi_action * action,
		      struct lp_build_tgsi_context * bld_base,
		      struct lp_build_emit_data * emit_data)
{
	struct gallivm_state *gallivm = bld_base->base.gallivm;
	LLVMBuilderRef builder = gallivm->builder;
	LLVMValueRef arg = emit_data->args[0];

	LLVMValueRef msb =
		lp_build_intrinsic(builder, "llvm.AMDGPU.flbit.i32",
				   emit_data->dst_type, &arg, 1,
				   LLVMReadNoneAttribute);

	/* The HW returns the last bit index from MSB, but TGSI wants
	 * the index from LSB. Invert it by doing "31 - msb". */
	msb = LLVMBuildSub(builder, lp_build_const_int32(gallivm, 31),
			   msb, "");

	/* If arg == 0 || arg == -1 (0xffffffff), return -1. */
	LLVMValueRef all_ones = lp_build_const_int32(gallivm, -1);

	LLVMValueRef cond =
		LLVMBuildOr(builder,
			    LLVMBuildICmp(builder, LLVMIntEQ, arg,
					  bld_base->uint_bld.zero, ""),
			    LLVMBuildICmp(builder, LLVMIntEQ, arg,
					  all_ones, ""), "");

	emit_data->output[emit_data->chan] =
		LLVMBuildSelect(builder, cond, all_ones, msb, "");
}

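/* Set up the common TGSI->LLVM translation state: the gallivm wrapper, the
 * scalar build contexts, the fetch/store/declaration callbacks and the
 * per-opcode emit actions. */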
void radeon_llvm_context_init(struct radeon_llvm_context * ctx)
{
	struct lp_type type;

	/* Initialize the gallivm object:
	 * We are only using the module, context, and builder fields of this struct.
	 * This should be enough for us to be able to pass our gallivm struct to the
	 * helper functions in the gallivm module.
	 */
	memset(&ctx->gallivm, 0, sizeof(ctx->gallivm));
	memset(&ctx->soa, 0, sizeof(ctx->soa));
	ctx->gallivm.context = LLVMContextCreate();
	ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
								ctx->gallivm.context);
	ctx->gallivm.builder = LLVMCreateBuilderInContext(ctx->gallivm.context);

	struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;

	type.floating = TRUE;
	type.fixed = FALSE;
	type.sign = TRUE;
	type.norm = FALSE;
	type.width = 32;
	type.length = 1;

	lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
	lp_build_context_init(&ctx->soa.bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
	lp_build_context_init(&ctx->soa.bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
	{
		struct lp_type dbl_type;
		dbl_type = type;
		dbl_type.width *= 2;
		lp_build_context_init(&ctx->soa.bld_base.dbl_bld, &ctx->gallivm, dbl_type);
	}

	bld_base->soa = 1;
	bld_base->emit_store = radeon_llvm_emit_store;
	bld_base->emit_swizzle = emit_swizzle;
	bld_base->emit_declaration = emit_declaration;
	bld_base->emit_immediate = emit_immediate;

	bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = radeon_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;

	/* Allocate outputs */
	ctx->soa.outputs = ctx->outputs;

	lp_set_default_actions(bld_base);

	bld_base->op_actions[TGSI_OPCODE_ABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_ABS].intr_name = "llvm.fabs.f32";
	bld_base->op_actions[TGSI_OPCODE_AND].emit = emit_and;
	bld_base->op_actions[TGSI_OPCODE_ARL].emit = emit_arl;
	bld_base->op_actions[TGSI_OPCODE_BFI].emit = emit_bfi;
	bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
	bld_base->op_actions[TGSI_OPCODE_BREV].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_BREV].intr_name = "llvm.AMDGPU.brev";
	bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
	bld_base->op_actions[TGSI_OPCODE_CEIL].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CEIL].intr_name = "llvm.ceil.f32";
	bld_base->op_actions[TGSI_OPCODE_CLAMP].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CLAMP].intr_name = "llvm.AMDIL.clamp.";
	bld_base->op_actions[TGSI_OPCODE_CMP].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_CMP].intr_name = "llvm.AMDGPU.cndlt";
	bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
	bld_base->op_actions[TGSI_OPCODE_COS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_COS].intr_name = "llvm.cos.f32";
	bld_base->op_actions[TGSI_OPCODE_DABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DABS].intr_name = "llvm.fabs.f64";
	bld_base->op_actions[TGSI_OPCODE_DFMA].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DFMA].intr_name = "llvm.fma.f64";
	bld_base->op_actions[TGSI_OPCODE_DFRAC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_DNEG].emit = emit_dneg;
	bld_base->op_actions[TGSI_OPCODE_DSEQ].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSGE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSLT].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DSNE].emit = emit_dcmp;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DRSQ].intr_name = "llvm.AMDGPU.rsq.f64";
	bld_base->op_actions[TGSI_OPCODE_DSQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_DSQRT].intr_name = "llvm.sqrt.f64";
	bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
	bld_base->op_actions[TGSI_OPCODE_EX2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_EX2].intr_name = "llvm.AMDIL.exp.";
	bld_base->op_actions[TGSI_OPCODE_FLR].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_FLR].intr_name = "llvm.floor.f32";
	bld_base->op_actions[TGSI_OPCODE_FMA].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_FMA].intr_name = "llvm.fma.f32";
	bld_base->op_actions[TGSI_OPCODE_FRC].emit = emit_frac;
	bld_base->op_actions[TGSI_OPCODE_F2I].emit = emit_f2i;
	bld_base->op_actions[TGSI_OPCODE_F2U].emit = emit_f2u;
	bld_base->op_actions[TGSI_OPCODE_FSEQ].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSGE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSLT].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_FSNE].emit = emit_fcmp;
	bld_base->op_actions[TGSI_OPCODE_IABS].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IABS].intr_name = "llvm.AMDIL.abs.";
	bld_base->op_actions[TGSI_OPCODE_IBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IBFE].intr_name = "llvm.AMDGPU.bfe.i32";
	bld_base->op_actions[TGSI_OPCODE_IDIV].emit = emit_idiv;
	bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
	bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
	bld_base->op_actions[TGSI_OPCODE_IMAX].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IMAX].intr_name = "llvm.AMDGPU.imax";
	bld_base->op_actions[TGSI_OPCODE_IMIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_IMIN].intr_name = "llvm.AMDGPU.imin";
	bld_base->op_actions[TGSI_OPCODE_IMSB].emit = emit_imsb;
	bld_base->op_actions[TGSI_OPCODE_INEG].emit = emit_ineg;
	bld_base->op_actions[TGSI_OPCODE_ISHR].emit = emit_ishr;
	bld_base->op_actions[TGSI_OPCODE_ISGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_ISSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_I2F].emit = emit_i2f;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].fetch_args = kill_if_fetch_args;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].emit = kil_emit;
	bld_base->op_actions[TGSI_OPCODE_KILL_IF].intr_name = "llvm.AMDGPU.kill";
	bld_base->op_actions[TGSI_OPCODE_KILL].emit = lp_build_tgsi_intrinsic;
	bld_base->op_actions[TGSI_OPCODE_KILL].intr_name = "llvm.AMDGPU.kilp";
	bld_base->op_actions[TGSI_OPCODE_LSB].emit = emit_lsb;
	bld_base->op_actions[TGSI_OPCODE_LG2].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_LG2].intr_name = "llvm.log2.f32";
	bld_base->op_actions[TGSI_OPCODE_LRP].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_LRP].intr_name = "llvm.AMDGPU.lrp";
	bld_base->op_actions[TGSI_OPCODE_MOD].emit = emit_mod;
	bld_base->op_actions[TGSI_OPCODE_UMSB].emit = emit_umsb;
	bld_base->op_actions[TGSI_OPCODE_NOT].emit = emit_not;
	bld_base->op_actions[TGSI_OPCODE_OR].emit = emit_or;
	bld_base->op_actions[TGSI_OPCODE_POPC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POPC].intr_name = "llvm.ctpop.i32";
	bld_base->op_actions[TGSI_OPCODE_POW].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_POW].intr_name = "llvm.pow.f32";
	bld_base->op_actions[TGSI_OPCODE_ROUND].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_ROUND].intr_name = "llvm.AMDIL.round.nearest.";
	bld_base->op_actions[TGSI_OPCODE_RSQ].intr_name =
		HAVE_LLVM >= 0x0305 ? "llvm.AMDGPU.rsq.clamped.f32" : "llvm.AMDGPU.rsq";
	bld_base->op_actions[TGSI_OPCODE_RSQ].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SGE].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SEQ].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SHL].emit = emit_shl;
	bld_base->op_actions[TGSI_OPCODE_SLE].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SLT].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SNE].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SGT].emit = emit_cmp;
	bld_base->op_actions[TGSI_OPCODE_SIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SIN].intr_name = "llvm.sin.f32";
	bld_base->op_actions[TGSI_OPCODE_SQRT].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_SQRT].intr_name = "llvm.sqrt.f32";
	bld_base->op_actions[TGSI_OPCODE_SSG].emit = emit_ssg;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_TRUNC].intr_name = "llvm.AMDGPU.trunc";
	bld_base->op_actions[TGSI_OPCODE_UADD].emit = emit_uadd;
	bld_base->op_actions[TGSI_OPCODE_UBFE].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_UBFE].intr_name = "llvm.AMDGPU.bfe.u32";
	bld_base->op_actions[TGSI_OPCODE_UDIV].emit = emit_udiv;
	bld_base->op_actions[TGSI_OPCODE_UMAX].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_UMAX].intr_name = "llvm.AMDGPU.umax";
	bld_base->op_actions[TGSI_OPCODE_UMIN].emit = build_tgsi_intrinsic_nomem;
	bld_base->op_actions[TGSI_OPCODE_UMIN].intr_name = "llvm.AMDGPU.umin";
	bld_base->op_actions[TGSI_OPCODE_UMOD].emit = emit_umod;
	bld_base->op_actions[TGSI_OPCODE_USEQ].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USGE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USHR].emit = emit_ushr;
	bld_base->op_actions[TGSI_OPCODE_USLT].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_USNE].emit = emit_icmp;
	bld_base->op_actions[TGSI_OPCODE_U2F].emit = emit_u2f;
	bld_base->op_actions[TGSI_OPCODE_XOR].emit = emit_xor;
	bld_base->op_actions[TGSI_OPCODE_UCMP].emit = emit_ucmp;
}

void radeon_llvm_create_func(struct radeon_llvm_context * ctx,
			     LLVMTypeRef *ParamTypes, unsigned ParamCount)
{
	LLVMTypeRef main_fn_type;
	LLVMBasicBlockRef main_fn_body;

	/* Setup the function */
	main_fn_type = LLVMFunctionType(LLVMVoidTypeInContext(ctx->gallivm.context),
					ParamTypes, ParamCount, 0);
	ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, "main", main_fn_type);
	main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
						     ctx->main_fn, "main_body");
	LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
}

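/* Terminate the main function and run a small per-function optimization
 * pipeline; mem2reg promotes the register allocas created above to SSA
 * values before the module is handed to the backend. */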
void radeon_llvm_finalize_module(struct radeon_llvm_context * ctx)
{
	struct gallivm_state * gallivm = ctx->soa.bld_base.base.gallivm;

	/* End the main function with a return. */
	LLVMBuildRetVoid(gallivm->builder);

	/* Create the pass manager */
	ctx->gallivm.passmgr = LLVMCreateFunctionPassManagerForModule(
		gallivm->module);

	/* This pass should eliminate all the load and store instructions */
	LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);

	/* Add some optimization passes */
	LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
	LLVMAddLICMPass(gallivm->passmgr);
	LLVMAddAggressiveDCEPass(gallivm->passmgr);
	LLVMAddCFGSimplificationPass(gallivm->passmgr);
	LLVMAddInstructionCombiningPass(gallivm->passmgr);

	/* Run the passes */
	LLVMRunFunctionPassManager(gallivm->passmgr, ctx->main_fn);

	LLVMDisposeBuilder(gallivm->builder);
	LLVMDisposePassManager(gallivm->passmgr);
}

void radeon_llvm_dispose(struct radeon_llvm_context * ctx)
{
	LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
	LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);
	FREE(ctx->arrays);
	ctx->arrays = NULL;
	FREE(ctx->temps);
	ctx->temps = NULL;
	ctx->temps_count = 0;
	FREE(ctx->loop);
	ctx->loop = NULL;
	ctx->loop_depth_max = 0;
	FREE(ctx->branch);
	ctx->branch = NULL;
	ctx->branch_depth_max = 0;
}