radeon/ac: switch from radeon_shader_binary to ac_shader_binary
[mesa.git] src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "si_shader_internal.h"
25 #include "si_pipe.h"
26 #include "radeon/radeon_elf_util.h"
27
28 #include "gallivm/lp_bld_const.h"
29 #include "gallivm/lp_bld_gather.h"
30 #include "gallivm/lp_bld_flow.h"
31 #include "gallivm/lp_bld_init.h"
32 #include "gallivm/lp_bld_intr.h"
33 #include "gallivm/lp_bld_misc.h"
34 #include "gallivm/lp_bld_swizzle.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "util/u_debug.h"
40
41 #include <stdio.h>
42 #include <llvm-c/Transforms/IPO.h>
43 #include <llvm-c/Transforms/Scalar.h>
44
45 /* Data for if/else/endif and bgnloop/endloop control flow structures.
46 */
47 struct si_llvm_flow {
48 /* Loop exit or next part of if/else/endif. */
49 LLVMBasicBlockRef next_block;
50 LLVMBasicBlockRef loop_entry_block;
51 };
52
53 #define CPU_STRING_LEN 30
54 #define FS_STRING_LEN 30
55 #define TRIPLE_STRING_LEN 7
56
57 /**
58 * Shader types for the LLVM backend.
59 */
60 enum si_llvm_shader_type {
61 RADEON_LLVM_SHADER_PS = 0,
62 RADEON_LLVM_SHADER_VS = 1,
63 RADEON_LLVM_SHADER_GS = 2,
64 RADEON_LLVM_SHADER_CS = 3,
65 };
66
67 enum si_llvm_calling_convention {
68 RADEON_LLVM_AMDGPU_VS = 87,
69 RADEON_LLVM_AMDGPU_GS = 88,
70 RADEON_LLVM_AMDGPU_PS = 89,
71 RADEON_LLVM_AMDGPU_CS = 90,
72 };
73
74 void si_llvm_add_attribute(LLVMValueRef F, const char *name, int value)
75 {
76 char str[16];
77
78 snprintf(str, sizeof(str), "%i", value);
79 LLVMAddTargetDependentFunctionAttr(F, name, str);
80 }
81
 82 /**
 83  * Set the shader type we want to compile.
 84  *
 85  * @param type  the PIPE_SHADER_* type to set on function \p F
 86  */
87 void si_llvm_shader_type(LLVMValueRef F, unsigned type)
88 {
89 enum si_llvm_shader_type llvm_type;
90 enum si_llvm_calling_convention calling_conv;
91
92 switch (type) {
93 case PIPE_SHADER_VERTEX:
94 case PIPE_SHADER_TESS_CTRL:
95 case PIPE_SHADER_TESS_EVAL:
96 llvm_type = RADEON_LLVM_SHADER_VS;
97 calling_conv = RADEON_LLVM_AMDGPU_VS;
98 break;
99 case PIPE_SHADER_GEOMETRY:
100 llvm_type = RADEON_LLVM_SHADER_GS;
101 calling_conv = RADEON_LLVM_AMDGPU_GS;
102 break;
103 case PIPE_SHADER_FRAGMENT:
104 llvm_type = RADEON_LLVM_SHADER_PS;
105 calling_conv = RADEON_LLVM_AMDGPU_PS;
106 break;
107 case PIPE_SHADER_COMPUTE:
108 llvm_type = RADEON_LLVM_SHADER_CS;
109 calling_conv = RADEON_LLVM_AMDGPU_CS;
110 break;
111 default:
112 unreachable("Unhandled shader type");
113 }
114
115 if (HAVE_LLVM >= 0x309)
116 LLVMSetFunctionCallConv(F, calling_conv);
117 else
118 si_llvm_add_attribute(F, "ShaderType", llvm_type);
119 }
120
121 static void init_amdgpu_target()
122 {
123 gallivm_init_llvm_targets();
124 #if HAVE_LLVM < 0x0307
125 LLVMInitializeR600TargetInfo();
126 LLVMInitializeR600Target();
127 LLVMInitializeR600TargetMC();
128 LLVMInitializeR600AsmPrinter();
129 #else
130 LLVMInitializeAMDGPUTargetInfo();
131 LLVMInitializeAMDGPUTarget();
132 LLVMInitializeAMDGPUTargetMC();
133 LLVMInitializeAMDGPUAsmPrinter();
134
135 #endif
136 }
137
138 static once_flag init_amdgpu_target_once_flag = ONCE_FLAG_INIT;
139
140 LLVMTargetRef si_llvm_get_amdgpu_target(const char *triple)
141 {
142 LLVMTargetRef target = NULL;
143 char *err_message = NULL;
144
145 call_once(&init_amdgpu_target_once_flag, init_amdgpu_target);
146
147 if (LLVMGetTargetFromTriple(triple, &target, &err_message)) {
148 fprintf(stderr, "Cannot find target for triple %s ", triple);
149 if (err_message) {
150 fprintf(stderr, "%s\n", err_message);
151 }
152 LLVMDisposeMessage(err_message);
153 return NULL;
154 }
155 return target;
156 }
157
158 struct si_llvm_diagnostics {
159 struct pipe_debug_callback *debug;
160 unsigned retval;
161 };
162
163 static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
164 {
165 struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
166 LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
167 char *description = LLVMGetDiagInfoDescription(di);
168 const char *severity_str = NULL;
169
170 switch (severity) {
171 case LLVMDSError:
172 severity_str = "error";
173 break;
174 case LLVMDSWarning:
175 severity_str = "warning";
176 break;
177 case LLVMDSRemark:
178 severity_str = "remark";
179 break;
180 case LLVMDSNote:
181 severity_str = "note";
182 break;
183 default:
184 severity_str = "unknown";
185 }
186
187 pipe_debug_message(diag->debug, SHADER_INFO,
188 "LLVM diagnostic (%s): %s", severity_str, description);
189
190 if (severity == LLVMDSError) {
191 diag->retval = 1;
192 fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n", description);
193 }
194
195 LLVMDisposeMessage(description);
196 }
197
198 /**
199 * Compile an LLVM module to machine code.
200 *
201 * @returns 0 for success, 1 for failure
202 */
203 unsigned si_llvm_compile(LLVMModuleRef M, struct ac_shader_binary *binary,
204 LLVMTargetMachineRef tm,
205 struct pipe_debug_callback *debug)
206 {
207 struct si_llvm_diagnostics diag;
208 char *err;
209 LLVMContextRef llvm_ctx;
210 LLVMMemoryBufferRef out_buffer;
211 unsigned buffer_size;
212 const char *buffer_data;
213 LLVMBool mem_err;
214
215 diag.debug = debug;
216 diag.retval = 0;
217
218 /* Set up the diagnostic handler */
219 llvm_ctx = LLVMGetModuleContext(M);
220
221 LLVMContextSetDiagnosticHandler(llvm_ctx, si_diagnostic_handler, &diag);
222
223 /* Compile IR */
224 mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile, &err,
225 &out_buffer);
226
227 /* Process Errors/Warnings */
228 if (mem_err) {
229 fprintf(stderr, "%s: %s", __FUNCTION__, err);
230 pipe_debug_message(debug, SHADER_INFO,
231 "LLVM emit error: %s", err);
232 FREE(err);
233 diag.retval = 1;
234 goto out;
235 }
236
237 /* Extract Shader Code */
238 buffer_size = LLVMGetBufferSize(out_buffer);
239 buffer_data = LLVMGetBufferStart(out_buffer);
240
241 radeon_elf_read(buffer_data, buffer_size, binary);
242
243 /* Clean up */
244 LLVMDisposeMemoryBuffer(out_buffer);
245
246 out:
247 if (diag.retval != 0)
248 pipe_debug_message(debug, SHADER_INFO, "LLVM compile failed");
249 return diag.retval;
250 }
251
252 LLVMTypeRef tgsi2llvmtype(struct lp_build_tgsi_context *bld_base,
253 enum tgsi_opcode_type type)
254 {
255 LLVMContextRef ctx = bld_base->base.gallivm->context;
256
257 switch (type) {
258 case TGSI_TYPE_UNSIGNED:
259 case TGSI_TYPE_SIGNED:
260 return LLVMInt32TypeInContext(ctx);
261 case TGSI_TYPE_UNSIGNED64:
262 case TGSI_TYPE_SIGNED64:
263 return LLVMInt64TypeInContext(ctx);
264 case TGSI_TYPE_DOUBLE:
265 return LLVMDoubleTypeInContext(ctx);
266 case TGSI_TYPE_UNTYPED:
267 case TGSI_TYPE_FLOAT:
268 return LLVMFloatTypeInContext(ctx);
269 default: break;
270 }
271 return 0;
272 }
273
274 LLVMValueRef bitcast(struct lp_build_tgsi_context *bld_base,
275 enum tgsi_opcode_type type, LLVMValueRef value)
276 {
277 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
278 LLVMTypeRef dst_type = tgsi2llvmtype(bld_base, type);
279
280 if (dst_type)
281 return LLVMBuildBitCast(builder, value, dst_type, "");
282 else
283 return value;
284 }
285
286 /**
287 * Return a value that is equal to the given i32 \p index if it lies in [0,num)
288 * or an undefined value in the same interval otherwise.
289 */
290 LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
291 LLVMValueRef index,
292 unsigned num)
293 {
294 struct gallivm_state *gallivm = &ctx->gallivm;
295 LLVMBuilderRef builder = gallivm->builder;
296 LLVMValueRef c_max = lp_build_const_int32(gallivm, num - 1);
297 LLVMValueRef cc;
298
299 if (util_is_power_of_two(num)) {
300 index = LLVMBuildAnd(builder, index, c_max, "");
301 } else {
302 /* In theory, this clamp pattern should result in code that is
303 * as good as the bit-wise AND above.
304 *
305 * In practice, LLVM generates worse code (at the time of
306 * writing), because its value tracking is not strong enough.
307 */
308 cc = LLVMBuildICmp(builder, LLVMIntULE, index, c_max, "");
309 index = LLVMBuildSelect(builder, cc, index, c_max, "");
310 }
311
312 return index;
313 }
314
315 static struct si_llvm_flow *
316 get_current_flow(struct si_shader_context *ctx)
317 {
318 if (ctx->flow_depth > 0)
319 return &ctx->flow[ctx->flow_depth - 1];
320 return NULL;
321 }
322
323 static struct si_llvm_flow *
324 get_innermost_loop(struct si_shader_context *ctx)
325 {
326 for (unsigned i = ctx->flow_depth; i > 0; --i) {
327 if (ctx->flow[i - 1].loop_entry_block)
328 return &ctx->flow[i - 1];
329 }
330 return NULL;
331 }
332
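/* Push a new control-flow entry onto the stack, growing it if necessary. */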
333 static struct si_llvm_flow *
334 push_flow(struct si_shader_context *ctx)
335 {
336 struct si_llvm_flow *flow;
337
338 if (ctx->flow_depth >= ctx->flow_depth_max) {
339 unsigned new_max = MAX2(ctx->flow_depth << 1, RADEON_LLVM_INITIAL_CF_DEPTH);
340 ctx->flow = REALLOC(ctx->flow,
341 ctx->flow_depth_max * sizeof(*ctx->flow),
342 new_max * sizeof(*ctx->flow));
343 ctx->flow_depth_max = new_max;
344 }
345
346 flow = &ctx->flow[ctx->flow_depth];
347 ctx->flow_depth++;
348
349 flow->next_block = NULL;
350 flow->loop_entry_block = NULL;
351 return flow;
352 }
353
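/* Apply a TGSI swizzle by shuffling the channels of a vector value. */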
354 static LLVMValueRef emit_swizzle(struct lp_build_tgsi_context *bld_base,
355 LLVMValueRef value,
356 unsigned swizzle_x,
357 unsigned swizzle_y,
358 unsigned swizzle_z,
359 unsigned swizzle_w)
360 {
361 LLVMValueRef swizzles[4];
362 LLVMTypeRef i32t =
363 LLVMInt32TypeInContext(bld_base->base.gallivm->context);
364
365 swizzles[0] = LLVMConstInt(i32t, swizzle_x, 0);
366 swizzles[1] = LLVMConstInt(i32t, swizzle_y, 0);
367 swizzles[2] = LLVMConstInt(i32t, swizzle_z, 0);
368 swizzles[3] = LLVMConstInt(i32t, swizzle_w, 0);
369
370 return LLVMBuildShuffleVector(bld_base->base.gallivm->builder,
371 value,
372 LLVMGetUndef(LLVMTypeOf(value)),
373 LLVMConstVector(swizzles, 4), "");
374 }
375
 376 /**
 377  * Return the 1-based ID of the array covering the given temporary register
 378  * index, or 0 if the register is not part of a declared array.
 379  */
380 static unsigned
381 get_temp_array_id(struct lp_build_tgsi_context *bld_base,
382 unsigned reg_index,
383 const struct tgsi_ind_register *reg)
384 {
385 struct si_shader_context *ctx = si_shader_context(bld_base);
386 unsigned num_arrays = ctx->bld_base.info->array_max[TGSI_FILE_TEMPORARY];
387 unsigned i;
388
389 if (reg && reg->ArrayID > 0 && reg->ArrayID <= num_arrays)
390 return reg->ArrayID;
391
392 for (i = 0; i < num_arrays; i++) {
393 const struct tgsi_array_info *array = &ctx->temp_arrays[i];
394
395 if (reg_index >= array->range.First && reg_index <= array->range.Last)
396 return i + 1;
397 }
398
399 return 0;
400 }
401
402 static struct tgsi_declaration_range
403 get_array_range(struct lp_build_tgsi_context *bld_base,
404 unsigned File, unsigned reg_index,
405 const struct tgsi_ind_register *reg)
406 {
407 struct si_shader_context *ctx = si_shader_context(bld_base);
408 struct tgsi_declaration_range range;
409
410 if (File == TGSI_FILE_TEMPORARY) {
411 unsigned array_id = get_temp_array_id(bld_base, reg_index, reg);
412 if (array_id)
413 return ctx->temp_arrays[array_id - 1].range;
414 }
415
416 range.First = 0;
417 range.Last = bld_base->info->file_max[File];
418 return range;
419 }
420
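/* Build the i32 index for an indirect access: the loaded address register
 * value plus a constant offset, or just the offset if there is no indirect
 * register.
 */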
421 static LLVMValueRef
422 emit_array_index(struct si_shader_context *ctx,
423 const struct tgsi_ind_register *reg,
424 unsigned offset)
425 {
426 struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
427
428 if (!reg) {
429 return lp_build_const_int32(gallivm, offset);
430 }
431 LLVMValueRef addr = LLVMBuildLoad(gallivm->builder, ctx->addrs[reg->Index][reg->Swizzle], "");
432 return LLVMBuildAdd(gallivm->builder, addr, lp_build_const_int32(gallivm, offset), "");
433 }
434
435 /**
436 * For indirect registers, construct a pointer directly to the requested
437 * element using getelementptr if possible.
438 *
439 * Returns NULL if the insertelement/extractelement fallback for array access
440 * must be used.
441 */
442 static LLVMValueRef
443 get_pointer_into_array(struct si_shader_context *ctx,
444 unsigned file,
445 unsigned swizzle,
446 unsigned reg_index,
447 const struct tgsi_ind_register *reg_indirect)
448 {
449 unsigned array_id;
450 struct tgsi_array_info *array;
451 struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
452 LLVMBuilderRef builder = gallivm->builder;
453 LLVMValueRef idxs[2];
454 LLVMValueRef index;
455 LLVMValueRef alloca;
456
457 if (file != TGSI_FILE_TEMPORARY)
458 return NULL;
459
460 array_id = get_temp_array_id(&ctx->bld_base, reg_index, reg_indirect);
461 if (!array_id)
462 return NULL;
463
464 alloca = ctx->temp_array_allocas[array_id - 1];
465 if (!alloca)
466 return NULL;
467
468 array = &ctx->temp_arrays[array_id - 1];
469
470 if (!(array->writemask & (1 << swizzle)))
471 return ctx->undef_alloca;
472
473 index = emit_array_index(ctx, reg_indirect,
474 reg_index - ctx->temp_arrays[array_id - 1].range.First);
475
476 /* Ensure that the index is within a valid range, to guard against
477 * VM faults and overwriting critical data (e.g. spilled resource
478 * descriptors).
479 *
480 * TODO It should be possible to avoid the additional instructions
481 * if LLVM is changed so that it guarantees:
482 * 1. the scratch space descriptor isolates the current wave (this
483 * could even save the scratch offset SGPR at the cost of an
484 * additional SALU instruction)
485 * 2. the memory for allocas must be allocated at the _end_ of the
486 * scratch space (after spilled registers)
487 */
488 index = si_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
489
490 index = LLVMBuildMul(
491 builder, index,
492 lp_build_const_int32(gallivm, util_bitcount(array->writemask)),
493 "");
494 index = LLVMBuildAdd(
495 builder, index,
496 lp_build_const_int32(
497 gallivm,
498 util_bitcount(array->writemask & ((1 << swizzle) - 1))),
499 "");
500 idxs[0] = ctx->bld_base.uint_bld.zero;
501 idxs[1] = index;
502 return LLVMBuildGEP(builder, alloca, idxs, 2, "");
503 }
504
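/* Merge two 32-bit values (low and high halves) into a single value of the
 * given 64-bit TGSI type.
 */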
505 LLVMValueRef
506 si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
507 enum tgsi_opcode_type type,
508 LLVMValueRef ptr,
509 LLVMValueRef ptr2)
510 {
511 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
512 LLVMValueRef result;
513
514 result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
515
516 result = LLVMBuildInsertElement(builder,
517 result,
518 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr),
519 bld_base->int_bld.zero, "");
520 result = LLVMBuildInsertElement(builder,
521 result,
522 bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr2),
523 bld_base->int_bld.one, "");
524 return bitcast(bld_base, type, result);
525 }
526
527 static LLVMValueRef
528 emit_array_fetch(struct lp_build_tgsi_context *bld_base,
529 unsigned File, enum tgsi_opcode_type type,
530 struct tgsi_declaration_range range,
531 unsigned swizzle)
532 {
533 struct si_shader_context *ctx = si_shader_context(bld_base);
534 struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
535
536 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
537
538 unsigned i, size = range.Last - range.First + 1;
539 LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
540 LLVMValueRef result = LLVMGetUndef(vec);
541
542 struct tgsi_full_src_register tmp_reg = {};
543 tmp_reg.Register.File = File;
544
545 for (i = 0; i < size; ++i) {
546 tmp_reg.Register.Index = i + range.First;
547 LLVMValueRef temp = si_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
548 result = LLVMBuildInsertElement(builder, result, temp,
549 lp_build_const_int32(gallivm, i), "array_vector");
550 }
551 return result;
552 }
553
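/* Load one channel of an indirectly addressed register, either directly
 * through a pointer into the array alloca or via the extractelement fallback.
 */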
554 static LLVMValueRef
555 load_value_from_array(struct lp_build_tgsi_context *bld_base,
556 unsigned file,
557 enum tgsi_opcode_type type,
558 unsigned swizzle,
559 unsigned reg_index,
560 const struct tgsi_ind_register *reg_indirect)
561 {
562 struct si_shader_context *ctx = si_shader_context(bld_base);
563 struct gallivm_state *gallivm = bld_base->base.gallivm;
564 LLVMBuilderRef builder = gallivm->builder;
565 LLVMValueRef ptr;
566
567 ptr = get_pointer_into_array(ctx, file, swizzle, reg_index, reg_indirect);
568 if (ptr) {
569 LLVMValueRef val = LLVMBuildLoad(builder, ptr, "");
570 if (tgsi_type_is_64bit(type)) {
571 LLVMValueRef ptr_hi, val_hi;
572 ptr_hi = LLVMBuildGEP(builder, ptr, &bld_base->uint_bld.one, 1, "");
573 val_hi = LLVMBuildLoad(builder, ptr_hi, "");
574 val = si_llvm_emit_fetch_64bit(bld_base, type, val, val_hi);
575 }
576
577 return val;
578 } else {
579 struct tgsi_declaration_range range =
580 get_array_range(bld_base, file, reg_index, reg_indirect);
581 LLVMValueRef index =
582 emit_array_index(ctx, reg_indirect, reg_index - range.First);
583 LLVMValueRef array =
584 emit_array_fetch(bld_base, file, type, range, swizzle);
585 return LLVMBuildExtractElement(builder, array, index, "");
586 }
587 }
588
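/* Store one channel of an indirectly addressed register; the counterpart of
 * load_value_from_array.
 */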
589 static void
590 store_value_to_array(struct lp_build_tgsi_context *bld_base,
591 LLVMValueRef value,
592 unsigned file,
593 unsigned chan_index,
594 unsigned reg_index,
595 const struct tgsi_ind_register *reg_indirect)
596 {
597 struct si_shader_context *ctx = si_shader_context(bld_base);
598 struct gallivm_state *gallivm = bld_base->base.gallivm;
599 LLVMBuilderRef builder = gallivm->builder;
600 LLVMValueRef ptr;
601
602 ptr = get_pointer_into_array(ctx, file, chan_index, reg_index, reg_indirect);
603 if (ptr) {
604 LLVMBuildStore(builder, value, ptr);
605 } else {
606 unsigned i, size;
607 struct tgsi_declaration_range range = get_array_range(bld_base, file, reg_index, reg_indirect);
608 LLVMValueRef index = emit_array_index(ctx, reg_indirect, reg_index - range.First);
609 LLVMValueRef array =
610 emit_array_fetch(bld_base, file, TGSI_TYPE_FLOAT, range, chan_index);
611 LLVMValueRef temp_ptr;
612
613 array = LLVMBuildInsertElement(builder, array, value, index, "");
614
615 size = range.Last - range.First + 1;
616 for (i = 0; i < size; ++i) {
617 switch(file) {
618 case TGSI_FILE_OUTPUT:
619 temp_ptr = ctx->outputs[i + range.First][chan_index];
620 break;
621
622 case TGSI_FILE_TEMPORARY:
623 if (range.First + i >= ctx->temps_count)
624 continue;
625 temp_ptr = ctx->temps[(i + range.First) * TGSI_NUM_CHANNELS + chan_index];
626 break;
627
628 default:
629 continue;
630 }
631 value = LLVMBuildExtractElement(builder, array,
632 lp_build_const_int32(gallivm, i), "");
633 LLVMBuildStore(builder, value, temp_ptr);
634 }
635 }
636 }
637
 638 /* Return true if FS inputs should be preloaded at the beginning of the
 639  * shader; otherwise they are reloaded at each use. Preloading is required
 640  * if the shader uses derivatives and KILL, because KILL can leave WQM and
 641  * a lazy input load would then no longer be in WQM.
 642  */
643 static bool si_preload_fs_inputs(struct si_shader_context *ctx)
644 {
645 struct si_shader_selector *sel = ctx->shader->selector;
646
647 return sel->info.uses_derivatives &&
648 sel->info.uses_kill;
649 }
650
651 static LLVMValueRef
652 get_output_ptr(struct lp_build_tgsi_context *bld_base, unsigned index,
653 unsigned chan)
654 {
655 struct si_shader_context *ctx = si_shader_context(bld_base);
656
657 assert(index <= ctx->bld_base.info->file_max[TGSI_FILE_OUTPUT]);
658 return ctx->outputs[index][chan];
659 }
660
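/* Fetch a TGSI source register. A swizzle of ~0 gathers all four channels
 * into a vector; 64-bit types read two consecutive channels.
 */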
661 LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
662 const struct tgsi_full_src_register *reg,
663 enum tgsi_opcode_type type,
664 unsigned swizzle)
665 {
666 struct si_shader_context *ctx = si_shader_context(bld_base);
667 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
668 LLVMValueRef result = NULL, ptr, ptr2;
669
670 if (swizzle == ~0) {
671 LLVMValueRef values[TGSI_NUM_CHANNELS];
672 unsigned chan;
673 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
674 values[chan] = si_llvm_emit_fetch(bld_base, reg, type, chan);
675 }
676 return lp_build_gather_values(bld_base->base.gallivm, values,
677 TGSI_NUM_CHANNELS);
678 }
679
680 if (reg->Register.Indirect) {
681 LLVMValueRef load = load_value_from_array(bld_base, reg->Register.File, type,
682 swizzle, reg->Register.Index, &reg->Indirect);
683 return bitcast(bld_base, type, load);
684 }
685
686 switch(reg->Register.File) {
687 case TGSI_FILE_IMMEDIATE: {
688 LLVMTypeRef ctype = tgsi2llvmtype(bld_base, type);
689 if (tgsi_type_is_64bit(type)) {
690 result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
691 result = LLVMConstInsertElement(result,
692 ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle],
693 bld_base->int_bld.zero);
694 result = LLVMConstInsertElement(result,
695 ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1],
696 bld_base->int_bld.one);
697 return LLVMConstBitCast(result, ctype);
698 } else {
699 return LLVMConstBitCast(ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle], ctype);
700 }
701 }
702
703 case TGSI_FILE_INPUT: {
704 unsigned index = reg->Register.Index;
705 LLVMValueRef input[4];
706
707 /* I don't think doing this for vertex shaders is beneficial.
708 * For those, we want to make sure the VMEM loads are executed
709 * only once. Fragment shaders don't care much, because
710 * v_interp instructions are much cheaper than VMEM loads.
711 */
712 if (!si_preload_fs_inputs(ctx) &&
713 ctx->bld_base.info->processor == PIPE_SHADER_FRAGMENT)
714 ctx->load_input(ctx, index, &ctx->input_decls[index], input);
715 else
716 memcpy(input, &ctx->inputs[index * 4], sizeof(input));
717
718 result = input[swizzle];
719
720 if (tgsi_type_is_64bit(type)) {
721 ptr = result;
722 ptr2 = input[swizzle + 1];
723 return si_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
724 }
725 break;
726 }
727
728 case TGSI_FILE_TEMPORARY:
729 if (reg->Register.Index >= ctx->temps_count)
730 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
731 ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
732 if (tgsi_type_is_64bit(type)) {
733 ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
734 return si_llvm_emit_fetch_64bit(bld_base, type,
735 LLVMBuildLoad(builder, ptr, ""),
736 LLVMBuildLoad(builder, ptr2, ""));
737 }
738 result = LLVMBuildLoad(builder, ptr, "");
739 break;
740
741 case TGSI_FILE_OUTPUT:
742 ptr = get_output_ptr(bld_base, reg->Register.Index, swizzle);
743 if (tgsi_type_is_64bit(type)) {
744 ptr2 = get_output_ptr(bld_base, reg->Register.Index, swizzle + 1);
745 return si_llvm_emit_fetch_64bit(bld_base, type,
746 LLVMBuildLoad(builder, ptr, ""),
747 LLVMBuildLoad(builder, ptr2, ""));
748 }
749 result = LLVMBuildLoad(builder, ptr, "");
750 break;
751
752 default:
753 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
754 }
755
756 return bitcast(bld_base, type, result);
757 }
758
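/* System values are preloaded into ctx->system_values by the declaration
 * handler; extract the requested channel if the stored value is a vector.
 */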
759 static LLVMValueRef fetch_system_value(struct lp_build_tgsi_context *bld_base,
760 const struct tgsi_full_src_register *reg,
761 enum tgsi_opcode_type type,
762 unsigned swizzle)
763 {
764 struct si_shader_context *ctx = si_shader_context(bld_base);
765 struct gallivm_state *gallivm = bld_base->base.gallivm;
766
767 LLVMValueRef cval = ctx->system_values[reg->Register.Index];
768 if (LLVMGetTypeKind(LLVMTypeOf(cval)) == LLVMVectorTypeKind) {
769 cval = LLVMBuildExtractElement(gallivm->builder, cval,
770 lp_build_const_int32(gallivm, swizzle), "");
771 }
772 return bitcast(bld_base, type, cval);
773 }
774
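/* Allocate storage for declared registers and preload inputs and system
 * values.
 */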
775 static void emit_declaration(struct lp_build_tgsi_context *bld_base,
776 const struct tgsi_full_declaration *decl)
777 {
778 struct si_shader_context *ctx = si_shader_context(bld_base);
779 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
780 unsigned first, last, i;
781 switch(decl->Declaration.File) {
782 case TGSI_FILE_ADDRESS:
783 {
784 unsigned idx;
785 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
786 unsigned chan;
787 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
788 ctx->addrs[idx][chan] = lp_build_alloca_undef(
789 &ctx->gallivm,
790 ctx->bld_base.uint_bld.elem_type, "");
791 }
792 }
793 break;
794 }
795
796 case TGSI_FILE_TEMPORARY:
797 {
798 char name[16] = "";
799 LLVMValueRef array_alloca = NULL;
800 unsigned decl_size;
801 unsigned writemask = decl->Declaration.UsageMask;
802 first = decl->Range.First;
803 last = decl->Range.Last;
804 decl_size = 4 * ((last - first) + 1);
805
806 if (decl->Declaration.Array) {
807 unsigned id = decl->Array.ArrayID - 1;
808 unsigned array_size;
809
810 writemask &= ctx->temp_arrays[id].writemask;
811 ctx->temp_arrays[id].writemask = writemask;
812 array_size = ((last - first) + 1) * util_bitcount(writemask);
813
814 /* If the array has more than 16 elements, store it
815 * in memory using an alloca that spans the entire
816 * array.
817 *
818 * Otherwise, store each array element individually.
819 * We will then generate vectors (per-channel, up to
820 * <16 x float> if the usagemask is a single bit) for
821 * indirect addressing.
822 *
823 * Note that 16 is the number of vector elements that
824 * LLVM will store in a register, so theoretically an
825 * array with up to 4 * 16 = 64 elements could be
826 * handled this way, but whether that's a good idea
827 * depends on VGPR register pressure elsewhere.
828 *
829 * FIXME: We shouldn't need to have the non-alloca
830 * code path for arrays. LLVM should be smart enough to
831 * promote allocas into registers when profitable.
832 *
833 * LLVM 3.8 crashes with this.
834 */
835 if (HAVE_LLVM >= 0x0309 && array_size > 16) {
836 array_alloca = LLVMBuildAlloca(builder,
837 LLVMArrayType(bld_base->base.vec_type,
838 array_size), "array");
839 ctx->temp_array_allocas[id] = array_alloca;
840 }
841 }
842
843 if (!ctx->temps_count) {
844 ctx->temps_count = bld_base->info->file_max[TGSI_FILE_TEMPORARY] + 1;
845 ctx->temps = MALLOC(TGSI_NUM_CHANNELS * ctx->temps_count * sizeof(LLVMValueRef));
846 }
847 if (!array_alloca) {
848 for (i = 0; i < decl_size; ++i) {
849 #ifdef DEBUG
850 snprintf(name, sizeof(name), "TEMP%d.%c",
851 first + i / 4, "xyzw"[i % 4]);
852 #endif
853 ctx->temps[first * TGSI_NUM_CHANNELS + i] =
854 lp_build_alloca_undef(bld_base->base.gallivm,
855 bld_base->base.vec_type,
856 name);
857 }
858 } else {
859 LLVMValueRef idxs[2] = {
860 bld_base->uint_bld.zero,
861 NULL
862 };
863 unsigned j = 0;
864
865 if (writemask != TGSI_WRITEMASK_XYZW &&
866 !ctx->undef_alloca) {
867 /* Create a dummy alloca. We use it so that we
868 * have a pointer that is safe to load from if
869 * a shader ever reads from a channel that
870 * it never writes to.
871 */
872 ctx->undef_alloca = lp_build_alloca_undef(
873 bld_base->base.gallivm,
874 bld_base->base.vec_type, "undef");
875 }
876
877 for (i = 0; i < decl_size; ++i) {
878 LLVMValueRef ptr;
879 if (writemask & (1 << (i % 4))) {
880 #ifdef DEBUG
881 snprintf(name, sizeof(name), "TEMP%d.%c",
882 first + i / 4, "xyzw"[i % 4]);
883 #endif
884 idxs[1] = lp_build_const_int32(bld_base->base.gallivm, j);
885 ptr = LLVMBuildGEP(builder, array_alloca, idxs, 2, name);
886 j++;
887 } else {
888 ptr = ctx->undef_alloca;
889 }
890 ctx->temps[first * TGSI_NUM_CHANNELS + i] = ptr;
891 }
892 }
893 break;
894 }
895 case TGSI_FILE_INPUT:
896 {
897 unsigned idx;
898 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
899 if (ctx->load_input &&
900 ctx->input_decls[idx].Declaration.File != TGSI_FILE_INPUT) {
901 ctx->input_decls[idx] = *decl;
902 ctx->input_decls[idx].Range.First = idx;
903 ctx->input_decls[idx].Range.Last = idx;
904 ctx->input_decls[idx].Semantic.Index += idx - decl->Range.First;
905
906 if (si_preload_fs_inputs(ctx) ||
907 bld_base->info->processor != PIPE_SHADER_FRAGMENT)
908 ctx->load_input(ctx, idx, &ctx->input_decls[idx],
909 &ctx->inputs[idx * 4]);
910 }
911 }
912 }
913 break;
914
915 case TGSI_FILE_SYSTEM_VALUE:
916 {
917 unsigned idx;
918 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
919 ctx->load_system_value(ctx, idx, decl);
920 }
921 }
922 break;
923
924 case TGSI_FILE_OUTPUT:
925 {
926 char name[16] = "";
927 unsigned idx;
928 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
929 unsigned chan;
930 assert(idx < RADEON_LLVM_MAX_OUTPUTS);
931 if (ctx->outputs[idx][0])
932 continue;
933 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
934 #ifdef DEBUG
935 snprintf(name, sizeof(name), "OUT%d.%c",
936 idx, "xyzw"[chan % 4]);
937 #endif
938 ctx->outputs[idx][chan] = lp_build_alloca_undef(
939 &ctx->gallivm,
940 ctx->bld_base.base.elem_type, name);
941 }
942 }
943 break;
944 }
945
946 case TGSI_FILE_MEMORY:
947 ctx->declare_memory_region(ctx, decl);
948 break;
949
950 default:
951 break;
952 }
953 }
954
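/* Store per-channel instruction results into the destination register,
 * applying saturation and splitting 64-bit values into two 32-bit channels.
 */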
955 void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
956 const struct tgsi_full_instruction *inst,
957 const struct tgsi_opcode_info *info,
958 LLVMValueRef dst[4])
959 {
960 struct si_shader_context *ctx = si_shader_context(bld_base);
961 struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
962 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
963 LLVMBuilderRef builder = ctx->bld_base.base.gallivm->builder;
964 LLVMValueRef temp_ptr, temp_ptr2 = NULL;
965 unsigned chan, chan_index;
966 bool is_vec_store = false;
967 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
968
969 if (dst[0]) {
970 LLVMTypeKind k = LLVMGetTypeKind(LLVMTypeOf(dst[0]));
971 is_vec_store = (k == LLVMVectorTypeKind);
972 }
973
974 if (is_vec_store) {
975 LLVMValueRef values[4] = {};
976 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan) {
977 LLVMValueRef index = lp_build_const_int32(gallivm, chan);
978 values[chan] = LLVMBuildExtractElement(gallivm->builder,
979 dst[0], index, "");
980 }
981 bld_base->emit_store(bld_base, inst, info, values);
982 return;
983 }
984
985 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
986 LLVMValueRef value = dst[chan_index];
987
988 if (tgsi_type_is_64bit(dtype) && (chan_index == 1 || chan_index == 3))
989 continue;
990 if (inst->Instruction.Saturate)
991 value = ac_emit_clamp(&ctx->ac, value);
992
993 if (reg->Register.File == TGSI_FILE_ADDRESS) {
994 temp_ptr = ctx->addrs[reg->Register.Index][chan_index];
995 LLVMBuildStore(builder, value, temp_ptr);
996 continue;
997 }
998
999 if (!tgsi_type_is_64bit(dtype))
1000 value = bitcast(bld_base, TGSI_TYPE_FLOAT, value);
1001
1002 if (reg->Register.Indirect) {
1003 unsigned file = reg->Register.File;
1004 unsigned reg_index = reg->Register.Index;
1005 store_value_to_array(bld_base, value, file, chan_index,
1006 reg_index, &reg->Indirect);
1007 } else {
1008 switch(reg->Register.File) {
1009 case TGSI_FILE_OUTPUT:
1010 temp_ptr = ctx->outputs[reg->Register.Index][chan_index];
1011 if (tgsi_type_is_64bit(dtype))
1012 temp_ptr2 = ctx->outputs[reg->Register.Index][chan_index + 1];
1013 break;
1014
1015 case TGSI_FILE_TEMPORARY:
1016 {
1017 if (reg->Register.Index >= ctx->temps_count)
1018 continue;
1019
1020 temp_ptr = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index];
1021 if (tgsi_type_is_64bit(dtype))
1022 temp_ptr2 = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index + 1];
1023
1024 break;
1025 }
1026 default:
1027 return;
1028 }
1029 if (!tgsi_type_is_64bit(dtype))
1030 LLVMBuildStore(builder, value, temp_ptr);
1031 else {
1032 LLVMValueRef ptr = LLVMBuildBitCast(builder, value,
1033 LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), 2), "");
1034 LLVMValueRef val2;
1035 value = LLVMBuildExtractElement(builder, ptr,
1036 bld_base->uint_bld.zero, "");
1037 val2 = LLVMBuildExtractElement(builder, ptr,
1038 bld_base->uint_bld.one, "");
1039
1040 LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, value), temp_ptr);
1041 LLVMBuildStore(builder, bitcast(bld_base, TGSI_TYPE_FLOAT, val2), temp_ptr2);
1042 }
1043 }
1044 }
1045 }
1046
1047 static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base, int pc)
1048 {
1049 char buf[32];
1050 /* Subtract 1 so that the number shown is that of the corresponding
1051 * opcode in the TGSI dump, e.g. an if block has the same suffix as
1052 * the instruction number of the corresponding TGSI IF.
1053 */
1054 snprintf(buf, sizeof(buf), "%s%d", base, pc - 1);
1055 LLVMSetValueName(LLVMBasicBlockAsValue(bb), buf);
1056 }
1057
1058 /* Append a basic block at the level of the parent flow.
1059 */
1060 static LLVMBasicBlockRef append_basic_block(struct si_shader_context *ctx,
1061 const char *name)
1062 {
1063 struct gallivm_state *gallivm = &ctx->gallivm;
1064
1065 assert(ctx->flow_depth >= 1);
1066
1067 if (ctx->flow_depth >= 2) {
1068 struct si_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
1069
1070 return LLVMInsertBasicBlockInContext(gallivm->context,
1071 flow->next_block, name);
1072 }
1073
1074 return LLVMAppendBasicBlockInContext(gallivm->context, ctx->main_fn, name);
1075 }
1076
1077 /* Emit a branch to the given default target for the current block if
1078 * applicable -- that is, if the current block does not already contain a
1079 * branch from a break or continue.
1080 */
1081 static void emit_default_branch(LLVMBuilderRef builder, LLVMBasicBlockRef target)
1082 {
1083 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
1084 LLVMBuildBr(builder, target);
1085 }
1086
1087 static void bgnloop_emit(const struct lp_build_tgsi_action *action,
1088 struct lp_build_tgsi_context *bld_base,
1089 struct lp_build_emit_data *emit_data)
1090 {
1091 struct si_shader_context *ctx = si_shader_context(bld_base);
1092 struct gallivm_state *gallivm = bld_base->base.gallivm;
1093 struct si_llvm_flow *flow = push_flow(ctx);
1094 flow->loop_entry_block = append_basic_block(ctx, "LOOP");
1095 flow->next_block = append_basic_block(ctx, "ENDLOOP");
1096 set_basicblock_name(flow->loop_entry_block, "loop", bld_base->pc);
1097 LLVMBuildBr(gallivm->builder, flow->loop_entry_block);
1098 LLVMPositionBuilderAtEnd(gallivm->builder, flow->loop_entry_block);
1099 }
1100
1101 static void brk_emit(const struct lp_build_tgsi_action *action,
1102 struct lp_build_tgsi_context *bld_base,
1103 struct lp_build_emit_data *emit_data)
1104 {
1105 struct si_shader_context *ctx = si_shader_context(bld_base);
1106 struct gallivm_state *gallivm = bld_base->base.gallivm;
1107 struct si_llvm_flow *flow = get_innermost_loop(ctx);
1108
1109 LLVMBuildBr(gallivm->builder, flow->next_block);
1110 }
1111
1112 static void cont_emit(const struct lp_build_tgsi_action *action,
1113 struct lp_build_tgsi_context *bld_base,
1114 struct lp_build_emit_data *emit_data)
1115 {
1116 struct si_shader_context *ctx = si_shader_context(bld_base);
1117 struct gallivm_state *gallivm = bld_base->base.gallivm;
1118 struct si_llvm_flow *flow = get_innermost_loop(ctx);
1119
1120 LLVMBuildBr(gallivm->builder, flow->loop_entry_block);
1121 }
1122
1123 static void else_emit(const struct lp_build_tgsi_action *action,
1124 struct lp_build_tgsi_context *bld_base,
1125 struct lp_build_emit_data *emit_data)
1126 {
1127 struct si_shader_context *ctx = si_shader_context(bld_base);
1128 struct gallivm_state *gallivm = bld_base->base.gallivm;
1129 struct si_llvm_flow *current_branch = get_current_flow(ctx);
1130 LLVMBasicBlockRef endif_block;
1131
1132 assert(!current_branch->loop_entry_block);
1133
1134 endif_block = append_basic_block(ctx, "ENDIF");
1135 emit_default_branch(gallivm->builder, endif_block);
1136
1137 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->next_block);
1138 set_basicblock_name(current_branch->next_block, "else", bld_base->pc);
1139
1140 current_branch->next_block = endif_block;
1141 }
1142
1143 static void endif_emit(const struct lp_build_tgsi_action *action,
1144 struct lp_build_tgsi_context *bld_base,
1145 struct lp_build_emit_data *emit_data)
1146 {
1147 struct si_shader_context *ctx = si_shader_context(bld_base);
1148 struct gallivm_state *gallivm = bld_base->base.gallivm;
1149 struct si_llvm_flow *current_branch = get_current_flow(ctx);
1150
1151 assert(!current_branch->loop_entry_block);
1152
1153 emit_default_branch(gallivm->builder, current_branch->next_block);
1154 LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->next_block);
1155 set_basicblock_name(current_branch->next_block, "endif", bld_base->pc);
1156
1157 ctx->flow_depth--;
1158 }
1159
1160 static void endloop_emit(const struct lp_build_tgsi_action *action,
1161 struct lp_build_tgsi_context *bld_base,
1162 struct lp_build_emit_data *emit_data)
1163 {
1164 struct si_shader_context *ctx = si_shader_context(bld_base);
1165 struct gallivm_state *gallivm = bld_base->base.gallivm;
1166 struct si_llvm_flow *current_loop = get_current_flow(ctx);
1167
1168 assert(current_loop->loop_entry_block);
1169
1170 emit_default_branch(gallivm->builder, current_loop->loop_entry_block);
1171
1172 LLVMPositionBuilderAtEnd(gallivm->builder, current_loop->next_block);
1173 set_basicblock_name(current_loop->next_block, "endloop", bld_base->pc);
1174 ctx->flow_depth--;
1175 }
1176
1177 static void if_cond_emit(const struct lp_build_tgsi_action *action,
1178 struct lp_build_tgsi_context *bld_base,
1179 struct lp_build_emit_data *emit_data,
1180 LLVMValueRef cond)
1181 {
1182 struct si_shader_context *ctx = si_shader_context(bld_base);
1183 struct gallivm_state *gallivm = bld_base->base.gallivm;
1184 struct si_llvm_flow *flow = push_flow(ctx);
1185 LLVMBasicBlockRef if_block;
1186
1187 if_block = append_basic_block(ctx, "IF");
1188 flow->next_block = append_basic_block(ctx, "ELSE");
1189 set_basicblock_name(if_block, "if", bld_base->pc);
1190 LLVMBuildCondBr(gallivm->builder, cond, if_block, flow->next_block);
1191 LLVMPositionBuilderAtEnd(gallivm->builder, if_block);
1192 }
1193
1194 static void if_emit(const struct lp_build_tgsi_action *action,
1195 struct lp_build_tgsi_context *bld_base,
1196 struct lp_build_emit_data *emit_data)
1197 {
1198 struct gallivm_state *gallivm = bld_base->base.gallivm;
1199 LLVMValueRef cond;
1200
1201 cond = LLVMBuildFCmp(gallivm->builder, LLVMRealUNE,
1202 emit_data->args[0],
1203 bld_base->base.zero, "");
1204
1205 if_cond_emit(action, bld_base, emit_data, cond);
1206 }
1207
1208 static void uif_emit(const struct lp_build_tgsi_action *action,
1209 struct lp_build_tgsi_context *bld_base,
1210 struct lp_build_emit_data *emit_data)
1211 {
1212 struct gallivm_state *gallivm = bld_base->base.gallivm;
1213 LLVMValueRef cond;
1214
1215 cond = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
1216 bitcast(bld_base, TGSI_TYPE_UNSIGNED, emit_data->args[0]),
1217 bld_base->int_bld.zero, "");
1218
1219 if_cond_emit(action, bld_base, emit_data, cond);
1220 }
1221
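/* Record the four channels of an immediate as i32 constants for later
 * fetches from TGSI_FILE_IMMEDIATE.
 */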
1222 static void emit_immediate(struct lp_build_tgsi_context *bld_base,
1223 const struct tgsi_full_immediate *imm)
1224 {
1225 unsigned i;
1226 struct si_shader_context *ctx = si_shader_context(bld_base);
1227
1228 for (i = 0; i < 4; ++i) {
1229 ctx->imms[ctx->imms_num * TGSI_NUM_CHANNELS + i] =
1230 LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false );
1231 }
1232
1233 ctx->imms_num++;
1234 }
1235
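/* Set up the per-shader LLVM context, module, builder and the TGSI
 * translation callbacks.
 */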
1236 void si_llvm_context_init(struct si_shader_context *ctx,
1237 struct si_screen *sscreen,
1238 struct si_shader *shader,
1239 LLVMTargetMachineRef tm,
1240 const struct tgsi_shader_info *info,
1241 const struct tgsi_token *tokens)
1242 {
1243 struct lp_type type;
1244
1245 /* Initialize the gallivm object:
1246  * We only use the module, context, and builder fields of this struct,
1247  * which is enough to pass our gallivm struct to the helper functions
1248  * in the gallivm module.
1249  */
1250 memset(ctx, 0, sizeof(*ctx));
1251 ctx->shader = shader;
1252 ctx->screen = sscreen;
1253 ctx->tm = tm;
1254 ctx->type = info ? info->processor : -1;
1255
1256 ctx->gallivm.context = LLVMContextCreate();
1257 ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
1258 ctx->gallivm.context);
1259 LLVMSetTarget(ctx->gallivm.module, "amdgcn--");
1260
1261 #if HAVE_LLVM >= 0x0309
1262 LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
1263 char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
1264 LLVMSetDataLayout(ctx->gallivm.module, data_layout_str);
1265 LLVMDisposeTargetData(data_layout);
1266 LLVMDisposeMessage(data_layout_str);
1267 #endif
1268
1269 bool unsafe_fpmath = (sscreen->b.debug_flags & DBG_UNSAFE_MATH) != 0;
1270 enum lp_float_mode float_mode =
1271 unsafe_fpmath ? LP_FLOAT_MODE_UNSAFE_FP_MATH :
1272 LP_FLOAT_MODE_NO_SIGNED_ZEROS_FP_MATH;
1273
1274 ctx->gallivm.builder = lp_create_builder(ctx->gallivm.context,
1275 float_mode);
1276
1277 ac_llvm_context_init(&ctx->ac, ctx->gallivm.context);
1278 ctx->ac.module = ctx->gallivm.module;
1279 ctx->ac.builder = ctx->gallivm.builder;
1280
1281 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
1282
1283 bld_base->info = info;
1284
1285 if (info && info->array_max[TGSI_FILE_TEMPORARY] > 0) {
1286 int size = info->array_max[TGSI_FILE_TEMPORARY];
1287
1288 ctx->temp_arrays = CALLOC(size, sizeof(ctx->temp_arrays[0]));
1289 ctx->temp_array_allocas = CALLOC(size, sizeof(ctx->temp_array_allocas[0]));
1290
1291 if (tokens)
1292 tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, size,
1293 ctx->temp_arrays);
1294 }
1295
1296 if (info && info->file_max[TGSI_FILE_IMMEDIATE] >= 0) {
1297 int size = info->file_max[TGSI_FILE_IMMEDIATE] + 1;
1298 ctx->imms = MALLOC(size * TGSI_NUM_CHANNELS * sizeof(LLVMValueRef));
1299 }
1300
1301 type.floating = true;
1302 type.fixed = false;
1303 type.sign = true;
1304 type.norm = false;
1305 type.width = 32;
1306 type.length = 1;
1307
1308 lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
1309 lp_build_context_init(&ctx->bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
1310 lp_build_context_init(&ctx->bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
1311 type.width *= 2;
1312 lp_build_context_init(&ctx->bld_base.dbl_bld, &ctx->gallivm, type);
1313 lp_build_context_init(&ctx->bld_base.uint64_bld, &ctx->gallivm, lp_uint_type(type));
1314 lp_build_context_init(&ctx->bld_base.int64_bld, &ctx->gallivm, lp_int_type(type));
1315
1316 bld_base->soa = 1;
1317 bld_base->emit_store = si_llvm_emit_store;
1318 bld_base->emit_swizzle = emit_swizzle;
1319 bld_base->emit_declaration = emit_declaration;
1320 bld_base->emit_immediate = emit_immediate;
1321
1322 bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = si_llvm_emit_fetch;
1323 bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = si_llvm_emit_fetch;
1324 bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = si_llvm_emit_fetch;
1325 bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = si_llvm_emit_fetch;
1326 bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;
1327
1328 /* metadata allowing 2.5 ULP */
1329 ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->gallivm.context,
1330 "fpmath", 6);
1331 LLVMValueRef arg = lp_build_const_float(&ctx->gallivm, 2.5);
1332 ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->gallivm.context,
1333 &arg, 1);
1334
1335 bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
1336 bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
1337 bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
1338 bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
1339 bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
1340 bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
1341 bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
1342 bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
1343
1344 si_shader_context_init_alu(&ctx->bld_base);
1345
1346 ctx->voidt = LLVMVoidTypeInContext(ctx->gallivm.context);
1347 ctx->i1 = LLVMInt1TypeInContext(ctx->gallivm.context);
1348 ctx->i8 = LLVMInt8TypeInContext(ctx->gallivm.context);
1349 ctx->i32 = LLVMInt32TypeInContext(ctx->gallivm.context);
1350 ctx->i64 = LLVMInt64TypeInContext(ctx->gallivm.context);
1351 ctx->i128 = LLVMIntTypeInContext(ctx->gallivm.context, 128);
1352 ctx->f32 = LLVMFloatTypeInContext(ctx->gallivm.context);
1353 ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
1354 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
1355 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
1356 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
1357 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
1358
1359 ctx->i32_0 = LLVMConstInt(ctx->i32, 0, 0);
1360 ctx->i32_1 = LLVMConstInt(ctx->i32, 1, 0);
1361 }
1362
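/* Declare the main shader function and position the builder at the start of
 * its body.
 */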
1363 void si_llvm_create_func(struct si_shader_context *ctx,
1364 const char *name,
1365 LLVMTypeRef *return_types, unsigned num_return_elems,
1366 LLVMTypeRef *ParamTypes, unsigned ParamCount)
1367 {
1368 LLVMTypeRef main_fn_type, ret_type;
1369 LLVMBasicBlockRef main_fn_body;
1370
1371 if (num_return_elems)
1372 ret_type = LLVMStructTypeInContext(ctx->gallivm.context,
1373 return_types,
1374 num_return_elems, true);
1375 else
1376 ret_type = LLVMVoidTypeInContext(ctx->gallivm.context);
1377
1378 /* Setup the function */
1379 ctx->return_type = ret_type;
1380 main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
1381 ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, name, main_fn_type);
1382 main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
1383 ctx->main_fn, "main_body");
1384 LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
1385 }
1386
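/* Run inlining, mem2reg and a few scalar optimization passes on the module
 * before it is handed to the backend.
 */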
1387 void si_llvm_finalize_module(struct si_shader_context *ctx,
1388 bool run_verifier)
1389 {
1390 struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
1391 const char *triple = LLVMGetTarget(gallivm->module);
1392 LLVMTargetLibraryInfoRef target_library_info;
1393
1394 /* Create the pass manager */
1395 gallivm->passmgr = LLVMCreatePassManager();
1396
1397 target_library_info = gallivm_create_target_library_info(triple);
1398 LLVMAddTargetLibraryInfo(target_library_info, gallivm->passmgr);
1399
1400 if (run_verifier)
1401 LLVMAddVerifierPass(gallivm->passmgr);
1402
1403 LLVMAddAlwaysInlinerPass(gallivm->passmgr);
1404
1405 /* This pass should eliminate all the load and store instructions */
1406 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
1407
1408 /* Add some optimization passes */
1409 LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
1410 LLVMAddLICMPass(gallivm->passmgr);
1411 LLVMAddAggressiveDCEPass(gallivm->passmgr);
1412 LLVMAddCFGSimplificationPass(gallivm->passmgr);
1413 LLVMAddInstructionCombiningPass(gallivm->passmgr);
1414
1415 /* Run the pass */
1416 LLVMRunPassManager(gallivm->passmgr, ctx->gallivm.module);
1417
1418 LLVMDisposeBuilder(gallivm->builder);
1419 LLVMDisposePassManager(gallivm->passmgr);
1420 gallivm_dispose_target_library_info(target_library_info);
1421 }
1422
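/* Free the LLVM module/context and all per-shader translation state. */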
1423 void si_llvm_dispose(struct si_shader_context *ctx)
1424 {
1425 LLVMDisposeModule(ctx->bld_base.base.gallivm->module);
1426 LLVMContextDispose(ctx->bld_base.base.gallivm->context);
1427 FREE(ctx->temp_arrays);
1428 ctx->temp_arrays = NULL;
1429 FREE(ctx->temp_array_allocas);
1430 ctx->temp_array_allocas = NULL;
1431 FREE(ctx->temps);
1432 ctx->temps = NULL;
1433 ctx->temps_count = 0;
1434 FREE(ctx->imms);
1435 ctx->imms = NULL;
1436 ctx->imms_num = 0;
1437 FREE(ctx->flow);
1438 ctx->flow = NULL;
1439 ctx->flow_depth_max = 0;
1440 }