radeonsi: rename si_compiler -> ac_llvm_compiler
[mesa.git] src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_shader_internal.h"
26 #include "si_pipe.h"
27 #include "ac_llvm_util.h"
28 #include "util/u_memory.h"
29
30 enum si_llvm_calling_convention {
31 RADEON_LLVM_AMDGPU_VS = 87,
32 RADEON_LLVM_AMDGPU_GS = 88,
33 RADEON_LLVM_AMDGPU_PS = 89,
34 RADEON_LLVM_AMDGPU_CS = 90,
35 RADEON_LLVM_AMDGPU_HS = 93,
36 };
37
38 struct si_llvm_diagnostics {
39 struct pipe_debug_callback *debug;
40 unsigned retval;
41 };
42
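/* LLVM diagnostic callback: forward compiler diagnostics to the
 * pipe_debug_callback and remember whether an error was reported.
 */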
43 static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
44 {
45 struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
46 LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
47 char *description = LLVMGetDiagInfoDescription(di);
48 const char *severity_str = NULL;
49
50 switch (severity) {
51 case LLVMDSError:
52 severity_str = "error";
53 break;
54 case LLVMDSWarning:
55 severity_str = "warning";
56 break;
57 case LLVMDSRemark:
58 severity_str = "remark";
59 break;
60 case LLVMDSNote:
61 severity_str = "note";
62 break;
63 default:
64 severity_str = "unknown";
65 }
66
67 pipe_debug_message(diag->debug, SHADER_INFO,
68 "LLVM diagnostic (%s): %s", severity_str, description);
69
70 if (severity == LLVMDSError) {
71 diag->retval = 1;
72 fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n", description);
73 }
74
75 LLVMDisposeMessage(description);
76 }
77
78 /**
79 * Compile an LLVM module to machine code.
80 *
81 * @returns 0 for success, 1 for failure
82 */
83 unsigned si_llvm_compile(LLVMModuleRef M, struct ac_shader_binary *binary,
84 struct ac_llvm_compiler *compiler,
85 struct pipe_debug_callback *debug)
86 {
87 struct si_llvm_diagnostics diag;
88 char *err;
89 LLVMContextRef llvm_ctx;
90 LLVMMemoryBufferRef out_buffer;
91 unsigned buffer_size;
92 const char *buffer_data;
93 LLVMBool mem_err;
94
95 diag.debug = debug;
96 diag.retval = 0;
97
98 /* Set up the diagnostic handler. */
99 llvm_ctx = LLVMGetModuleContext(M);
100
101 LLVMContextSetDiagnosticHandler(llvm_ctx, si_diagnostic_handler, &diag);
102
103 /* Compile the IR. */
104 mem_err = LLVMTargetMachineEmitToMemoryBuffer(compiler->tm, M,
105 LLVMObjectFile, &err,
106 &out_buffer);
107
108 /* Process Errors/Warnings */
109 if (mem_err) {
110 fprintf(stderr, "%s: %s", __FUNCTION__, err);
111 pipe_debug_message(debug, SHADER_INFO,
112 "LLVM emit error: %s", err);
113 FREE(err);
114 diag.retval = 1;
115 goto out;
116 }
117
118 /* Extract the shader code. */
119 buffer_size = LLVMGetBufferSize(out_buffer);
120 buffer_data = LLVMGetBufferStart(out_buffer);
121
122 if (!ac_elf_read(buffer_data, buffer_size, binary)) {
123 fprintf(stderr, "radeonsi: cannot read an ELF shader binary\n");
124 diag.retval = 1;
125 }
126
127 /* Clean up */
128 LLVMDisposeMemoryBuffer(out_buffer);
129
130 out:
131 if (diag.retval != 0)
132 pipe_debug_message(debug, SHADER_INFO, "LLVM compile failed");
133 return diag.retval;
134 }
135
136 LLVMTypeRef tgsi2llvmtype(struct lp_build_tgsi_context *bld_base,
137 enum tgsi_opcode_type type)
138 {
139 struct si_shader_context *ctx = si_shader_context(bld_base);
140
141 switch (type) {
142 case TGSI_TYPE_UNSIGNED:
143 case TGSI_TYPE_SIGNED:
144 return ctx->ac.i32;
145 case TGSI_TYPE_UNSIGNED64:
146 case TGSI_TYPE_SIGNED64:
147 return ctx->ac.i64;
148 case TGSI_TYPE_DOUBLE:
149 return ctx->ac.f64;
150 case TGSI_TYPE_UNTYPED:
151 case TGSI_TYPE_FLOAT:
152 return ctx->ac.f32;
153 default: break;
154 }
155 return 0;
156 }
157
158 LLVMValueRef bitcast(struct lp_build_tgsi_context *bld_base,
159 enum tgsi_opcode_type type, LLVMValueRef value)
160 {
161 struct si_shader_context *ctx = si_shader_context(bld_base);
162 LLVMTypeRef dst_type = tgsi2llvmtype(bld_base, type);
163
164 if (dst_type)
165 return LLVMBuildBitCast(ctx->ac.builder, value, dst_type, "");
166 else
167 return value;
168 }
169
170 /**
171 * Return a value that is equal to the given i32 \p index if it lies in [0,num),
172 * or some unspecified value that is still guaranteed to lie in [0,num) otherwise.
173 */
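/* For example, with num = 8 the index is simply masked with 7, while with
 * num = 6 it is clamped to 5 with a compare + select.
 */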
174 LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
175 LLVMValueRef index,
176 unsigned num)
177 {
178 LLVMBuilderRef builder = ctx->ac.builder;
179 LLVMValueRef c_max = LLVMConstInt(ctx->i32, num - 1, 0);
180 LLVMValueRef cc;
181
182 if (util_is_power_of_two_or_zero(num)) {
183 index = LLVMBuildAnd(builder, index, c_max, "");
184 } else {
185 /* In theory, this MAX pattern should result in code that is
186 * as good as the bit-wise AND above.
187 *
188 * In practice, LLVM generates worse code (at the time of
189 * writing), because its value tracking is not strong enough.
190 */
191 cc = LLVMBuildICmp(builder, LLVMIntULE, index, c_max, "");
192 index = LLVMBuildSelect(builder, cc, index, c_max, "");
193 }
194
195 return index;
196 }
197
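/* Apply a TGSI swizzle to a 4-component vector value using a shufflevector
 * instruction (the second shuffle operand is undef).
 */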
198 static LLVMValueRef emit_swizzle(struct lp_build_tgsi_context *bld_base,
199 LLVMValueRef value,
200 unsigned swizzle_x,
201 unsigned swizzle_y,
202 unsigned swizzle_z,
203 unsigned swizzle_w)
204 {
205 struct si_shader_context *ctx = si_shader_context(bld_base);
206 LLVMValueRef swizzles[4];
207
208 swizzles[0] = LLVMConstInt(ctx->i32, swizzle_x, 0);
209 swizzles[1] = LLVMConstInt(ctx->i32, swizzle_y, 0);
210 swizzles[2] = LLVMConstInt(ctx->i32, swizzle_z, 0);
211 swizzles[3] = LLVMConstInt(ctx->i32, swizzle_w, 0);
212
213 return LLVMBuildShuffleVector(ctx->ac.builder,
214 value,
215 LLVMGetUndef(LLVMTypeOf(value)),
216 LLVMConstVector(swizzles, 4), "");
217 }
218
219 /**
220 * Return the 1-based ID of the temporary array covering the given register
221 * index, or 0 if the register does not belong to any declared array.
222 */
223 static unsigned
224 get_temp_array_id(struct lp_build_tgsi_context *bld_base,
225 unsigned reg_index,
226 const struct tgsi_ind_register *reg)
227 {
228 struct si_shader_context *ctx = si_shader_context(bld_base);
229 unsigned num_arrays = ctx->bld_base.info->array_max[TGSI_FILE_TEMPORARY];
230 unsigned i;
231
232 if (reg && reg->ArrayID > 0 && reg->ArrayID <= num_arrays)
233 return reg->ArrayID;
234
235 for (i = 0; i < num_arrays; i++) {
236 const struct tgsi_array_info *array = &ctx->temp_arrays[i];
237
238 if (reg_index >= array->range.First && reg_index <= array->range.Last)
239 return i + 1;
240 }
241
242 return 0;
243 }
244
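/* Return the register range covered by the array that contains the given
 * register, or the whole register file if it isn't part of a declared array.
 */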
245 static struct tgsi_declaration_range
246 get_array_range(struct lp_build_tgsi_context *bld_base,
247 unsigned File, unsigned reg_index,
248 const struct tgsi_ind_register *reg)
249 {
250 struct si_shader_context *ctx = si_shader_context(bld_base);
251 struct tgsi_declaration_range range;
252
253 if (File == TGSI_FILE_TEMPORARY) {
254 unsigned array_id = get_temp_array_id(bld_base, reg_index, reg);
255 if (array_id)
256 return ctx->temp_arrays[array_id - 1].range;
257 }
258
259 range.First = 0;
260 range.Last = bld_base->info->file_max[File];
261 return range;
262 }
263
264 /**
265 * For indirect registers, construct a pointer directly to the requested
266 * element using getelementptr if possible.
267 *
268 * Returns NULL if the insertelement/extractelement fallback for array access
269 * must be used.
270 */
271 static LLVMValueRef
272 get_pointer_into_array(struct si_shader_context *ctx,
273 unsigned file,
274 unsigned swizzle,
275 unsigned reg_index,
276 const struct tgsi_ind_register *reg_indirect)
277 {
278 unsigned array_id;
279 struct tgsi_array_info *array;
280 LLVMBuilderRef builder = ctx->ac.builder;
281 LLVMValueRef idxs[2];
282 LLVMValueRef index;
283 LLVMValueRef alloca;
284
285 if (file != TGSI_FILE_TEMPORARY)
286 return NULL;
287
288 array_id = get_temp_array_id(&ctx->bld_base, reg_index, reg_indirect);
289 if (!array_id)
290 return NULL;
291
292 alloca = ctx->temp_array_allocas[array_id - 1];
293 if (!alloca)
294 return NULL;
295
296 array = &ctx->temp_arrays[array_id - 1];
297
298 if (!(array->writemask & (1 << swizzle)))
299 return ctx->undef_alloca;
300
301 index = si_get_indirect_index(ctx, reg_indirect, 1,
302 reg_index - ctx->temp_arrays[array_id - 1].range.First);
303
304 /* Ensure that the index is within a valid range, to guard against
305 * VM faults and overwriting critical data (e.g. spilled resource
306 * descriptors).
307 *
308 * TODO It should be possible to avoid the additional instructions
309 * if LLVM is changed so that it guarantees:
310 * 1. the scratch space descriptor isolates the current wave (this
311 * could even save the scratch offset SGPR at the cost of an
312 * additional SALU instruction)
313 * 2. the memory for allocas is allocated at the _end_ of the
314 * scratch space (after spilled registers)
315 */
316 index = si_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
317
318 index = LLVMBuildMul(
319 builder, index,
320 LLVMConstInt(ctx->i32, util_bitcount(array->writemask), 0),
321 "");
322 index = LLVMBuildAdd(
323 builder, index,
324 LLVMConstInt(ctx->i32,
325 util_bitcount(array->writemask & ((1 << swizzle) - 1)), 0),
326 "");
327 idxs[0] = ctx->i32_0;
328 idxs[1] = index;
329 return LLVMBuildGEP(ctx->ac.builder, alloca, idxs, 2, "");
330 }
331
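/* Combine two 32-bit values (low, high) into a single value of the given
 * 64-bit type by packing them into a <2 x i32> vector and bitcasting.
 */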
332 LLVMValueRef
333 si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
334 LLVMTypeRef type,
335 LLVMValueRef ptr,
336 LLVMValueRef ptr2)
337 {
338 struct si_shader_context *ctx = si_shader_context(bld_base);
339 LLVMValueRef result;
340
341 result = LLVMGetUndef(LLVMVectorType(ctx->i32, 2));
342
343 result = LLVMBuildInsertElement(ctx->ac.builder,
344 result,
345 ac_to_integer(&ctx->ac, ptr),
346 ctx->i32_0, "");
347 result = LLVMBuildInsertElement(ctx->ac.builder,
348 result,
349 ac_to_integer(&ctx->ac, ptr2),
350 ctx->i32_1, "");
351 return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
352 }
353
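/* Load one channel of every register in the given range and gather the
 * results into a vector, for use with dynamic extract/insertelement.
 */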
354 static LLVMValueRef
355 emit_array_fetch(struct lp_build_tgsi_context *bld_base,
356 unsigned File, enum tgsi_opcode_type type,
357 struct tgsi_declaration_range range,
358 unsigned swizzle)
359 {
360 struct si_shader_context *ctx = si_shader_context(bld_base);
361 unsigned i, size = range.Last - range.First + 1;
362 LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
363 LLVMValueRef result = LLVMGetUndef(vec);
364
365 struct tgsi_full_src_register tmp_reg = {};
366 tmp_reg.Register.File = File;
367
368 for (i = 0; i < size; ++i) {
369 tmp_reg.Register.Index = i + range.First;
370 LLVMValueRef temp = si_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
371 result = LLVMBuildInsertElement(ctx->ac.builder, result, temp,
372 LLVMConstInt(ctx->i32, i, 0), "array_vector");
373 }
374 return result;
375 }
376
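/* Read one channel of an indirectly addressed register. Uses a direct GEP
 * into the array alloca when possible; otherwise falls back to fetching the
 * whole range as a vector and extracting the element.
 */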
377 static LLVMValueRef
378 load_value_from_array(struct lp_build_tgsi_context *bld_base,
379 unsigned file,
380 enum tgsi_opcode_type type,
381 unsigned swizzle,
382 unsigned reg_index,
383 const struct tgsi_ind_register *reg_indirect)
384 {
385 struct si_shader_context *ctx = si_shader_context(bld_base);
386 LLVMBuilderRef builder = ctx->ac.builder;
387 LLVMValueRef ptr;
388
389 ptr = get_pointer_into_array(ctx, file, swizzle, reg_index, reg_indirect);
390 if (ptr) {
391 LLVMValueRef val = LLVMBuildLoad(builder, ptr, "");
392 if (tgsi_type_is_64bit(type)) {
393 LLVMValueRef ptr_hi, val_hi;
394 ptr_hi = LLVMBuildGEP(builder, ptr, &ctx->i32_1, 1, "");
395 val_hi = LLVMBuildLoad(builder, ptr_hi, "");
396 val = si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
397 val, val_hi);
398 }
399
400 return val;
401 } else {
402 struct tgsi_declaration_range range =
403 get_array_range(bld_base, file, reg_index, reg_indirect);
404 LLVMValueRef index =
405 si_get_indirect_index(ctx, reg_indirect, 1, reg_index - range.First);
406 LLVMValueRef array =
407 emit_array_fetch(bld_base, file, type, range, swizzle);
408 return LLVMBuildExtractElement(builder, array, index, "");
409 }
410 }
411
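/* Write one channel of an indirectly addressed register, either through a
 * GEP into the array alloca or via the insertelement fallback, which stores
 * the whole range back element by element.
 */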
412 static void
413 store_value_to_array(struct lp_build_tgsi_context *bld_base,
414 LLVMValueRef value,
415 unsigned file,
416 unsigned chan_index,
417 unsigned reg_index,
418 const struct tgsi_ind_register *reg_indirect)
419 {
420 struct si_shader_context *ctx = si_shader_context(bld_base);
421 LLVMBuilderRef builder = ctx->ac.builder;
422 LLVMValueRef ptr;
423
424 ptr = get_pointer_into_array(ctx, file, chan_index, reg_index, reg_indirect);
425 if (ptr) {
426 LLVMBuildStore(builder, value, ptr);
427 } else {
428 unsigned i, size;
429 struct tgsi_declaration_range range = get_array_range(bld_base, file, reg_index, reg_indirect);
430 LLVMValueRef index = si_get_indirect_index(ctx, reg_indirect, 1, reg_index - range.First);
431 LLVMValueRef array =
432 emit_array_fetch(bld_base, file, TGSI_TYPE_FLOAT, range, chan_index);
433 LLVMValueRef temp_ptr;
434
435 array = LLVMBuildInsertElement(builder, array, value, index, "");
436
437 size = range.Last - range.First + 1;
438 for (i = 0; i < size; ++i) {
439 switch(file) {
440 case TGSI_FILE_OUTPUT:
441 temp_ptr = ctx->outputs[i + range.First][chan_index];
442 break;
443
444 case TGSI_FILE_TEMPORARY:
445 if (range.First + i >= ctx->temps_count)
446 continue;
447 temp_ptr = ctx->temps[(i + range.First) * TGSI_NUM_CHANNELS + chan_index];
448 break;
449
450 default:
451 continue;
452 }
453 value = LLVMBuildExtractElement(builder, array,
454 LLVMConstInt(ctx->i32, i, 0), "");
455 LLVMBuildStore(builder, value, temp_ptr);
456 }
457 }
458 }
459
460 /* If this returns true, preload FS inputs at the beginning of the shader.
461 * Otherwise, reload them at each use. This must be true if the shader uses
462 * derivatives together with KILL, because KILL can leave whole-quad mode
463 * (WQM), so a lazily executed input load would no longer be inside the WQM.
464 */
465 static bool si_preload_fs_inputs(struct si_shader_context *ctx)
466 {
467 struct si_shader_selector *sel = ctx->shader->selector;
468
469 return sel->info.uses_derivatives &&
470 sel->info.uses_kill;
471 }
472
473 static LLVMValueRef
474 get_output_ptr(struct lp_build_tgsi_context *bld_base, unsigned index,
475 unsigned chan)
476 {
477 struct si_shader_context *ctx = si_shader_context(bld_base);
478
479 assert(index <= ctx->bld_base.info->file_max[TGSI_FILE_OUTPUT]);
480 return ctx->outputs[index][chan];
481 }
482
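/* Fetch a TGSI source operand. A swizzle of ~0 fetches all four channels
 * and returns them as a vector; 64-bit types consume two adjacent channels.
 */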
483 LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
484 const struct tgsi_full_src_register *reg,
485 enum tgsi_opcode_type type,
486 unsigned swizzle)
487 {
488 struct si_shader_context *ctx = si_shader_context(bld_base);
489 LLVMBuilderRef builder = ctx->ac.builder;
490 LLVMValueRef result = NULL, ptr, ptr2;
491
492 if (swizzle == ~0) {
493 LLVMValueRef values[TGSI_NUM_CHANNELS];
494 unsigned chan;
495 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
496 values[chan] = si_llvm_emit_fetch(bld_base, reg, type, chan);
497 }
498 return ac_build_gather_values(&ctx->ac, values,
499 TGSI_NUM_CHANNELS);
500 }
501
502 if (reg->Register.Indirect) {
503 LLVMValueRef load = load_value_from_array(bld_base, reg->Register.File, type,
504 swizzle, reg->Register.Index, &reg->Indirect);
505 return bitcast(bld_base, type, load);
506 }
507
508 switch(reg->Register.File) {
509 case TGSI_FILE_IMMEDIATE: {
510 LLVMTypeRef ctype = tgsi2llvmtype(bld_base, type);
511 if (tgsi_type_is_64bit(type)) {
512 result = LLVMGetUndef(LLVMVectorType(ctx->i32, 2));
513 result = LLVMConstInsertElement(result,
514 ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle],
515 ctx->i32_0);
516 result = LLVMConstInsertElement(result,
517 ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1],
518 ctx->i32_1);
519 return LLVMConstBitCast(result, ctype);
520 } else {
521 return LLVMConstBitCast(ctx->imms[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle], ctype);
522 }
523 }
524
525 case TGSI_FILE_INPUT: {
526 unsigned index = reg->Register.Index;
527 LLVMValueRef input[4];
528
529 /* Loading inputs lazily (at each use) isn't beneficial for vertex
530 * shaders; for those we want to make sure the VMEM loads are executed
531 * only once. Fragment shaders don't care much, because
532 * v_interp instructions are much cheaper than VMEM loads.
533 */
534 if (!si_preload_fs_inputs(ctx) &&
535 ctx->bld_base.info->processor == PIPE_SHADER_FRAGMENT)
536 ctx->load_input(ctx, index, &ctx->input_decls[index], input);
537 else
538 memcpy(input, &ctx->inputs[index * 4], sizeof(input));
539
540 result = input[swizzle];
541
542 if (tgsi_type_is_64bit(type)) {
543 ptr = result;
544 ptr2 = input[swizzle + 1];
545 return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
546 ptr, ptr2);
547 }
548 break;
549 }
550
551 case TGSI_FILE_TEMPORARY:
552 if (reg->Register.Index >= ctx->temps_count)
553 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
554 ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
555 if (tgsi_type_is_64bit(type)) {
556 ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
557 return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
558 LLVMBuildLoad(builder, ptr, ""),
559 LLVMBuildLoad(builder, ptr2, ""));
560 }
561 result = LLVMBuildLoad(builder, ptr, "");
562 break;
563
564 case TGSI_FILE_OUTPUT:
565 ptr = get_output_ptr(bld_base, reg->Register.Index, swizzle);
566 if (tgsi_type_is_64bit(type)) {
567 ptr2 = get_output_ptr(bld_base, reg->Register.Index, swizzle + 1);
568 return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
569 LLVMBuildLoad(builder, ptr, ""),
570 LLVMBuildLoad(builder, ptr2, ""));
571 }
572 result = LLVMBuildLoad(builder, ptr, "");
573 break;
574
575 default:
576 return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
577 }
578
579 return bitcast(bld_base, type, result);
580 }
581
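/* Fetch a TGSI system value that was preloaded into ctx->system_values,
 * extracting the requested channel and pairing channels for 64-bit types.
 */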
582 static LLVMValueRef fetch_system_value(struct lp_build_tgsi_context *bld_base,
583 const struct tgsi_full_src_register *reg,
584 enum tgsi_opcode_type type,
585 unsigned swizzle)
586 {
587 struct si_shader_context *ctx = si_shader_context(bld_base);
588 LLVMBuilderRef builder = ctx->ac.builder;
589 LLVMValueRef cval = ctx->system_values[reg->Register.Index];
590
591 if (tgsi_type_is_64bit(type)) {
592 LLVMValueRef lo, hi;
593
594 assert(swizzle == 0 || swizzle == 2);
595
596 lo = LLVMBuildExtractElement(
597 builder, cval, LLVMConstInt(ctx->i32, swizzle, 0), "");
598 hi = LLVMBuildExtractElement(
599 builder, cval, LLVMConstInt(ctx->i32, swizzle + 1, 0), "");
600
601 return si_llvm_emit_fetch_64bit(bld_base, tgsi2llvmtype(bld_base, type),
602 lo, hi);
603 }
604
605 if (LLVMGetTypeKind(LLVMTypeOf(cval)) == LLVMVectorTypeKind) {
606 cval = LLVMBuildExtractElement(
607 builder, cval, LLVMConstInt(ctx->i32, swizzle, 0), "");
608 } else {
609 assert(swizzle == 0);
610 }
611
612 return bitcast(bld_base, type, cval);
613 }
614
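/* Allocate storage for TGSI declarations: allocas for address registers,
 * temporaries (per channel or as whole arrays) and outputs, and preloaded
 * values for inputs and system values.
 */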
615 static void emit_declaration(struct lp_build_tgsi_context *bld_base,
616 const struct tgsi_full_declaration *decl)
617 {
618 struct si_shader_context *ctx = si_shader_context(bld_base);
619 LLVMBuilderRef builder = ctx->ac.builder;
620 unsigned first, last, i;
621 switch(decl->Declaration.File) {
622 case TGSI_FILE_ADDRESS:
623 {
624 unsigned idx;
625 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
626 unsigned chan;
627 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
628 ctx->addrs[idx][chan] = ac_build_alloca_undef(
629 &ctx->ac, ctx->i32, "");
630 }
631 }
632 break;
633 }
634
635 case TGSI_FILE_TEMPORARY:
636 {
637 char name[18] = "";
638 LLVMValueRef array_alloca = NULL;
639 unsigned decl_size;
640 unsigned writemask = decl->Declaration.UsageMask;
641 first = decl->Range.First;
642 last = decl->Range.Last;
643 decl_size = 4 * ((last - first) + 1);
644
645 if (decl->Declaration.Array) {
646 unsigned id = decl->Array.ArrayID - 1;
647 unsigned array_size;
648
649 writemask &= ctx->temp_arrays[id].writemask;
650 ctx->temp_arrays[id].writemask = writemask;
651 array_size = ((last - first) + 1) * util_bitcount(writemask);
652
653 /* If the array has more than 16 elements, store it
654 * in memory using an alloca that spans the entire
655 * array.
656 *
657 * Otherwise, store each array element individually.
658 * We will then generate vectors (per-channel, up to
659 * <16 x float> if the usagemask is a single bit) for
660 * indirect addressing.
661 *
662 * Note that 16 is the number of vector elements that
663 * LLVM will store in a register, so theoretically an
664 * array with up to 4 * 16 = 64 elements could be
665 * handled this way, but whether that's a good idea
666 * depends on VGPR register pressure elsewhere.
667 *
668 * FIXME: We shouldn't need to have the non-alloca
669 * code path for arrays. LLVM should be smart enough to
670 * promote allocas into registers when profitable.
671 */
672 if (array_size > 16 ||
673 !ctx->screen->llvm_has_working_vgpr_indexing) {
674 array_alloca = ac_build_alloca_undef(&ctx->ac,
675 LLVMArrayType(ctx->f32,
676 array_size), "array");
677 ctx->temp_array_allocas[id] = array_alloca;
678 }
679 }
680
681 if (!ctx->temps_count) {
682 ctx->temps_count = bld_base->info->file_max[TGSI_FILE_TEMPORARY] + 1;
683 ctx->temps = MALLOC(TGSI_NUM_CHANNELS * ctx->temps_count * sizeof(LLVMValueRef));
684 }
685 if (!array_alloca) {
686 for (i = 0; i < decl_size; ++i) {
687 #ifdef DEBUG
688 snprintf(name, sizeof(name), "TEMP%d.%c",
689 first + i / 4, "xyzw"[i % 4]);
690 #endif
691 ctx->temps[first * TGSI_NUM_CHANNELS + i] =
692 ac_build_alloca_undef(&ctx->ac,
693 ctx->f32,
694 name);
695 }
696 } else {
697 LLVMValueRef idxs[2] = {
698 ctx->i32_0,
699 NULL
700 };
701 unsigned j = 0;
702
703 if (writemask != TGSI_WRITEMASK_XYZW &&
704 !ctx->undef_alloca) {
705 /* Create a dummy alloca. We use it so that we
706 * have a pointer that is safe to load from if
707 * a shader ever reads from a channel that
708 * it never writes to.
709 */
710 ctx->undef_alloca = ac_build_alloca_undef(
711 &ctx->ac, ctx->f32, "undef");
712 }
713
714 for (i = 0; i < decl_size; ++i) {
715 LLVMValueRef ptr;
716 if (writemask & (1 << (i % 4))) {
717 #ifdef DEBUG
718 snprintf(name, sizeof(name), "TEMP%d.%c",
719 first + i / 4, "xyzw"[i % 4]);
720 #endif
721 idxs[1] = LLVMConstInt(ctx->i32, j, 0);
722 ptr = LLVMBuildGEP(builder, array_alloca, idxs, 2, name);
723 j++;
724 } else {
725 ptr = ctx->undef_alloca;
726 }
727 ctx->temps[first * TGSI_NUM_CHANNELS + i] = ptr;
728 }
729 }
730 break;
731 }
732 case TGSI_FILE_INPUT:
733 {
734 unsigned idx;
735 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
736 if (ctx->load_input &&
737 ctx->input_decls[idx].Declaration.File != TGSI_FILE_INPUT) {
738 ctx->input_decls[idx] = *decl;
739 ctx->input_decls[idx].Range.First = idx;
740 ctx->input_decls[idx].Range.Last = idx;
741 ctx->input_decls[idx].Semantic.Index += idx - decl->Range.First;
742
743 if (si_preload_fs_inputs(ctx) ||
744 bld_base->info->processor != PIPE_SHADER_FRAGMENT)
745 ctx->load_input(ctx, idx, &ctx->input_decls[idx],
746 &ctx->inputs[idx * 4]);
747 }
748 }
749 }
750 break;
751
752 case TGSI_FILE_SYSTEM_VALUE:
753 {
754 unsigned idx;
755 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
756 si_load_system_value(ctx, idx, decl);
757 }
758 }
759 break;
760
761 case TGSI_FILE_OUTPUT:
762 {
763 char name[16] = "";
764 unsigned idx;
765 for (idx = decl->Range.First; idx <= decl->Range.Last; idx++) {
766 unsigned chan;
767 assert(idx < RADEON_LLVM_MAX_OUTPUTS);
768 if (ctx->outputs[idx][0])
769 continue;
770 for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
771 #ifdef DEBUG
772 snprintf(name, sizeof(name), "OUT%d.%c",
773 idx, "xyzw"[chan % 4]);
774 #endif
775 ctx->outputs[idx][chan] = ac_build_alloca_undef(
776 &ctx->ac, ctx->f32, name);
777 }
778 }
779 break;
780 }
781
782 case TGSI_FILE_MEMORY:
783 si_tgsi_declare_compute_memory(ctx, decl);
784 break;
785
786 default:
787 break;
788 }
789 }
790
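/* Store instruction results to the destination register, honoring the
 * writemask, saturation, indirect addressing and 64-bit channel pairs.
 */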
791 void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
792 const struct tgsi_full_instruction *inst,
793 const struct tgsi_opcode_info *info,
794 unsigned index,
795 LLVMValueRef dst[4])
796 {
797 struct si_shader_context *ctx = si_shader_context(bld_base);
798 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
799 LLVMBuilderRef builder = ctx->ac.builder;
800 LLVMValueRef temp_ptr, temp_ptr2 = NULL;
801 bool is_vec_store = false;
802 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode, index);
803
804 if (dst[0]) {
805 LLVMTypeKind k = LLVMGetTypeKind(LLVMTypeOf(dst[0]));
806 is_vec_store = (k == LLVMVectorTypeKind);
807 }
808
809 if (is_vec_store) {
810 LLVMValueRef values[4] = {};
811 uint32_t writemask = reg->Register.WriteMask;
812 while (writemask) {
813 unsigned chan = u_bit_scan(&writemask);
814 LLVMValueRef index = LLVMConstInt(ctx->i32, chan, 0);
815 values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
816 dst[0], index, "");
817 }
818 bld_base->emit_store(bld_base, inst, info, index, values);
819 return;
820 }
821
822 uint32_t writemask = reg->Register.WriteMask;
823 while (writemask) {
824 unsigned chan_index = u_bit_scan(&writemask);
825 LLVMValueRef value = dst[chan_index];
826
827 if (tgsi_type_is_64bit(dtype) && (chan_index == 1 || chan_index == 3))
828 continue;
829 if (inst->Instruction.Saturate)
830 value = ac_build_clamp(&ctx->ac, value);
831
832 if (reg->Register.File == TGSI_FILE_ADDRESS) {
833 temp_ptr = ctx->addrs[reg->Register.Index][chan_index];
834 LLVMBuildStore(builder, value, temp_ptr);
835 continue;
836 }
837
838 if (!tgsi_type_is_64bit(dtype))
839 value = ac_to_float(&ctx->ac, value);
840
841 if (reg->Register.Indirect) {
842 unsigned file = reg->Register.File;
843 unsigned reg_index = reg->Register.Index;
844 store_value_to_array(bld_base, value, file, chan_index,
845 reg_index, &reg->Indirect);
846 } else {
847 switch(reg->Register.File) {
848 case TGSI_FILE_OUTPUT:
849 temp_ptr = ctx->outputs[reg->Register.Index][chan_index];
850 if (tgsi_type_is_64bit(dtype))
851 temp_ptr2 = ctx->outputs[reg->Register.Index][chan_index + 1];
852 break;
853
854 case TGSI_FILE_TEMPORARY:
855 {
856 if (reg->Register.Index >= ctx->temps_count)
857 continue;
858
859 temp_ptr = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index];
860 if (tgsi_type_is_64bit(dtype))
861 temp_ptr2 = ctx->temps[ TGSI_NUM_CHANNELS * reg->Register.Index + chan_index + 1];
862
863 break;
864 }
865 default:
866 return;
867 }
868 if (!tgsi_type_is_64bit(dtype))
869 LLVMBuildStore(builder, value, temp_ptr);
870 else {
871 LLVMValueRef ptr = LLVMBuildBitCast(builder, value,
872 LLVMVectorType(ctx->i32, 2), "");
873 LLVMValueRef val2;
874 value = LLVMBuildExtractElement(builder, ptr,
875 ctx->i32_0, "");
876 val2 = LLVMBuildExtractElement(builder, ptr,
877 ctx->i32_1, "");
878
879 LLVMBuildStore(builder, ac_to_float(&ctx->ac, value), temp_ptr);
880 LLVMBuildStore(builder, ac_to_float(&ctx->ac, val2), temp_ptr2);
881 }
882 }
883 }
884 }
885
886 static int get_line(int pc)
887 {
888 /* Subtract 1 so that the number shown is that of the corresponding
889 * opcode in the TGSI dump, e.g. an if block has the same suffix as
890 * the instruction number of the corresponding TGSI IF.
891 */
892 return pc - 1;
893 }
894
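/* Structured control flow: the TGSI IF/ELSE/ENDIF and loop opcodes are
 * lowered through the ac_build_* helpers; get_line() labels the blocks with
 * the matching TGSI instruction number so the IR is easier to correlate
 * with the TGSI dump.
 */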
895 static void bgnloop_emit(const struct lp_build_tgsi_action *action,
896 struct lp_build_tgsi_context *bld_base,
897 struct lp_build_emit_data *emit_data)
898 {
899 struct si_shader_context *ctx = si_shader_context(bld_base);
900 ac_build_bgnloop(&ctx->ac, get_line(bld_base->pc));
901 }
902
903 static void brk_emit(const struct lp_build_tgsi_action *action,
904 struct lp_build_tgsi_context *bld_base,
905 struct lp_build_emit_data *emit_data)
906 {
907 struct si_shader_context *ctx = si_shader_context(bld_base);
908 ac_build_break(&ctx->ac);
909 }
910
911 static void cont_emit(const struct lp_build_tgsi_action *action,
912 struct lp_build_tgsi_context *bld_base,
913 struct lp_build_emit_data *emit_data)
914 {
915 struct si_shader_context *ctx = si_shader_context(bld_base);
916 ac_build_continue(&ctx->ac);
917 }
918
919 static void else_emit(const struct lp_build_tgsi_action *action,
920 struct lp_build_tgsi_context *bld_base,
921 struct lp_build_emit_data *emit_data)
922 {
923 struct si_shader_context *ctx = si_shader_context(bld_base);
924 ac_build_else(&ctx->ac, get_line(bld_base->pc));
925 }
926
927 static void endif_emit(const struct lp_build_tgsi_action *action,
928 struct lp_build_tgsi_context *bld_base,
929 struct lp_build_emit_data *emit_data)
930 {
931 struct si_shader_context *ctx = si_shader_context(bld_base);
932 ac_build_endif(&ctx->ac, get_line(bld_base->pc));
933 }
934
935 static void endloop_emit(const struct lp_build_tgsi_action *action,
936 struct lp_build_tgsi_context *bld_base,
937 struct lp_build_emit_data *emit_data)
938 {
939 struct si_shader_context *ctx = si_shader_context(bld_base);
940 ac_build_endloop(&ctx->ac, get_line(bld_base->pc));
941 }
942
943 static void if_emit(const struct lp_build_tgsi_action *action,
944 struct lp_build_tgsi_context *bld_base,
945 struct lp_build_emit_data *emit_data)
946 {
947 struct si_shader_context *ctx = si_shader_context(bld_base);
948 ac_build_if(&ctx->ac, emit_data->args[0], get_line(bld_base->pc));
949 }
950
951 static void uif_emit(const struct lp_build_tgsi_action *action,
952 struct lp_build_tgsi_context *bld_base,
953 struct lp_build_emit_data *emit_data)
954 {
955 struct si_shader_context *ctx = si_shader_context(bld_base);
956 ac_build_uif(&ctx->ac, emit_data->args[0], get_line(bld_base->pc));
957 }
958
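/* Record a TGSI immediate: all four channels are stored as i32 constants
 * and bitcast to the required type at fetch time.
 */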
959 static void emit_immediate(struct lp_build_tgsi_context *bld_base,
960 const struct tgsi_full_immediate *imm)
961 {
962 unsigned i;
963 struct si_shader_context *ctx = si_shader_context(bld_base);
964
965 for (i = 0; i < 4; ++i) {
966 ctx->imms[ctx->imms_num * TGSI_NUM_CHANNELS + i] =
967 LLVMConstInt(ctx->i32, imm->u[i].Uint, false);
968 }
969
970 ctx->imms_num++;
971 }
972
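/* One-time initialization of the LLVM/gallivm state for a context: creates
 * the LLVMContext, module and builder, sets up the lp_build contexts and
 * registers the TGSI emit callbacks and common types.
 */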
973 void si_llvm_context_init(struct si_shader_context *ctx,
974 struct si_screen *sscreen,
975 struct ac_llvm_compiler *compiler)
976 {
977 struct lp_type type;
978
979 /* Initialize the gallivm object:
980 * We are only using the module, context, and builder fields of this struct.
981 * That is enough to be able to pass our gallivm struct to the helper
982 * functions in the gallivm module.
983 */
984 memset(ctx, 0, sizeof(*ctx));
985 ctx->screen = sscreen;
986 ctx->compiler = compiler;
987
988 ctx->gallivm.context = LLVMContextCreate();
989 ctx->gallivm.module = ac_create_module(compiler->tm, ctx->gallivm.context);
990
991 bool unsafe_fpmath = (sscreen->debug_flags & DBG(UNSAFE_MATH)) != 0;
992 enum ac_float_mode float_mode =
993 unsafe_fpmath ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
994 AC_FLOAT_MODE_NO_SIGNED_ZEROS_FP_MATH;
995
996 ctx->gallivm.builder = ac_create_builder(ctx->gallivm.context,
997 float_mode);
998
999 ac_llvm_context_init(&ctx->ac, ctx->gallivm.context,
1000 sscreen->info.chip_class, sscreen->info.family);
1001 ctx->ac.module = ctx->gallivm.module;
1002 ctx->ac.builder = ctx->gallivm.builder;
1003
1004 struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
1005
1006 type.floating = true;
1007 type.fixed = false;
1008 type.sign = true;
1009 type.norm = false;
1010 type.width = 32;
1011 type.length = 1;
1012
1013 lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
1014 lp_build_context_init(&ctx->bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
1015 lp_build_context_init(&ctx->bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
1016 type.width *= 2;
1017 lp_build_context_init(&ctx->bld_base.dbl_bld, &ctx->gallivm, type);
1018 lp_build_context_init(&ctx->bld_base.uint64_bld, &ctx->gallivm, lp_uint_type(type));
1019 lp_build_context_init(&ctx->bld_base.int64_bld, &ctx->gallivm, lp_int_type(type));
1020
1021 bld_base->soa = 1;
1022 bld_base->emit_swizzle = emit_swizzle;
1023 bld_base->emit_declaration = emit_declaration;
1024 bld_base->emit_immediate = emit_immediate;
1025
1026 bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
1027 bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
1028 bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
1029 bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
1030 bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
1031 bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
1032 bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
1033 bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
1034
1035 si_shader_context_init_alu(&ctx->bld_base);
1036 si_shader_context_init_mem(ctx);
1037
1038 ctx->voidt = LLVMVoidTypeInContext(ctx->ac.context);
1039 ctx->i1 = LLVMInt1TypeInContext(ctx->ac.context);
1040 ctx->i8 = LLVMInt8TypeInContext(ctx->ac.context);
1041 ctx->i32 = LLVMInt32TypeInContext(ctx->ac.context);
1042 ctx->i64 = LLVMInt64TypeInContext(ctx->ac.context);
1043 ctx->i128 = LLVMIntTypeInContext(ctx->ac.context, 128);
1044 ctx->f32 = LLVMFloatTypeInContext(ctx->ac.context);
1045 ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
1046 ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
1047 ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
1048 ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
1049
1050 ctx->i32_0 = LLVMConstInt(ctx->i32, 0, 0);
1051 ctx->i32_1 = LLVMConstInt(ctx->i32, 1, 0);
1052 }
1053
1054 /* Set the context to a certain TGSI shader. Can be called repeatedly
1055 * to change the shader. */
1056 void si_llvm_context_set_tgsi(struct si_shader_context *ctx,
1057 struct si_shader *shader)
1058 {
1059 const struct tgsi_shader_info *info = NULL;
1060 const struct tgsi_token *tokens = NULL;
1061
1062 if (shader && shader->selector) {
1063 info = &shader->selector->info;
1064 tokens = shader->selector->tokens;
1065 }
1066
1067 ctx->shader = shader;
1068 ctx->type = info ? info->processor : -1;
1069 ctx->bld_base.info = info;
1070
1071 /* Clean up the old contents. */
1072 FREE(ctx->temp_arrays);
1073 ctx->temp_arrays = NULL;
1074 FREE(ctx->temp_array_allocas);
1075 ctx->temp_array_allocas = NULL;
1076
1077 FREE(ctx->imms);
1078 ctx->imms = NULL;
1079 ctx->imms_num = 0;
1080
1081 FREE(ctx->temps);
1082 ctx->temps = NULL;
1083 ctx->temps_count = 0;
1084
1085 if (!info)
1086 return;
1087
1088 ctx->num_const_buffers = util_last_bit(info->const_buffers_declared);
1089 ctx->num_shader_buffers = util_last_bit(info->shader_buffers_declared);
1090
1091 ctx->num_samplers = util_last_bit(info->samplers_declared);
1092 ctx->num_images = util_last_bit(info->images_declared);
1093
1094 if (!tokens)
1095 return;
1096
1097 if (info->array_max[TGSI_FILE_TEMPORARY] > 0) {
1098 int size = info->array_max[TGSI_FILE_TEMPORARY];
1099
1100 ctx->temp_arrays = CALLOC(size, sizeof(ctx->temp_arrays[0]));
1101 ctx->temp_array_allocas = CALLOC(size, sizeof(ctx->temp_array_allocas[0]));
1102
1103 tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, size,
1104 ctx->temp_arrays);
1105 }
1106 if (info->file_max[TGSI_FILE_IMMEDIATE] >= 0) {
1107 int size = info->file_max[TGSI_FILE_IMMEDIATE] + 1;
1108 ctx->imms = MALLOC(size * TGSI_NUM_CHANNELS * sizeof(LLVMValueRef));
1109 }
1110
1111 /* Re-set these to start with a clean slate. */
1112 ctx->bld_base.num_instructions = 0;
1113 ctx->bld_base.pc = 0;
1114 memset(ctx->outputs, 0, sizeof(ctx->outputs));
1115
1116 ctx->bld_base.emit_store = si_llvm_emit_store;
1117 ctx->bld_base.emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = si_llvm_emit_fetch;
1118 ctx->bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = si_llvm_emit_fetch;
1119 ctx->bld_base.emit_fetch_funcs[TGSI_FILE_TEMPORARY] = si_llvm_emit_fetch;
1120 ctx->bld_base.emit_fetch_funcs[TGSI_FILE_OUTPUT] = si_llvm_emit_fetch;
1121 ctx->bld_base.emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;
1122 }
1123
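/* Create the main shader function with the given parameter and return types,
 * append the entry basic block and select the AMDGPU calling convention for
 * the real hardware stage (LS/ES are merged into HS/GS on GFX9 and later).
 */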
1124 void si_llvm_create_func(struct si_shader_context *ctx,
1125 const char *name,
1126 LLVMTypeRef *return_types, unsigned num_return_elems,
1127 LLVMTypeRef *ParamTypes, unsigned ParamCount)
1128 {
1129 LLVMTypeRef main_fn_type, ret_type;
1130 LLVMBasicBlockRef main_fn_body;
1131 enum si_llvm_calling_convention call_conv;
1132 unsigned real_shader_type;
1133
1134 if (num_return_elems)
1135 ret_type = LLVMStructTypeInContext(ctx->ac.context,
1136 return_types,
1137 num_return_elems, true);
1138 else
1139 ret_type = ctx->voidt;
1140
1141 /* Set up the function. */
1142 ctx->return_type = ret_type;
1143 main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
1144 ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, name, main_fn_type);
1145 main_fn_body = LLVMAppendBasicBlockInContext(ctx->ac.context,
1146 ctx->main_fn, "main_body");
1147 LLVMPositionBuilderAtEnd(ctx->ac.builder, main_fn_body);
1148
1149 real_shader_type = ctx->type;
1150
1151 /* LS is merged into HS (TCS), and ES is merged into GS. */
1152 if (ctx->screen->info.chip_class >= GFX9) {
1153 if (ctx->shader->key.as_ls)
1154 real_shader_type = PIPE_SHADER_TESS_CTRL;
1155 else if (ctx->shader->key.as_es)
1156 real_shader_type = PIPE_SHADER_GEOMETRY;
1157 }
1158
1159 switch (real_shader_type) {
1160 case PIPE_SHADER_VERTEX:
1161 case PIPE_SHADER_TESS_EVAL:
1162 call_conv = RADEON_LLVM_AMDGPU_VS;
1163 break;
1164 case PIPE_SHADER_TESS_CTRL:
1165 call_conv = RADEON_LLVM_AMDGPU_HS;
1166 break;
1167 case PIPE_SHADER_GEOMETRY:
1168 call_conv = RADEON_LLVM_AMDGPU_GS;
1169 break;
1170 case PIPE_SHADER_FRAGMENT:
1171 call_conv = RADEON_LLVM_AMDGPU_PS;
1172 break;
1173 case PIPE_SHADER_COMPUTE:
1174 call_conv = RADEON_LLVM_AMDGPU_CS;
1175 break;
1176 default:
1177 unreachable("Unhandled shader type");
1178 }
1179
1180 LLVMSetFunctionCallConv(ctx->main_fn, call_conv);
1181 }
1182
1183 void si_llvm_optimize_module(struct si_shader_context *ctx)
1184 {
1185 /* Dump LLVM IR before any optimization passes */
1186 if (ctx->screen->debug_flags & DBG(PREOPT_IR) &&
1187 si_can_dump_shader(ctx->screen, ctx->type))
1188 LLVMDumpModule(ctx->gallivm.module);
1189
1190 /* Run the optimization passes. */
1191 LLVMRunPassManager(ctx->compiler->passmgr, ctx->gallivm.module);
1192 LLVMDisposeBuilder(ctx->ac.builder);
1193 }
1194
1195 void si_llvm_dispose(struct si_shader_context *ctx)
1196 {
1197 LLVMDisposeModule(ctx->gallivm.module);
1198 LLVMContextDispose(ctx->gallivm.context);
1199 FREE(ctx->temp_arrays);
1200 ctx->temp_arrays = NULL;
1201 FREE(ctx->temp_array_allocas);
1202 ctx->temp_array_allocas = NULL;
1203 FREE(ctx->temps);
1204 ctx->temps = NULL;
1205 ctx->temps_count = 0;
1206 FREE(ctx->imms);
1207 ctx->imms = NULL;
1208 ctx->imms_num = 0;
1209 ac_llvm_context_dispose(&ctx->ac);
1210 }