if (HAVE_LLVM <= 0x0308)
return LLVMGetUndef(ctx->i32);
- return radeon_llvm_bound_index(ctx, result, num);
+ return si_llvm_bound_index(ctx, result, num);
}
value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
swizzle * 4 + 4, 1, 0);
- return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
+ return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
/**
dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
lp_build_const_int32(gallivm, swizzle + 1));
value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
- return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
+ return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
return LLVMBuildBitCast(gallivm->builder, value,
*/
if (reg->Register.File != TGSI_FILE_OUTPUT ||
(dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
- radeon_llvm_emit_store(bld_base, inst, info, dst);
+ si_llvm_emit_store(bld_base, inst, info, dst);
return;
}
LLVMValueRef value = dst[chan_index];
if (inst->Instruction.Saturate)
- value = radeon_llvm_saturate(bld_base, value);
+ value = si_llvm_saturate(bld_base, value);
lds_store(bld_base, chan_index, dw_addr, value);
"llvm.SI.buffer.load.dword.i32.i32",
ctx->i32, args, 9,
LLVMReadOnlyAttribute);
- return radeon_llvm_emit_fetch_64bit(bld_base, type,
- value, value2);
+ return si_llvm_emit_fetch_64bit(bld_base, type,
+ value, value2);
}
return LLVMBuildBitCast(gallivm->builder,
value,
LLVMConstInt(ctx->i32, 4, 0));
result2 = buffer_load_const(ctx, bufp, addr2);
- result = radeon_llvm_emit_fetch_64bit(bld_base, type,
- result, result2);
+ result = si_llvm_emit_fetch_64bit(bld_base, type,
+ result, result2);
}
return result;
}
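/* Editor's note: this is the 64-bit constant-fetch path. The address of the
 * high dword is formed by adding 4 bytes (the LLVMConstInt(ctx->i32, 4, 0)
 * above), both dwords are loaded with buffer_load_const(), and
 * si_llvm_emit_fetch_64bit() merges them into the requested 64-bit value. */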
case V_028714_SPI_SHADER_UNORM16_ABGR:
for (chan = 0; chan < 4; chan++) {
- val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
+ val[chan] = si_llvm_saturate(bld_base, values[chan]);
val[chan] = LLVMBuildFMul(builder, val[chan],
lp_build_const_float(gallivm, 65535), "");
val[chan] = LLVMBuildFAdd(builder, val[chan],
for (j = 0; j < 4; j++) {
addr = ctx->soa.outputs[i][j];
val = LLVMBuildLoad(gallivm->builder, addr, "");
- val = radeon_llvm_saturate(bld_base, val);
+ val = si_llvm_saturate(bld_base, val);
LLVMBuildStore(gallivm->builder, val, addr);
}
}
/* Clamp color */
if (ctx->shader->key.ps.epilog.clamp_color)
for (i = 0; i < 4; i++)
- color[i] = radeon_llvm_saturate(bld_base, color[i]);
+ color[i] = si_llvm_saturate(bld_base, color[i]);
/* Alpha to one */
if (ctx->shader->key.ps.epilog.alpha_to_one)
* Z32_FLOAT, but we don't know that here.
*/
if (ctx->screen->b.chip_class == VI)
- z = radeon_llvm_saturate(bld_base, z);
+ z = si_llvm_saturate(bld_base, z);
address[count++] = z;
}
{
int i;
- radeon_llvm_create_func(ctx, returns, num_returns,
- params, num_params);
- radeon_llvm_shader_type(ctx->main_fn, ctx->type);
+ si_llvm_create_func(ctx, returns, num_returns,
+ params, num_params);
+ si_llvm_shader_type(ctx->main_fn, ctx->type);
ctx->return_value = LLVMGetUndef(ctx->return_type);
for (i = 0; i <= last_sgpr; ++i) {
/* Reserve register locations for VGPR inputs the PS prolog may need. */
if (ctx->type == PIPE_SHADER_FRAGMENT &&
!ctx->is_monolithic) {
- radeon_llvm_add_attribute(ctx->main_fn,
- "InitialPSInputAddr",
- S_0286D0_PERSP_SAMPLE_ENA(1) |
- S_0286D0_PERSP_CENTER_ENA(1) |
- S_0286D0_PERSP_CENTROID_ENA(1) |
- S_0286D0_LINEAR_SAMPLE_ENA(1) |
- S_0286D0_LINEAR_CENTER_ENA(1) |
- S_0286D0_LINEAR_CENTROID_ENA(1) |
- S_0286D0_FRONT_FACE_ENA(1) |
- S_0286D0_POS_FIXED_PT_ENA(1));
+ si_llvm_add_attribute(ctx->main_fn,
+ "InitialPSInputAddr",
+ S_0286D0_PERSP_SAMPLE_ENA(1) |
+ S_0286D0_PERSP_CENTER_ENA(1) |
+ S_0286D0_PERSP_CENTROID_ENA(1) |
+ S_0286D0_LINEAR_SAMPLE_ENA(1) |
+ S_0286D0_LINEAR_CENTER_ENA(1) |
+ S_0286D0_LINEAR_CENTROID_ENA(1) |
+ S_0286D0_FRONT_FACE_ENA(1) |
+ S_0286D0_POS_FIXED_PT_ENA(1));
} else if (ctx->type == PIPE_SHADER_COMPUTE) {
const unsigned *properties = shader->selector->info.properties;
unsigned max_work_group_size =
max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
}
- radeon_llvm_add_attribute(ctx->main_fn,
- "amdgpu-max-work-group-size",
- max_work_group_size);
+ si_llvm_add_attribute(ctx->main_fn,
+ "amdgpu-max-work-group-size",
+ max_work_group_size);
}
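/* Editor's note: both attributes above are plain string function attributes
 * read by the AMDGPU LLVM backend: "InitialPSInputAddr" reserves the listed
 * PS input VGPRs for the separately compiled prolog, and
 * "amdgpu-max-work-group-size" bounds the compute work-group size. Only the
 * signature and "char str[16]" of si_llvm_add_attribute() survive in this
 * patch; a plausible sketch of its body (LLVMAddTargetDependentFunctionAttr
 * is an assumption):
 *
 *   char str[16];
 *   snprintf(str, sizeof(str), "%i", value);
 *   LLVMAddTargetDependentFunctionAttr(F, name, str);
 */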
shader->info.num_input_sgprs = 0;
}
if (!si_replace_shader(count, binary)) {
- r = radeon_llvm_compile(mod, binary, tm, debug);
+ r = si_llvm_compile(mod, binary, tm, debug);
if (r)
return r;
}
r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
LLVMDumpModule(bld_base->base.gallivm->module);
- radeon_llvm_finalize_module(ctx,
+ si_llvm_finalize_module(ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_GEOMETRY));
r = si_compile_llvm(sscreen, &ctx->shader->binary,
r = si_shader_binary_upload(sscreen, ctx->shader);
}
- radeon_llvm_dispose(ctx);
+ si_llvm_dispose(ctx);
FREE(outputs);
return r;
struct lp_build_tgsi_action tmpl = {};
memset(ctx, 0, sizeof(*ctx));
- radeon_llvm_context_init(
+ si_llvm_context_init(
ctx, "amdgcn--",
(shader && shader->selector) ? &shader->selector->info : NULL,
(shader && shader->selector) ? shader->selector->tokens : NULL);
r600_can_dump_shader(&sscreen->b, ctx.type))
LLVMDumpModule(mod);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, ctx.type));
r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
goto out;
}
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
* LLVM 3.9svn has this bug.
/* Compile. */
si_llvm_build_ret(&ctx, ret);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));
if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
"Vertex Shader Prolog"))
status = false;
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
return status;
}
/* Compile. */
LLVMBuildRetVoid(gallivm->builder);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));
if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
"Vertex Shader Epilog"))
status = false;
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
return status;
}
/* Compile. */
LLVMBuildRetVoid(gallivm->builder);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_TESS_CTRL));
if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
"Tessellation Control Shader Epilog"))
status = false;
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
return status;
}
/* Compile. */
si_llvm_build_ret(&ctx, ret);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
"Fragment Shader Prolog"))
status = false;
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
return status;
}
/* Create the function. */
si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
/* Disable elimination of unused inputs. */
- radeon_llvm_add_attribute(ctx.main_fn,
+ si_llvm_add_attribute(ctx.main_fn,
"InitialPSInputAddr", 0xffffff);
/* Process colors. */
/* Compile. */
LLVMBuildRetVoid(gallivm->builder);
- radeon_llvm_finalize_module(&ctx,
+ si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
"Fragment Shader Epilog"))
status = false;
- radeon_llvm_dispose(&ctx);
+ si_llvm_dispose(&ctx);
return status;
}
#define RADEON_LLVM_MAX_SYSTEM_VALUES 4
-struct radeon_llvm_flow;
+struct si_llvm_flow;
struct si_shader_context {
struct lp_build_tgsi_soa_context soa;
unsigned temps_count;
LLVMValueRef system_values[RADEON_LLVM_MAX_SYSTEM_VALUES];
- struct radeon_llvm_flow *flow;
+ struct si_llvm_flow *flow;
unsigned flow_depth;
unsigned flow_depth_max;
return (struct si_shader_context*)bld_base;
}
-void radeon_llvm_add_attribute(LLVMValueRef F, const char *name, int value);
-void radeon_llvm_shader_type(LLVMValueRef F, unsigned type);
+void si_llvm_add_attribute(LLVMValueRef F, const char *name, int value);
+void si_llvm_shader_type(LLVMValueRef F, unsigned type);
-LLVMTargetRef radeon_llvm_get_r600_target(const char *triple);
+LLVMTargetRef si_llvm_get_amdgpu_target(const char *triple);
-unsigned radeon_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
- LLVMTargetMachineRef tm,
- struct pipe_debug_callback *debug);
+unsigned si_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
+ LLVMTargetMachineRef tm,
+ struct pipe_debug_callback *debug);
LLVMTypeRef tgsi2llvmtype(struct lp_build_tgsi_context *bld_base,
enum tgsi_opcode_type type);
LLVMValueRef bitcast(struct lp_build_tgsi_context *bld_base,
enum tgsi_opcode_type type, LLVMValueRef value);
-LLVMValueRef radeon_llvm_bound_index(struct si_shader_context *ctx,
- LLVMValueRef index,
- unsigned num);
+LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
+ LLVMValueRef index,
+ unsigned num);
-void radeon_llvm_context_init(struct si_shader_context *ctx,
- const char *triple,
- const struct tgsi_shader_info *info,
- const struct tgsi_token *tokens);
+void si_llvm_context_init(struct si_shader_context *ctx,
+ const char *triple,
+ const struct tgsi_shader_info *info,
+ const struct tgsi_token *tokens);
-void radeon_llvm_create_func(struct si_shader_context *ctx,
- LLVMTypeRef *return_types, unsigned num_return_elems,
- LLVMTypeRef *ParamTypes, unsigned ParamCount);
+void si_llvm_create_func(struct si_shader_context *ctx,
+ LLVMTypeRef *return_types, unsigned num_return_elems,
+ LLVMTypeRef *ParamTypes, unsigned ParamCount);
-void radeon_llvm_dispose(struct si_shader_context *ctx);
+void si_llvm_dispose(struct si_shader_context *ctx);
-void radeon_llvm_finalize_module(struct si_shader_context *ctx,
- bool run_verifier);
+void si_llvm_finalize_module(struct si_shader_context *ctx,
+ bool run_verifier);
-LLVMValueRef radeon_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
- enum tgsi_opcode_type type,
- LLVMValueRef ptr,
- LLVMValueRef ptr2);
+LLVMValueRef si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
+ enum tgsi_opcode_type type,
+ LLVMValueRef ptr,
+ LLVMValueRef ptr2);
-LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
- LLVMValueRef value);
+LLVMValueRef si_llvm_saturate(struct lp_build_tgsi_context *bld_base,
+ LLVMValueRef value);
-LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
- const struct tgsi_full_src_register *reg,
- enum tgsi_opcode_type type,
- unsigned swizzle);
+LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
+ const struct tgsi_full_src_register *reg,
+ enum tgsi_opcode_type type,
+ unsigned swizzle);
-void radeon_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
- const struct tgsi_full_instruction *inst,
- const struct tgsi_opcode_info *info,
- LLVMValueRef dst[4]);
+void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
+ const struct tgsi_full_instruction *inst,
+ const struct tgsi_opcode_info *info,
+ LLVMValueRef dst[4]);
void si_shader_context_init_alu(struct lp_build_tgsi_context *bld_base);
void si_prepare_cube_coords(struct lp_build_tgsi_context *bld_base,
/* Data for if/else/endif and bgnloop/endloop control flow structures.
*/
-struct radeon_llvm_flow {
+struct si_llvm_flow {
/* Loop exit or next part of if/else/endif. */
LLVMBasicBlockRef next_block;
LLVMBasicBlockRef loop_entry_block;
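/* Editor's note: these entries form a small stack indexed by ctx->flow_depth
 * (see the flow/flow_depth/flow_depth_max fields above). Judging from the
 * handlers later in this patch, push_flow() pushes an entry for IF/BGNLOOP,
 * get_current_flow() resolves ELSE/ENDIF/ENDLOOP, and get_innermost_loop()
 * finds the target of BRK (branch to next_block) and CONT (branch to
 * loop_entry_block). A rough sketch of the push itself (array growth elided;
 * push_flow() visibly sizes it with MAX2 of the doubled depth and
 * RADEON_LLVM_INITIAL_CF_DEPTH):
 *
 *   flow = &ctx->flow[ctx->flow_depth++];
 *   flow->next_block = NULL;
 *   flow->loop_entry_block = NULL;
 *   return flow;
 */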
/**
* Shader types for the LLVM backend.
*/
-enum radeon_llvm_shader_type {
+enum si_llvm_shader_type {
RADEON_LLVM_SHADER_PS = 0,
RADEON_LLVM_SHADER_VS = 1,
RADEON_LLVM_SHADER_GS = 2,
RADEON_LLVM_SHADER_CS = 3,
};
-enum radeon_llvm_calling_convention {
+enum si_llvm_calling_convention {
RADEON_LLVM_AMDGPU_VS = 87,
RADEON_LLVM_AMDGPU_GS = 88,
RADEON_LLVM_AMDGPU_PS = 89,
RADEON_LLVM_AMDGPU_CS = 90,
};
-void radeon_llvm_add_attribute(LLVMValueRef F, const char *name, int value)
+void si_llvm_add_attribute(LLVMValueRef F, const char *name, int value)
{
char str[16];
*
* @param type shader type to set
*/
-void radeon_llvm_shader_type(LLVMValueRef F, unsigned type)
+void si_llvm_shader_type(LLVMValueRef F, unsigned type)
{
- enum radeon_llvm_shader_type llvm_type;
- enum radeon_llvm_calling_convention calling_conv;
+ enum si_llvm_shader_type llvm_type;
+ enum si_llvm_calling_convention calling_conv;
switch (type) {
case PIPE_SHADER_VERTEX:
if (HAVE_LLVM >= 0x309)
LLVMSetFunctionCallConv(F, calling_conv);
else
- radeon_llvm_add_attribute(F, "ShaderType", llvm_type);
+ si_llvm_add_attribute(F, "ShaderType", llvm_type);
}
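/* Editor's note: only the vertex case survives in this hunk. Assuming the
 * enums declared above, the full mapping is presumably:
 *
 *   PIPE_SHADER_VERTEX/TESS_CTRL/TESS_EVAL -> RADEON_LLVM_SHADER_VS / RADEON_LLVM_AMDGPU_VS
 *   PIPE_SHADER_GEOMETRY                   -> RADEON_LLVM_SHADER_GS / RADEON_LLVM_AMDGPU_GS
 *   PIPE_SHADER_FRAGMENT                   -> RADEON_LLVM_SHADER_PS / RADEON_LLVM_AMDGPU_PS
 *   PIPE_SHADER_COMPUTE                    -> RADEON_LLVM_SHADER_CS / RADEON_LLVM_AMDGPU_CS
 *
 * with the calling convention used on LLVM >= 3.9 and the "ShaderType"
 * attribute used on older LLVM, as the tail of the function shows. */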
-static void init_r600_target()
+static void init_amdgpu_target(void)
{
gallivm_init_llvm_targets();
#if HAVE_LLVM < 0x0307
#endif
}
-static once_flag init_r600_target_once_flag = ONCE_FLAG_INIT;
+static once_flag init_amdgpu_target_once_flag = ONCE_FLAG_INIT;
-LLVMTargetRef radeon_llvm_get_r600_target(const char *triple)
+LLVMTargetRef si_llvm_get_amdgpu_target(const char *triple)
{
LLVMTargetRef target = NULL;
char *err_message = NULL;
- call_once(&init_r600_target_once_flag, init_r600_target);
+ call_once(&init_amdgpu_target_once_flag, init_amdgpu_target);
if (LLVMGetTargetFromTriple(triple, &target, &err_message)) {
fprintf(stderr, "Cannot find target for triple %s ", triple);
return target;
}
-struct radeon_llvm_diagnostics {
+struct si_llvm_diagnostics {
struct pipe_debug_callback *debug;
unsigned retval;
};
-static void radeonDiagnosticHandler(LLVMDiagnosticInfoRef di, void *context)
+static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
- struct radeon_llvm_diagnostics *diag = (struct radeon_llvm_diagnostics *)context;
+ struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
char *description = LLVMGetDiagInfoDescription(di);
const char *severity_str = NULL;
*
* @returns 0 for success, 1 for failure
*/
-unsigned radeon_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
- LLVMTargetMachineRef tm,
- struct pipe_debug_callback *debug)
+unsigned si_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
+ LLVMTargetMachineRef tm,
+ struct pipe_debug_callback *debug)
{
- struct radeon_llvm_diagnostics diag;
+ struct si_llvm_diagnostics diag;
char *err;
LLVMContextRef llvm_ctx;
LLVMMemoryBufferRef out_buffer;
/* Setup Diagnostic Handler */
llvm_ctx = LLVMGetModuleContext(M);
- LLVMContextSetDiagnosticHandler(llvm_ctx, radeonDiagnosticHandler, &diag);
+ LLVMContextSetDiagnosticHandler(llvm_ctx, si_diagnostic_handler, &diag);
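/* Editor's note: diag pairs the caller's pipe_debug_callback with a retval
 * flag (struct si_llvm_diagnostics above). A hedged sketch of what
 * si_diagnostic_handler() presumably does with each report (not verbatim
 * from the source):
 *
 *   if (severity == LLVMDSError)
 *           diag->retval = 1;
 *   fprintf(stderr, "LLVM diagnostic (%s): %s\n", severity_str, description);
 *   LLVMDisposeMessage(description);
 */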
/* Compile IR */
mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile, &err,
* Return a value that is equal to the given i32 \p index if it lies in [0,num)
* or an undefined value in the same interval otherwise.
*/
-LLVMValueRef radeon_llvm_bound_index(struct si_shader_context *ctx,
- LLVMValueRef index,
- unsigned num)
+LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
+ LLVMValueRef index,
+ unsigned num)
{
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
return index;
}
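/* Editor's note: the clamp logic itself is elided above. A minimal sketch of
 * one way to satisfy the contract from the comment, assuming gallium's
 * util_is_power_of_two() helper (hypothetical, not necessarily the upstream
 * body):
 *
 *   LLVMValueRef c_max = lp_build_const_int32(gallivm, num - 1);
 *   if (util_is_power_of_two(num)) {
 *           index = LLVMBuildAnd(builder, index, c_max, "");
 *   } else {
 *           LLVMValueRef cc = LLVMBuildICmp(builder, LLVMIntULE, index, c_max, "");
 *           index = LLVMBuildSelect(builder, cc, index, c_max, "");
 *   }
 */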
-static struct radeon_llvm_flow *
+static struct si_llvm_flow *
get_current_flow(struct si_shader_context *ctx)
{
if (ctx->flow_depth > 0)
return NULL;
}
-static struct radeon_llvm_flow *
+static struct si_llvm_flow *
get_innermost_loop(struct si_shader_context *ctx)
{
for (unsigned i = ctx->flow_depth; i > 0; --i) {
return NULL;
}
-static struct radeon_llvm_flow *
+static struct si_llvm_flow *
push_flow(struct si_shader_context *ctx)
{
- struct radeon_llvm_flow *flow;
+ struct si_llvm_flow *flow;
if (ctx->flow_depth >= ctx->flow_depth_max) {
unsigned new_max = MAX2(ctx->flow_depth << 1, RADEON_LLVM_INITIAL_CF_DEPTH);
* 2. the memory for allocas must be allocated at the _end_ of the
* scratch space (after spilled registers)
*/
- index = radeon_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
+ index = si_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
index = LLVMBuildMul(
builder, index,
}
LLVMValueRef
-radeon_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
- enum tgsi_opcode_type type,
- LLVMValueRef ptr,
- LLVMValueRef ptr2)
+si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
+ enum tgsi_opcode_type type,
+ LLVMValueRef ptr,
+ LLVMValueRef ptr2)
{
LLVMBuilderRef builder = bld_base->base.gallivm->builder;
LLVMValueRef result;
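/* Editor's note: the body is elided here. A 64-bit TGSI value is carried as
 * two 32-bit halves, so this helper presumably packs ptr and ptr2 into a
 * two-element i32 vector and bitcasts it to the requested type. A rough
 * sketch (lo, hi, zero and one are placeholders, not names from the source):
 *
 *   LLVMTypeRef v2i32 = LLVMVectorType(LLVMInt32TypeInContext(context), 2);
 *   result = LLVMGetUndef(v2i32);
 *   result = LLVMBuildInsertElement(builder, result, lo, zero, "");
 *   result = LLVMBuildInsertElement(builder, result, hi, one, "");
 *   return bitcast(bld_base, type, result);
 */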
for (i = 0; i < size; ++i) {
tmp_reg.Register.Index = i + range.First;
- LLVMValueRef temp = radeon_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
+ LLVMValueRef temp = si_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
result = LLVMBuildInsertElement(builder, result, temp,
lp_build_const_int32(gallivm, i), "array_vector");
}
LLVMValueRef ptr_hi, val_hi;
ptr_hi = LLVMBuildGEP(builder, ptr, &bld_base->uint_bld.one, 1, "");
val_hi = LLVMBuildLoad(builder, ptr_hi, "");
- val = radeon_llvm_emit_fetch_64bit(bld_base, type, val, val_hi);
+ val = si_llvm_emit_fetch_64bit(bld_base, type, val, val_hi);
}
return val;
}
}
-LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
- const struct tgsi_full_src_register *reg,
- enum tgsi_opcode_type type,
- unsigned swizzle)
+LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
+ const struct tgsi_full_src_register *reg,
+ enum tgsi_opcode_type type,
+ unsigned swizzle)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
LLVMValueRef values[TGSI_NUM_CHANNELS];
unsigned chan;
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
- values[chan] = radeon_llvm_emit_fetch(bld_base, reg, type, chan);
+ values[chan] = si_llvm_emit_fetch(bld_base, reg, type, chan);
}
return lp_build_gather_values(bld_base->base.gallivm, values,
TGSI_NUM_CHANNELS);
if (tgsi_type_is_64bit(type)) {
ptr = result;
ptr2 = input[swizzle + 1];
- return radeon_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
+ return si_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
}
break;
}
ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
if (tgsi_type_is_64bit(type)) {
ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
- return radeon_llvm_emit_fetch_64bit(bld_base, type,
- LLVMBuildLoad(builder, ptr, ""),
- LLVMBuildLoad(builder, ptr2, ""));
+ return si_llvm_emit_fetch_64bit(bld_base, type,
+ LLVMBuildLoad(builder, ptr, ""),
+ LLVMBuildLoad(builder, ptr2, ""));
}
result = LLVMBuildLoad(builder, ptr, "");
break;
ptr = lp_get_output_ptr(bld, reg->Register.Index, swizzle);
if (tgsi_type_is_64bit(type)) {
ptr2 = lp_get_output_ptr(bld, reg->Register.Index, swizzle + 1);
- return radeon_llvm_emit_fetch_64bit(bld_base, type,
- LLVMBuildLoad(builder, ptr, ""),
- LLVMBuildLoad(builder, ptr2, ""));
+ return si_llvm_emit_fetch_64bit(bld_base, type,
+ LLVMBuildLoad(builder, ptr, ""),
+ LLVMBuildLoad(builder, ptr2, ""));
}
result = LLVMBuildLoad(builder, ptr, "");
break;
}
}
-LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
- LLVMValueRef value)
+LLVMValueRef si_llvm_saturate(struct lp_build_tgsi_context *bld_base,
+ LLVMValueRef value)
{
struct lp_build_emit_data clamp_emit_data;
&clamp_emit_data);
}
-void radeon_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
- const struct tgsi_full_instruction *inst,
- const struct tgsi_opcode_info *info,
- LLVMValueRef dst[4])
+void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
+ const struct tgsi_full_instruction *inst,
+ const struct tgsi_opcode_info *info,
+ LLVMValueRef dst[4])
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
if (tgsi_type_is_64bit(dtype) && (chan_index == 1 || chan_index == 3))
continue;
if (inst->Instruction.Saturate)
- value = radeon_llvm_saturate(bld_base, value);
+ value = si_llvm_saturate(bld_base, value);
if (reg->Register.File == TGSI_FILE_ADDRESS) {
temp_ptr = bld->addr[reg->Register.Index][chan_index];
assert(ctx->flow_depth >= 1);
if (ctx->flow_depth >= 2) {
- struct radeon_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
+ struct si_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
return LLVMInsertBasicBlockInContext(gallivm->context,
flow->next_block, name);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *flow = push_flow(ctx);
+ struct si_llvm_flow *flow = push_flow(ctx);
flow->loop_entry_block = append_basic_block(ctx, "LOOP");
flow->next_block = append_basic_block(ctx, "ENDLOOP");
set_basicblock_name(flow->loop_entry_block, "loop", bld_base->pc);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *flow = get_innermost_loop(ctx);
+ struct si_llvm_flow *flow = get_innermost_loop(ctx);
LLVMBuildBr(gallivm->builder, flow->next_block);
}
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *flow = get_innermost_loop(ctx);
+ struct si_llvm_flow *flow = get_innermost_loop(ctx);
LLVMBuildBr(gallivm->builder, flow->loop_entry_block);
}
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *current_branch = get_current_flow(ctx);
+ struct si_llvm_flow *current_branch = get_current_flow(ctx);
LLVMBasicBlockRef endif_block;
assert(!current_branch->loop_entry_block);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *current_branch = get_current_flow(ctx);
+ struct si_llvm_flow *current_branch = get_current_flow(ctx);
assert(!current_branch->loop_entry_block);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *current_loop = get_current_flow(ctx);
+ struct si_llvm_flow *current_loop = get_current_flow(ctx);
assert(current_loop->loop_entry_block);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct radeon_llvm_flow *flow = push_flow(ctx);
+ struct si_llvm_flow *flow = push_flow(ctx);
LLVMBasicBlockRef if_block;
if_block = append_basic_block(ctx, "IF");
ctx->soa.num_immediates++;
}
-void radeon_llvm_context_init(struct si_shader_context *ctx, const char *triple,
- const struct tgsi_shader_info *info,
- const struct tgsi_token *tokens)
+void si_llvm_context_init(struct si_shader_context *ctx, const char *triple,
+ const struct tgsi_shader_info *info,
+ const struct tgsi_token *tokens)
{
struct lp_type type;
lp_build_context_init(&ctx->soa.bld_base.int64_bld, &ctx->gallivm, lp_int_type(type));
bld_base->soa = 1;
- bld_base->emit_store = radeon_llvm_emit_store;
+ bld_base->emit_store = si_llvm_emit_store;
bld_base->emit_swizzle = emit_swizzle;
bld_base->emit_declaration = emit_declaration;
bld_base->emit_immediate = emit_immediate;
- bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = radeon_llvm_emit_fetch;
- bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = radeon_llvm_emit_fetch;
- bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = radeon_llvm_emit_fetch;
- bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = radeon_llvm_emit_fetch;
+ bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = si_llvm_emit_fetch;
+ bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = si_llvm_emit_fetch;
+ bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = si_llvm_emit_fetch;
+ bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = si_llvm_emit_fetch;
bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;
/* metadata allowing 2.5 ULP */
bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
}
-void radeon_llvm_create_func(struct si_shader_context *ctx,
- LLVMTypeRef *return_types, unsigned num_return_elems,
- LLVMTypeRef *ParamTypes, unsigned ParamCount)
+void si_llvm_create_func(struct si_shader_context *ctx,
+ LLVMTypeRef *return_types, unsigned num_return_elems,
+ LLVMTypeRef *ParamTypes, unsigned ParamCount)
{
LLVMTypeRef main_fn_type, ret_type;
LLVMBasicBlockRef main_fn_body;
LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
}
-void radeon_llvm_finalize_module(struct si_shader_context *ctx,
- bool run_verifier)
+void si_llvm_finalize_module(struct si_shader_context *ctx,
+ bool run_verifier)
{
struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
const char *triple = LLVMGetTarget(gallivm->module);
gallivm_dispose_target_library_info(target_library_info);
}
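/* Editor's note: the pass setup is elided above. Based on the surviving
 * lines, si_llvm_finalize_module() presumably builds a function pass manager
 * with the module's target library info (hence the triple lookup and the
 * gallivm_dispose_target_library_info() call), optionally adds the IR
 * verifier when run_verifier is set, and runs a small scalar-optimization
 * pipeline over the shader. A rough sketch under those assumptions:
 *
 *   LLVMPassManagerRef pm = LLVMCreateFunctionPassManagerForModule(gallivm->module);
 *   LLVMAddTargetLibraryInfo(target_library_info, pm);
 *   LLVMAddPromoteMemoryToRegisterPass(pm);
 *   LLVMAddCFGSimplificationPass(pm);
 *   LLVMInitializeFunctionPassManager(pm);
 *   LLVMRunFunctionPassManager(pm, ctx->main_fn);
 *   LLVMFinalizeFunctionPassManager(pm);
 */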
-void radeon_llvm_dispose(struct si_shader_context *ctx)
+void si_llvm_dispose(struct si_shader_context *ctx)
{
LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);