#if defined R600_USE_LLVM || defined HAVE_OPENCL
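+/* Address space through which the LLVM backend reads constant buffer 0; the value presumably has to match the backend's own address-space numbering. */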
+#define CONSTANT_BUFFER_0_ADDR_SPACE 9
+
static LLVMValueRef llvm_fetch_const(
struct lp_build_tgsi_context * bld_base,
const struct tgsi_full_src_register *reg,
enum tgsi_opcode_type type,
unsigned swizzle)
{
- LLVMValueRef idx = lp_build_const_int32(bld_base->base.gallivm,
- radeon_llvm_reg_index_soa(reg->Register.Index, swizzle));
- LLVMValueRef cval = build_intrinsic(bld_base->base.gallivm->builder,
- "llvm.AMDGPU.load.const", bld_base->base.elem_type,
- &idx, 1, LLVMReadNoneAttribute);
-
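+ /* GEP indices: the leading 0 steps through the pointer to the constant array, the second selects the float4 element for reg->Register.Index. */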
+ LLVMValueRef offset[2] = {
+ LLVMConstInt(LLVMInt64TypeInContext(bld_base->base.gallivm->context), 0, false),
+ lp_build_const_int32(bld_base->base.gallivm, reg->Register.Index)
+ };
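+ /* For indirect constant reads, add the value held in the TGSI address register to the constant index. */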
+ if (reg->Register.Indirect) {
+ struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
+ LLVMValueRef index = LLVMBuildLoad(bld_base->base.gallivm->builder, bld->addr[reg->Indirect.Index][reg->Indirect.SwizzleX], "");
+ offset[1] = LLVMBuildAdd(bld_base->base.gallivm->builder, offset[1], index, "");
+ }
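+ /* Constant buffer 0 is modelled as a 1024-entry float4 array in its own address space; the pointer value is simply 0, and the address space presumably identifies the buffer to the backend. */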
+ LLVMTypeRef const_ptr_type = LLVMPointerType(LLVMArrayType(LLVMVectorType(bld_base->base.elem_type, 4), 1024),
+ CONSTANT_BUFFER_0_ADDR_SPACE);
+ LLVMValueRef const_ptr = LLVMBuildIntToPtr(bld_base->base.gallivm->builder, lp_build_const_int32(bld_base->base.gallivm, 0), const_ptr_type, "");
+ LLVMValueRef ptr = LLVMBuildGEP(bld_base->base.gallivm->builder, const_ptr, offset, 2, "");
+ LLVMValueRef cvecval = LLVMBuildLoad(bld_base->base.gallivm->builder, ptr, "");
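+ /* Pick the requested channel out of the float4 constant that was just loaded. */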
+ LLVMValueRef cval = LLVMBuildExtractElement(bld_base->base.gallivm->builder, cvecval, lp_build_const_int32(bld_base->base.gallivm, swizzle), "");
return bitcast(bld_base, type, cval);
}
static unsigned r600_alu_from_byte_stream(struct r600_shader_ctx *ctx,
unsigned char * bytes, unsigned bytes_read)
{
- unsigned src_idx;
+ unsigned src_idx, src_num;
struct r600_bytecode_alu alu;
- unsigned src_const_reg[3];
+ unsigned src_use_sel[3];
+ unsigned src_sel[3] = {};
uint32_t word0, word1;
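+ /* The first byte of the stream gives the number of source operands for this instruction. */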
+ src_num = bytes[bytes_read++];
+
memset(&alu, 0, sizeof(alu));
- for(src_idx = 0; src_idx < 3; src_idx++) {
+ for(src_idx = 0; src_idx < src_num; src_idx++) {
unsigned i;
- src_const_reg[src_idx] = bytes[bytes_read++];
+ src_use_sel[src_idx] = bytes[bytes_read++];
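+ /* Read the 32-bit source select, least significant byte first. */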
+ for (i = 0; i < 4; i++) {
+ src_sel[src_idx] |= bytes[bytes_read++] << (i * 8);
+ }
for (i = 0; i < 4; i++) {
alu.src[src_idx].value |= bytes[bytes_read++] << (i * 8);
}
}
- for(src_idx = 0; src_idx < 3; src_idx++) {
- if (src_const_reg[src_idx])
- alu.src[src_idx].sel += 512;
+ for(src_idx = 0; src_idx < src_num; src_idx++) {
+ if (src_use_sel[src_idx]) {
+ unsigned sel = src_sel[src_idx];
+
+ alu.src[src_idx].chan = sel & 3;
+ sel >>= 2;
+
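+ /* The packed select keeps the channel in its low two bits; after the shift, values of 512 and up refer to kcache constants, with the bank in bits 12 and above. */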
+ if (sel >= 512) { /* constant */
+ sel -= 512;
+ alu.src[src_idx].kc_bank = sel >> 12;
+ alu.src[src_idx].sel = (sel & 4095) + 512;
+ }
+ else {
+ alu.src[src_idx].sel = sel;
+ }
+ }
}
#if HAVE_LLVM < 0x0302
if (r600_bytecode_add_vtx(ctx->bc, &vtx)) {
fprintf(stderr, "Error adding vtx\n");
}
- /* Use the Texture Cache */
- ctx->bc->cf_last->inst = EG_V_SQ_CF_WORD1_SQ_CF_INST_TEX;
+
+ /* Use the Texture Cache for compute shaders */
+ if (ctx->bc->chip_class >= EVERGREEN &&
+ ctx->bc->type == TGSI_PROCESSOR_COMPUTE) {
+ ctx->bc->cf_last->inst = EG_V_SQ_CF_WORD1_SQ_CF_INST_TEX;
+ }
return bytes_read;
}
}
#ifdef R600_USE_LLVM
- if (use_llvm && ctx.info.indirect_files) {
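+ /* Indirect addressing of the constant file is handled by the LLVM constant fetch, so only fall back when some other register file is addressed indirectly. */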
+ if (use_llvm && ctx.info.indirect_files && (ctx.info.indirect_files & (1 << TGSI_FILE_CONSTANT)) != ctx.info.indirect_files) {
fprintf(stderr, "Warning: R600 LLVM backend does not support "
"indirect adressing. Falling back to TGSI "
"backend.\n");
}
switch(reg->Register.File) {
+ case TGSI_FILE_ADDRESS:
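+ /* The address register has its own storage in bld->addr; store there directly instead of falling through to the shared temp_ptr store. */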
+ temp_ptr = bld->addr[reg->Register.Index][chan_index];
+ LLVMBuildStore(builder, value, temp_ptr);
+ continue;
case TGSI_FILE_OUTPUT:
temp_ptr = bld->outputs[reg->Register.Index][chan_index];
break;