{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct lp_build_context * base = &bld_base->base;
- unsigned idx;
- LLVMValueRef const_ptr;
- LLVMValueRef offset;
- LLVMValueRef load;
+ LLVMValueRef ptr;
+ LLVMValueRef args[2];
+ LLVMValueRef result;
if (swizzle == LP_CHAN_ALL) {
unsigned chan;
return lp_build_gather_values(bld_base->base.gallivm, values, 4);
}
+ /* Load the resource descriptor */
+ ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
+ args[0] = build_indexed_load(base->gallivm, ptr, bld_base->uint_bld.zero);
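+ /* args[0]: the buffer descriptor; args[1] (set below): byte offset of the constant within the buffer */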
+
/* currently not supported */
if (reg->Register.Indirect) {
assert(0);
- load = lp_build_const_int32(base->gallivm, 0);
- return bitcast(bld_base, type, load);
- }
-
- const_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
+ result = lp_build_const_int32(base->gallivm, 0);
+ return bitcast(bld_base, type, result);
+ } else
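+ /* Byte offset into the buffer: 4 channels per register, 4 bytes per channel */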
+ args[1] = lp_build_const_int32(base->gallivm, (reg->Register.Index * 4 + swizzle) * 4);
- /* XXX: This assumes that the constant buffer is not packed, so
- * CONST[0].x will have an offset of 0 and CONST[1].x will have an
- * offset of 4. */
- idx = (reg->Register.Index * 4) + swizzle;
- offset = lp_build_const_int32(base->gallivm, idx);
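+ /* Fetch the value through the llvm.SI.load.const intrinsic, passing the descriptor and the byte offset */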
+ result = build_intrinsic(base->gallivm->builder, "llvm.SI.load.const", base->elem_type,
+ args, 2, LLVMReadOnlyAttribute | LLVMNoUnwindAttribute);
- load = build_indexed_load(base->gallivm, const_ptr, offset);
- return bitcast(bld_base, type, load);
+ return bitcast(bld_base, type, result);
}
/* Initialize arguments for the shader export intrinsic */
v2i32 = LLVMVectorType(i32, 2);
v3i32 = LLVMVectorType(i32, 3);
- params[SI_PARAM_CONST] = LLVMPointerType(f32, CONST_ADDR_SPACE);
- params[SI_PARAM_SAMPLER] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
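+ /* Constants are now fetched through a 128-bit (v16i8) buffer resource descriptor; samplers keep using the same pointer type */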
+ params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
+ params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);
if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
* Constants
*/
static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
- struct pipe_constant_buffer *cb)
+ struct pipe_constant_buffer *cb)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct si_resource *rbuffer = cb ? si_resource(cb->buffer) : NULL;
struct si_pm4_state *pm4;
- uint64_t va_offset;
- uint32_t reg, offset;
+ uint32_t offset;
+ uint64_t va;
/* Note that the state tracker can unbind constant buffers by
* passing NULL here.
r600_upload_const_buffer(rctx, &rbuffer, cb->user_buffer, cb->buffer_size, &offset);
else
offset = 0;
- va_offset = r600_resource_va(ctx->screen, (void*)rbuffer);
- va_offset += offset;
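+ /* GPU virtual address of the constant buffer, plus the upload offset for user buffers */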
+ va = r600_resource_va(ctx->screen, (void*)rbuffer);
+ va += offset;
si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
+ si_pm4_sh_data_begin(pm4);
+
+ /* Fill in a buffer resource descriptor (V#) for the constant buffer */
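+ /* dword 0: base address, low 32 bits */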
+ si_pm4_sh_data_add(pm4, va);
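+ /* dword 1: high bits of the base address; stride stays 0 */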
+ si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
+ S_008F04_STRIDE(0)));
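+ /* dword 2: NUM_RECORDS = the buffer size in bytes */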
+ si_pm4_sh_data_add(pm4, cb->buffer_size);
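+ /* dword 3: identity channel swizzle, 32-bit float data format */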
+ si_pm4_sh_data_add(pm4, S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32));
+
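+ /* Make the shader's SI_SGPR_CONST user data point at this descriptor */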
switch (shader) {
case PIPE_SHADER_VERTEX:
- reg = R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_CONST * 4;
- si_pm4_set_reg(pm4, reg, va_offset);
- si_pm4_set_reg(pm4, reg + 4, va_offset >> 32);
+ si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_CONST);
si_pm4_set_state(rctx, vs_const, pm4);
break;
case PIPE_SHADER_FRAGMENT:
- reg = R_00B030_SPI_SHADER_USER_DATA_PS_0 + SI_SGPR_CONST * 4;
- si_pm4_set_reg(pm4, reg, va_offset);
- si_pm4_set_reg(pm4, reg + 4, va_offset >> 32);
+ si_pm4_sh_data_end(pm4, R_00B030_SPI_SHADER_USER_DATA_PS_0, SI_SGPR_CONST);
si_pm4_set_state(rctx, ps_const, pm4);
break;