int alignment = num_components == 3 ? 3 : num_components - 1;
f->imm.alignment = alignment;
- f->imm.offset_vector = 0xf;
+
+ if (load->num_src) {
+ index = ppir_target_get_src_reg_index(&load->src);
+ f->imm.offset_vector = index >> 2;
+ f->imm.offset_scalar = index & 0x3;
+ } else
+ f->imm.offset_vector = 0xf;
if (alignment == 3)
f->imm.index = load->index >> 2;
assert(0);
}
- int num_components = load->num_components;
- int alignment = num_components == 4 ? 2 : num_components - 1;
-
- f->alignment = alignment;
+ /* Uniforms are always aligned to vec4 boundary */
+ f->alignment = 2;
+ f->index = load->index;
- /* TODO: uniform can be also combined like varying */
- f->index = load->index << (2 - alignment);
+ if (load->num_src) {
+ f->offset_en = 1;
+ f->offset_reg = ppir_target_get_src_reg_index(&load->src);
+ }
}
static unsigned shift_to_op(int shift)
}
if (uniform->offset_en) {
- printf(" ");
+ printf("+");
print_source_scalar(uniform->offset_reg, NULL, false, false);
}
}
return true;
}
- assert(ppir_node_has_single_src_succ(node) || ppir_node_is_root(node));
- ppir_node *succ = ppir_node_first_succ(node);
- if (dest->type != ppir_target_register) {
+ /* a load can have multiple successors if we duplicated a load node
+ * that has a load node as a source
+ */
+ if ((ppir_node_has_single_src_succ(node) || ppir_node_is_root(node)) &&
+ dest->type != ppir_target_register) {
+ ppir_node *succ = ppir_node_first_succ(node);
switch (succ->type) {
case ppir_node_type_alu:
case ppir_node_type_branch: {
lnode->num_components = instr->num_components;
lnode->index = nir_intrinsic_base(instr) * 4 + nir_intrinsic_component(instr);
+ if (nir_src_is_const(instr->src[0]))
+ lnode->index += (uint32_t)(nir_src_as_float(instr->src[0]) * 4);
+ else {
+ lnode->num_src = 1;
+ ppir_node_add_src(block->comp, &lnode->node, &lnode->src, instr->src, 1);
+ }
return &lnode->node;
case nir_intrinsic_load_frag_coord:
lnode->num_components = instr->num_components;
lnode->index = nir_intrinsic_base(instr);
- lnode->index += (uint32_t)nir_src_as_float(instr->src[0]);
+ if (nir_src_is_const(instr->src[0]))
+ lnode->index += (uint32_t)nir_src_as_float(instr->src[0]);
+ else {
+ lnode->num_src = 1;
+ ppir_node_add_src(block->comp, &lnode->node, &lnode->src, instr->src, 1);
+ }
return &lnode->node;
ppir_dest *dest = ppir_node_get_dest(node);
new_lnode->dest = *dest;
+ ppir_src *src = ppir_node_get_src(node, 0);
+ if (src) {
+ new_lnode->num_src = 1;
+ switch (src->type) {
+ case ppir_target_ssa:
+ ppir_node_target_assign(&new_lnode->src, src->node);
+ ppir_node_add_dep(&new_lnode->node, src->node, ppir_dep_src);
+ break;
+ case ppir_target_register:
+ new_lnode->src.type = src->type;
+ new_lnode->src.reg = src->reg;
+ new_lnode->src.node = NULL;
+ break;
+ default:
+ /* Load nodes can't consume pipeline registers */
+ assert(0);
+ }
+ }
+
return &new_lnode->node;
}
case PIPE_SHADER_CAP_MAX_TEMPS:
return 256; /* need investigate */
+ case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
+ return 1;
+
+ case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+ return 0;
+
default:
return 0;
}