if (rctx->b.chip_class <= R700) {
use_sb &= (shader->shader.processor_type != TGSI_PROCESSOR_GEOMETRY);
}
- /* disable SB for shaders using ubo array indexing as it doesn't handle those currently */
- use_sb &= !shader->shader.uses_ubo_indexing;
/* disable SB for shaders using doubles */
use_sb &= !shader->shader.uses_doubles;
continue;
}
- if (ctx->src[i].kc_rel)
- ctx->shader->uses_ubo_indexing = true;
-
if (ctx->src[i].rel) {
int chan = inst->Src[i].Indirect.Swizzle;
int treg = r600_get_temp(ctx);
ctx.gs_next_vertex = 0;
ctx.gs_stream_output_info = &so;
- shader->uses_ubo_indexing = false;
ctx.face_gpr = -1;
ctx.fixed_pt_position_gpr = -1;
ctx.fragcoord_input = -1;
boolean has_txq_cube_array_z_comp;
boolean uses_tex_buffers;
boolean gs_prim_id_input;
- /* Temporarily workaround SB not handling ubo indexing */
- boolean uses_ubo_indexing;
/* Size in bytes of a data item in the ring(s) (single vertex data).
For stages with only one ring, items 1-3 will be set to 0. */
bool is_alu_extended() {
assert(op_ptr->flags & CF_ALU);
- return kc[2].mode != KC_LOCK_NONE || kc[3].mode != KC_LOCK_NONE;
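+ /* The index mode fields live in the extended CF_ALU words, so any indexed
+ kcache set also requires the extended encoding */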
+ return kc[2].mode != KC_LOCK_NONE || kc[3].mode != KC_LOCK_NONE ||
+ kc[0].index_mode != KC_INDEX_NONE || kc[1].index_mode != KC_INDEX_NONE ||
+ kc[2].index_mode != KC_INDEX_NONE || kc[3].index_mode != KC_INDEX_NONE;
}
};
void bc_finalizer::emit_set_grad(fetch_node* f) {
- assert(f->src.size() == 12);
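+ /* Presumably 13 sources are allowed to account for an appended sampler/resource index value */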
+ assert(f->src.size() == 12 || f->src.size() == 13);
unsigned ops[2] = { FETCH_OP_SET_GRADIENTS_V, FETCH_OP_SET_GRADIENTS_H };
unsigned arg_start = 0;
}
sel_chan bc_finalizer::translate_kcache(cf_node* alu, value* v) {
- unsigned sel = v->select.sel();
- unsigned bank = sel >> 12;
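+ /* The kcache sel now also packs the bank and index mode, so decode it
+ through the sel_chan helpers instead of shifting the raw sel */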
+ unsigned sel = v->select.kcache_sel();
+ unsigned bank = v->select.kcache_bank();
unsigned chan = v->select.chan();
static const unsigned kc_base[] = {128, 160, 256, 288};
value *bc_parser::get_cf_index_value(unsigned idx)
{
assert(idx <= 1);
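+ /* The index value is recorded when the parser sees the MOVA that sets
+ CF_IDX0/1, so it must already exist when something references it */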
+ assert(cf_index_value[idx]);
return cf_index_value[idx];
}
void bc_parser::save_mova(alu_node *mova)
for (node_iterator I = g->begin(), E = g->end();
I != E; ++I) {
n = static_cast<alu_node*>(*I);
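+ // Track which CF_IDX registers the kcache operands of this instruction use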
+ bool ubo_indexing[2] = {};
if (!sh->assign_slot(n, slots[cgroup])) {
assert(!"alu slot assignment failed");
bc_kcache &kc = cf->bc.kc[kc_set];
kc_addr = (kc.addr << 4) + (sel & 0x1F);
- n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan);
+ n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan, (alu_kcache_index_mode)kc.index_mode);
+
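+ // Remember which CF_IDX register this kcache constant is addressed through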
+ if (kc.index_mode != KC_INDEX_NONE) {
+ assert(kc.index_mode != KC_LOCK_LOOP);
+ ubo_indexing[kc.index_mode - KC_INDEX_0] = true;
+ }
} else if (src.sel < MAX_GPR) {
value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);
}
}
}
+
+ // Add any UBO index values used by the instruction as extra source dependencies
+ if (ubo_indexing[0]) {
+ n->src.push_back(get_cf_index_value(0));
+ }
+ if (ubo_indexing[1]) {
+ n->src.push_back(get_cf_index_value(1));
+ }
+
if ((n->bc.dst_gpr == CM_V_SQ_MOVA_DST_CF_IDX0 || n->bc.dst_gpr == CM_V_SQ_MOVA_DST_CF_IDX1) &&
ctx.is_cayman())
// Move the CF_IDX value into the tex instruction operands; the scheduler will later re-emit the CF_IDX setup
if (n->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE) {
n->src.push_back(get_cf_index_value(n->bc.sampler_index_mode == V_SQ_CF_INDEX_1));
}
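+ // Resource (texture) indexing is handled the same way as sampler indexing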
+ if (n->bc.resource_index_mode != V_SQ_CF_INDEX_NONE) {
+ n->src.push_back(get_cf_index_value(n->bc.resource_index_mode == V_SQ_CF_INDEX_1));
+ }
}
}
if ((n.bc.op == ALU_OP1_MOV || n.bc.op == ALU_OP1_MOVA_INT ||
n.bc.op == ALU_OP1_MOVA_GPR_INT)
&& n.bc.clamp == 0 && n.bc.omod == 0
- && n.bc.src[0].abs == 0 && n.bc.src[0].neg == 0) {
+ && n.bc.src[0].abs == 0 && n.bc.src[0].neg == 0 &&
+ n.src.size() == 1 /* RIM/SIM can be appended as additional values */) {
assign_source(n.dst[0], v0);
return true;
}
static unsigned sel(unsigned idx) { return (idx-1) >> 2; }
static unsigned chan(unsigned idx) { return (idx-1) & 3; }
+
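+ // For kcache values: sel bits [11:0] hold the constant index, [27:12] the
+ // kcache bank and [31:28] the index mode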
+ sel_chan(unsigned bank, unsigned index,
+ unsigned chan, alu_kcache_index_mode index_mode)
+ : id(sel_chan((bank << 12) | index | ((unsigned)index_mode << 28), chan).id) {}
+ unsigned kcache_index_mode() const { return sel() >> 28; }
+ unsigned kcache_sel() const { return sel() & 0x0fffffffu; }
+ unsigned kcache_bank() const { return kcache_sel() >> 12; }
};
inline sb_ostream& operator <<(sb_ostream& o, sel_chan r) {
a->dst.resize(1); // Dummy needed for recolor
PSC_DUMP(
- sblog << "created IDX load: "
+ sblog << "created IDX load: ";
dump::dump_op(a);
sblog << "\n";
);
sblog << " ";
);
- if (f->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE) {
+ // TODO: if the same index value is already loaded, the reload could be skipped
+ if (f->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE ||
+ f->bc.resource_index_mode != V_SQ_CF_INDEX_NONE) {
+ unsigned index_mode = f->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE ?
+ f->bc.sampler_index_mode : f->bc.resource_index_mode;
+
// Currently require prior opt passes to use one TEX per indexed op
assert(f->parent->count() == 1);
value *v = f->src.back(); // Last src is index offset
+ assert(v);
cur_bb->push_front(c);
- load_index_register(v, f->bc.sampler_index_mode);
+ load_index_register(v, index_mode);
f->src.pop_back(); // Don't need index value any more
return;
if (uc) {
n->remove();
+
pending.push_back(n);
PSC_DUMP( sblog << "pending\n"; );
} else {
}
}
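+// Emit loads for any CF_IDX values still pending at a clause boundary. The
+// unfinished group is discarded and the register map rolled back so the
+// index load lands before the instructions that consume it.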
+void post_scheduler::emit_index_registers() {
+ for (unsigned i = 0; i < 2; i++) {
+ if (alu.current_idx[i]) {
+ regmap = prev_regmap;
+ alu.discard_current_group();
+
+ load_index_register(alu.current_idx[i], KC_INDEX_0 + i);
+ alu.current_idx[i] = NULL;
+ }
+ }
+}
+
void post_scheduler::emit_clause() {
if (alu.current_ar) {
alu.emit_group();
}
- alu.emit_clause(cur_bb);
+ if (!alu.is_empty()) {
+ alu.emit_clause(cur_bb);
+ }
+
+ emit_index_registers();
}
void post_scheduler::schedule_alu(container_node *c) {
prev_regmap = regmap;
if (!prepare_alu_group()) {
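+ // The group may have failed because it needs a CF_IDX value other than the
+ // one currently loaded; close the clause so the new index can be loaded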
+ if (alu.current_idx[0] || alu.current_idx[1]) {
+ regmap = prev_regmap;
+ emit_clause();
+ init_globals(live, false);
+
+ continue;
+ }
+
if (alu.current_ar) {
emit_load_ar();
continue;
regmap = prev_regmap;
emit_clause();
init_globals(live, false);
+
continue;
}
}
bool post_scheduler::map_src_vec(vvec &vv, bool src) {
+ if (src) {
+ // Handle possible UBO indexing
+ bool ubo_indexing[2] = { false, false };
+ for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
+ value *v = *I;
+ if (!v)
+ continue;
+
+ if (v->is_kcache()) {
+ unsigned index_mode = v->select.kcache_index_mode();
+ if (index_mode == KC_INDEX_0 || index_mode == KC_INDEX_1) {
+ ubo_indexing[index_mode - KC_INDEX_0] = true;
+ }
+ }
+ }
+
+ // idx values stored at end of src vec, see bc_parser::prepare_alu_group
+ for (unsigned i = 2; i != 0; i--) {
+ if (ubo_indexing[i-1]) {
+ // TODO: somehow skip adding the index value to the kcache reservation; it
+ // causes unnecessary group breaks and cache line locks
+ value *v = vv.back();
+ if (alu.current_idx[i-1] && alu.current_idx[i-1] != v) {
+ PSC_DUMP(
+ sblog << "IDX" << i-1 << " already set to " <<
+ *alu.current_idx[i-1] << ", trying to set " << *v << "\n";
+ );
+ return false;
+ }
+
+ alu.current_idx[i-1] = v;
+ PSC_DUMP(sblog << "IDX" << i-1 << " set to " << *v << "\n";);
+ }
+ }
+ }
+
for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
value *v = *I;
if (!v)
sblog << " current_AR: " << *alu.current_ar << "\n";
if (alu.current_pr)
sblog << " current_PR: " << *alu.current_pr << "\n";
+ if (alu.current_idx[0])
+ sblog << " current IDX0: " << *alu.current_idx[0] << "\n";
+ if (alu.current_idx[1])
+ sblog << " current IDX1: " << *alu.current_idx[1] << "\n";
}
void post_scheduler::recolor_locals() {
unsigned avail_slots = rt.avail_slots();
+ // The op producing an index value cannot be scheduled in the same clause as the instructions that use it for indexing
+ if (!n->dst.empty() && n->dst[0] &&
+ (n->dst[0] == alu.current_idx[0] || n->dst[0] == alu.current_idx[1])) {
+ PSC_DUMP(sblog << " CF_IDX source: " << *n->dst[0] << "\n";);
+ return 0;
+ }
+
if (n->is_alu_packed()) {
alu_packed_node *p = static_cast<alu_packed_node*>(n);
unsigned slots = p->get_slot_mask();
grp0(sh), grp1(sh),
group(), clause(),
push_exec_mask(),
- current_ar(), current_pr() {}
+ current_ar(), current_pr(), current_idx() {}
void alu_clause_tracker::emit_group() {
// reserving slots to load AR and PR values
unsigned reserve_slots = (current_ar ? 1 : 0) + (current_pr ? 1 : 0);
+ // ...and index registers
+ reserve_slots += (current_idx[0] != NULL) + (current_idx[1] != NULL);
if (slot_count + slots > MAX_ALU_SLOTS - reserve_slots)
return false;
unsigned cnt = 0;
for (unsigned i = 0; i < sel_count; ++i) {
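+ // Reserved kcache entries carry the index mode in their top bits so lines
+ // addressed through different CF_IDX registers stay distinct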
- unsigned line = rp[i];
+ unsigned line = rp[i] & 0x1fffffffu;
+ unsigned index_mode = rp[i] >> 29;
if (!line)
return cnt;
--line;
line = (sel_count == 2) ? line >> 5 : line >> 6;
+ line |= index_mode << 29;
if (lines.insert(line).second)
++cnt;
memcpy(old_kc, kc, sizeof(kc));
for (kc_lines::iterator I = lines.begin(), E = lines.end(); I != E; ++I) {
- unsigned line = *I;
+ unsigned index_mode = *I >> 29;
+ unsigned line = *I & 0x1fffffffu;
unsigned bank = line >> 8;
+ assert(index_mode <= KC_INDEX_INVALID);
line &= 0xFF;
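+ // Adjacent lines may only share a kcache set when their index modes match;
+ // a merged set spans two lines, hence KC_LOCK_2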
- if (c && (bank == kc[c-1].bank) && (kc[c-1].addr + 1 == line))
- ++kc[c-1].mode;
- else {
+ if (c && (bank == kc[c-1].bank) && (kc[c-1].addr + 1 == line) &&
+ kc[c-1].index_mode == index_mode)
+ {
+ kc[c-1].mode = KC_LOCK_2;
+ } else {
if (c == max_kcs) {
memcpy(kc, old_kc, sizeof(kc));
return false;
kc[c].bank = bank;
kc[c].addr = line;
+ kc[c].index_mode = index_mode;
++c;
}
}
class literal_tracker {
literal lt[4];
unsigned uc[4];
+
public:
literal_tracker() : lt(), uc() {}
// bottom-up)
value *current_ar;
value *current_pr;
+ // current values of CF_IDX registers that need preloading
+ value *current_idx[2];
alu_clause_tracker(shader &sh);
val_set cleared_interf;
+ void emit_index_registers();
public:
post_scheduler(shader &sh) : pass(sh),
return get_value(VLK_TEMP, id, 0);
}
-value* shader::get_kcache_value(unsigned bank, unsigned index, unsigned chan) {
+value* shader::get_kcache_value(unsigned bank, unsigned index, unsigned chan, alu_kcache_index_mode index_mode) {
return get_ro_value(kcache_values, VLK_KCACHE,
- sel_chan((bank << 12) | index, chan));
+ sel_chan(bank, index, chan, index_mode));
}
void shader::add_input(unsigned gpr, bool preloaded, unsigned comp_mask) {
value* get_special_ro_value(unsigned sel);
- value* get_kcache_value(unsigned bank, unsigned index, unsigned chan);
+ value* get_kcache_value(unsigned bank, unsigned index, unsigned chan, alu_kcache_index_mode index_mode);
value* get_value_version(value* v, unsigned ver);