r600/sb: fix a bug emitting ar load from a constant.
/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Vadim Girlin
 */

#define PSC_DEBUG 0

#if PSC_DEBUG
#define PSC_DUMP(a) do { a } while (0)
#else
#define PSC_DUMP(a)
#endif
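
// PSC_DUMP() wraps statements that should only run in debug builds of this
// scheduler: with PSC_DEBUG == 0 the argument is compiled out entirely.
// A minimal usage sketch, matching the calls throughout this file:
//
//   PSC_DUMP( sblog << "scheduling BB " << bb->id << "\n"; );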

#include "sb_bc.h"
#include "sb_shader.h"
#include "sb_pass.h"
#include "sb_sched.h"
#include "eg_sq.h" // V_SQ_CF_INDEX_NONE/0/1

namespace r600_sb {

rp_kcache_tracker::rp_kcache_tracker(shader &sh) : rp(), uc(),
		// FIXME: for now we use the "two const pairs" limit for r600 as
		// well, same as for other chips; otherwise an additional check in
		// alu_group_tracker would be required to make sure that all 4
		// consts in the group fit into 2 kcache sets
		sel_count(2) {}
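
// The kcache read-port tracker: rp[] holds up to sel_count distinct keys
// (derived from the constant's sel and index mode by kc_sel()), uc[] counts
// the sources using each key, so a failed reservation can be rolled back
// symmetrically by unreserve().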
bool rp_kcache_tracker::try_reserve(sel_chan r) {
	unsigned sel = kc_sel(r);

	for (unsigned i = 0; i < sel_count; ++i) {
		if (rp[i] == 0) {
			rp[i] = sel;
			++uc[i];
			return true;
		}
		if (rp[i] == sel) {
			++uc[i];
			return true;
		}
	}
	return false;
}

bool rp_kcache_tracker::try_reserve(node* n) {
	bool need_unreserve = false;
	vvec::iterator I(n->src.begin()), E(n->src.end());

	for (; I != E; ++I) {
		value *v = *I;
		if (v->is_kcache()) {
			if (!try_reserve(v->select))
				break;
			else
				need_unreserve = true;
		}
	}
	if (I == E)
		return true;

	if (need_unreserve && I != n->src.begin()) {
		do {
			--I;
			value *v = *I;
			if (v->is_kcache())
				unreserve(v->select);
		} while (I != n->src.begin());
	}
	return false;
}

inline
void rp_kcache_tracker::unreserve(node* n) {
	vvec::iterator I(n->src.begin()), E(n->src.end());
	for (; I != E; ++I) {
		value *v = *I;
		if (v->is_kcache())
			unreserve(v->select);
	}
}

void rp_kcache_tracker::unreserve(sel_chan r) {
	unsigned sel = kc_sel(r);

	for (unsigned i = 0; i < sel_count; ++i)
		if (rp[i] == sel) {
			if (--uc[i] == 0)
				rp[i] = 0;
			return;
		}
	assert(0);
	return;
}
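
// Literal tracker: a group can encode at most MAX_ALU_LITERALS distinct
// 32-bit literal values (lt[] holds the values, uc[] their use counts);
// reservation across a node's sources is all-or-nothing, like the kcache
// tracker above.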
bool literal_tracker::try_reserve(alu_node* n) {
	bool need_unreserve = false;

	vvec::iterator I(n->src.begin()), E(n->src.end());

	for (; I != E; ++I) {
		value *v = *I;
		if (v->is_literal()) {
			if (!try_reserve(v->literal_value))
				break;
			else
				need_unreserve = true;
		}
	}
	if (I == E)
		return true;

	if (need_unreserve && I != n->src.begin()) {
		do {
			--I;
			value *v = *I;
			if (v->is_literal())
				unreserve(v->literal_value);
		} while (I != n->src.begin());
	}
	return false;
}

void literal_tracker::unreserve(alu_node* n) {
	unsigned nsrc = n->bc.op_ptr->src_count, i;

	for (i = 0; i < nsrc; ++i) {
		value *v = n->src[i];
		if (v->is_literal())
			unreserve(v->literal_value);
	}
}

bool literal_tracker::try_reserve(literal l) {

	PSC_DUMP( sblog << "literal reserve " << l.u << " " << l.f << "\n"; );

	for (unsigned i = 0; i < MAX_ALU_LITERALS; ++i) {
		if (lt[i] == 0) {
			lt[i] = l;
			++uc[i];
			PSC_DUMP( sblog << " reserved new uc = " << uc[i] << "\n"; );
			return true;
		} else if (lt[i] == l) {
			++uc[i];
			PSC_DUMP( sblog << " reserved uc = " << uc[i] << "\n"; );
			return true;
		}
	}
	PSC_DUMP( sblog << " failed to reserve literal\n"; );
	return false;
}

void literal_tracker::unreserve(literal l) {

	PSC_DUMP( sblog << "literal unreserve " << l.u << " " << l.f << "\n"; );

	for (unsigned i = 0; i < MAX_ALU_LITERALS; ++i) {
		if (lt[i] == l) {
			if (--uc[i] == 0)
				lt[i] = 0;
			return;
		}
	}
	assert(0);
	return;
}
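
// Bank swizzle cycle tables. For a vector slot, swz[bank_swizzle][src] is
// the read cycle (0..2) in which source operand 'src' fetches its GPR, so
// e.g. bs == 0 reads src0/src1/src2 in cycles 0/1/2 while VEC_210 reads
// them in the reverse order. For the trans slot the scalar table plays the
// same role, with the remaining cycles implicitly left for constant reads
// (see the const_count checks in rp_gpr_tracker::try_reserve below).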
static inline unsigned bs_cycle_vector(unsigned bs, unsigned src) {
	static const unsigned swz[VEC_NUM][3] = {
		{0, 1, 2}, {0, 2, 1}, {1, 2, 0}, {1, 0, 2}, {2, 0, 1}, {2, 1, 0}
	};
	assert(bs < VEC_NUM && src < 3);
	return swz[bs][src];
}

static inline unsigned bs_cycle_scalar(unsigned bs, unsigned src) {
	static const unsigned swz[SCL_NUM][3] = {
		{2, 1, 0}, {1, 2, 2}, {2, 1, 2}, {2, 2, 1}
	};

	if (bs >= SCL_NUM || src >= 3) {
		// this prevents gcc warning "array subscript is above array bounds"
		// AFAICS we should never hit this path
		abort();
	}
	return swz[bs][src];
}

static inline unsigned bs_cycle(bool trans, unsigned bs, unsigned src) {
	return trans ? bs_cycle_scalar(bs, src) : bs_cycle_vector(bs, src);
}
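
// GPR read-port tracker: rp[cycle][chan] remembers which register (stored
// as sel + 1, so 0 means free) is read in a given cycle on a given channel,
// and uc[cycle][chan] counts the sources sharing that read. Two sources may
// share a cycle/channel pair only when they read the same register.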
inline
bool rp_gpr_tracker::try_reserve(unsigned cycle, unsigned sel, unsigned chan) {
	++sel;
	if (rp[cycle][chan] == 0) {
		rp[cycle][chan] = sel;
		++uc[cycle][chan];
		return true;
	} else if (rp[cycle][chan] == sel) {
		++uc[cycle][chan];
		return true;
	}
	return false;
}

inline
void rp_gpr_tracker::unreserve(alu_node* n) {
	unsigned nsrc = n->bc.op_ptr->src_count, i;
	unsigned trans = n->bc.slot == SLOT_TRANS;
	unsigned bs = n->bc.bank_swizzle;
	unsigned opt = !trans
			&& n->bc.src[0].sel == n->bc.src[1].sel
			&& n->bc.src[0].chan == n->bc.src[1].chan;

	for (i = 0; i < nsrc; ++i) {
		value *v = n->src[i];
		if (v->is_readonly() || v->is_undef())
			continue;
		if (i == 1 && opt)
			continue;
		unsigned cycle = bs_cycle(trans, bs, i);
		unreserve(cycle, n->bc.src[i].sel, n->bc.src[i].chan);
	}
}

inline
void rp_gpr_tracker::unreserve(unsigned cycle, unsigned sel, unsigned chan) {
	++sel;
	assert(rp[cycle][chan] == sel && uc[cycle][chan]);
	if (--uc[cycle][chan] == 0)
		rp[cycle][chan] = 0;
}

inline
bool rp_gpr_tracker::try_reserve(alu_node* n) {
	unsigned nsrc = n->bc.op_ptr->src_count, i;
	unsigned trans = n->bc.slot == SLOT_TRANS;
	unsigned bs = n->bc.bank_swizzle;
	unsigned opt = !trans && nsrc >= 2 &&
			n->src[0] == n->src[1];

	bool need_unreserve = false;
	unsigned const_count = 0, min_gpr_cycle = 3;

	for (i = 0; i < nsrc; ++i) {
		value *v = n->src[i];
		if (v->is_readonly() || v->is_undef()) {
			const_count++;
			if (trans && const_count == 3)
				break;
		} else {
			if (i == 1 && opt)
				continue;

			unsigned cycle = bs_cycle(trans, bs, i);

			if (trans && cycle < min_gpr_cycle)
				min_gpr_cycle = cycle;

			if (const_count && cycle < const_count && trans)
				break;

			if (!try_reserve(cycle, n->bc.src[i].sel, n->bc.src[i].chan))
				break;
			else
				need_unreserve = true;
		}
	}

	if ((i == nsrc) && (min_gpr_cycle + 1 > const_count))
		return true;

	if (need_unreserve && i--) {
		do {
			value *v = n->src[i];
			if (!v->is_readonly() && !v->is_undef()) {
				if (i == 1 && opt)
					continue;
				unreserve(bs_cycle(trans, bs, i), n->bc.src[i].sel,
						n->bc.src[i].chan);
			}
		} while (i--);
	}
	return false;
}

alu_group_tracker::alu_group_tracker(shader &sh)
	: sh(sh), kc(sh),
	  gpr(), lt(), slots(),
	  max_slots(sh.get_ctx().is_cayman() ? 4 : 5),
	  has_mova(), uses_ar(), has_predset(), has_kill(),
	  updates_exec_mask(), chan_count(), interp_param(), next_id() {

	available_slots = sh.get_ctx().has_trans ? 0x1F : 0x0F;
}

inline
sel_chan alu_group_tracker::get_value_id(value* v) {
	unsigned &id = vmap[v];
	if (!id)
		id = ++next_id;
	return sel_chan(id, v->get_final_chan());
}

inline
void alu_group_tracker::assign_slot(unsigned slot, alu_node* n) {
	update_flags(n);
	slots[slot] = n;
	available_slots &= ~(1 << slot);

	unsigned param = n->interp_param();

	if (param) {
		assert(!interp_param || interp_param == param);
		interp_param = param;
	}
}


void alu_group_tracker::discard_all_slots(container_node &removed_nodes) {
	PSC_DUMP( sblog << "agt::discard_all_slots\n"; );
	discard_slots(~available_slots & ((1 << max_slots) - 1), removed_nodes);
}

void alu_group_tracker::discard_slots(unsigned slot_mask,
		container_node &removed_nodes) {

	PSC_DUMP(
		sblog << "discard_slots : packed_ops : "
			<< (unsigned)packed_ops.size() << "\n";
	);

	for (node_vec::iterator N, I = packed_ops.begin();
			I != packed_ops.end(); I = N) {
		N = I; ++N;

		alu_packed_node *n = static_cast<alu_packed_node*>(*I);
		unsigned pslots = n->get_slot_mask();

		PSC_DUMP(
			sblog << "discard_slots : packed slot_mask : " << pslots << "\n";
		);

		if (pslots & slot_mask) {

			PSC_DUMP(
				sblog << "discard_slots : discarding packed...\n";
			);

			removed_nodes.push_back(n);
			slot_mask &= ~pslots;
			N = packed_ops.erase(I);
			available_slots |= pslots;
			for (unsigned k = 0; k < max_slots; ++k) {
				if (pslots & (1 << k))
					slots[k] = NULL;
			}
		}
	}

	for (unsigned slot = 0; slot < max_slots; ++slot) {
		unsigned slot_bit = 1 << slot;

		if (slot_mask & slot_bit) {
			assert(!(available_slots & slot_bit));
			assert(slots[slot]);

			assert(!(slots[slot]->bc.slot_flags & AF_4SLOT));

			PSC_DUMP(
				sblog << "discarding slot " << slot << " : ";
				dump::dump_op(slots[slot]);
				sblog << "\n";
			);

			removed_nodes.push_back(slots[slot]);
			slots[slot] = NULL;
			available_slots |= slot_bit;
		}
	}

	alu_node *t = slots[4];
	if (t && (t->bc.slot_flags & AF_V)) {
		unsigned chan = t->bc.dst_chan;
		if (!slots[chan]) {
			PSC_DUMP(
				sblog << "moving ";
				dump::dump_op(t);
				sblog << " from trans slot to free slot " << chan << "\n";
			);

			slots[chan] = t;
			slots[4] = NULL;
			t->bc.slot = chan;
		}
	}

	reinit();
}

alu_group_node* alu_group_tracker::emit() {

	alu_group_node *g = sh.create_alu_group();

	lt.init_group_literals(g);

	for (unsigned i = 0; i < max_slots; ++i) {
		alu_node *n = slots[i];
		if (n) {
			g->push_back(n);
		}
	}
	return g;
}
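
// Try to add a single ALU instruction to the group. The fast path keeps the
// bank swizzles already chosen for the other slots and only searches for a
// swizzle of the new node; if that fails, the code below re-enumerates the
// swizzles of all slots without a forced swizzle (first_nf..last_slot) with
// a backtracking search until a combination satisfies the read-port limits,
// restoring the previous assignment when no combination works.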
bool alu_group_tracker::try_reserve(alu_node* n) {
	unsigned nsrc = n->bc.op_ptr->src_count;
	unsigned slot = n->bc.slot;
	bool trans = slot == 4;

	if (slots[slot])
		return false;

	unsigned flags = n->bc.op_ptr->flags;

	unsigned param = n->interp_param();

	if (param && interp_param && interp_param != param)
		return false;

	if ((flags & AF_KILL) && has_predset)
		return false;
	if ((flags & AF_ANY_PRED) && (has_kill || has_predset))
		return false;
	if ((flags & AF_MOVA) && (has_mova || uses_ar))
		return false;

	if (n->uses_ar() && has_mova)
		return false;

	for (unsigned i = 0; i < nsrc; ++i) {

		unsigned last_id = next_id;

		value *v = n->src[i];
		if (!v->is_any_gpr() && !v->is_rel())
			continue;
		sel_chan vid = get_value_id(n->src[i]);

		if (vid > last_id && chan_count[vid.chan()] == 3) {
			return false;
		}

		n->bc.src[i].sel = vid.sel();
		n->bc.src[i].chan = vid.chan();
	}

	if (!lt.try_reserve(n))
		return false;

	if (!kc.try_reserve(n)) {
		lt.unreserve(n);
		return false;
	}

	unsigned fbs = n->forced_bank_swizzle();

	n->bc.bank_swizzle = 0;

	if (!trans && fbs)
		n->bc.bank_swizzle = VEC_210;

	if (gpr.try_reserve(n)) {
		assign_slot(slot, n);
		return true;
	}

	if (!fbs) {
		unsigned swz_num = trans ? SCL_NUM : VEC_NUM;
		for (unsigned bs = 0; bs < swz_num; ++bs) {
			n->bc.bank_swizzle = bs;
			if (gpr.try_reserve(n)) {
				assign_slot(slot, n);
				return true;
			}
		}
	}

	gpr.reset();

	slots[slot] = n;
	unsigned forced_swz_slots = 0;
	int first_slot = ~0, first_nf = ~0, last_slot = ~0;
	unsigned save_bs[5];

	for (unsigned i = 0; i < max_slots; ++i) {
		alu_node *a = slots[i];
		if (a) {
			if (first_slot == ~0)
				first_slot = i;
			last_slot = i;
			save_bs[i] = a->bc.bank_swizzle;
			if (a->forced_bank_swizzle()) {
				assert(i != SLOT_TRANS);
				forced_swz_slots |= (1 << i);
				a->bc.bank_swizzle = VEC_210;
				if (!gpr.try_reserve(a))
					assert(!"internal reservation error");
			} else {
				if (first_nf == ~0)
					first_nf = i;

				a->bc.bank_swizzle = 0;
			}
		}
	}

	if (first_nf == ~0) {
		assign_slot(slot, n);
		return true;
	}

	assert(first_slot != ~0 && last_slot != ~0);

	// silence "array subscript is above array bounds" with gcc 4.8
	if (last_slot >= 5)
		abort();

	int i = first_nf;
	alu_node *a = slots[i];
	bool backtrack = false;

	while (1) {

		PSC_DUMP(
			sblog << " bs: trying s" << i << " bs:" << a->bc.bank_swizzle
				<< " bt:" << backtrack << "\n";
		);

		if (!backtrack && gpr.try_reserve(a)) {
			PSC_DUMP(
				sblog << " bs: reserved s" << i << " bs:" << a->bc.bank_swizzle
					<< "\n";
			);

			while ((++i <= last_slot) && !slots[i]);
			if (i <= last_slot)
				a = slots[i];
			else
				break;
		} else {
			bool itrans = i == SLOT_TRANS;
			unsigned max_swz = itrans ? SCL_221 : VEC_210;

			if (a->bc.bank_swizzle < max_swz) {
				++a->bc.bank_swizzle;

				PSC_DUMP(
					sblog << " bs: inc s" << i << " bs:" << a->bc.bank_swizzle
						<< "\n";
				);

			} else {

				a->bc.bank_swizzle = 0;
				while ((--i >= first_nf) && !slots[i]);
				if (i < first_nf)
					break;
				a = slots[i];
				PSC_DUMP(
					sblog << " bs: unreserve s" << i << " bs:" << a->bc.bank_swizzle
						<< "\n";
				);
				gpr.unreserve(a);
				backtrack = true;

				continue;
			}
		}
		backtrack = false;
	}

	if (i == last_slot + 1) {
		assign_slot(slot, n);
		return true;
	}

	// reservation failed, restore previous state
	slots[slot] = NULL;
	gpr.reset();
	for (unsigned i = 0; i < max_slots; ++i) {
		alu_node *a = slots[i];
		if (a) {
			a->bc.bank_swizzle = save_bs[i];
			bool b = gpr.try_reserve(a);
			assert(b);
		}
	}

	kc.unreserve(n);
	lt.unreserve(n);
	return false;
}

bool alu_group_tracker::try_reserve(alu_packed_node* p) {
	bool need_unreserve = false;
	node_iterator I(p->begin()), E(p->end());

	for (; I != E; ++I) {
		alu_node *n = static_cast<alu_node*>(*I);
		if (!try_reserve(n))
			break;
		else
			need_unreserve = true;
	}

	if (I == E) {
		packed_ops.push_back(p);
		return true;
	}

	if (need_unreserve) {
		while (--I != E) {
			alu_node *n = static_cast<alu_node*>(*I);
			slots[n->bc.slot] = NULL;
		}
		reinit();
	}
	return false;
}

void alu_group_tracker::reinit() {
	alu_node *s[5];
	memcpy(s, slots, sizeof(slots));

	reset(true);

	for (int i = max_slots - 1; i >= 0; --i) {
		if (s[i] && !try_reserve(s[i])) {
			sblog << "alu_group_tracker: reinit error on slot " << i << "\n";
			for (unsigned j = 0; j < max_slots; ++j) {
				sblog << " slot " << j << " : ";
				if (s[j])
					dump::dump_op(s[j]);

				sblog << "\n";
			}
			assert(!"alu_group_tracker: reinit error");
		}
	}
}

void alu_group_tracker::reset(bool keep_packed) {
	kc.reset();
	gpr.reset();
	lt.reset();
	memset(slots, 0, sizeof(slots));
	vmap.clear();
	next_id = 0;
	has_mova = false;
	uses_ar = false;
	has_predset = false;
	has_kill = false;
	updates_exec_mask = false;
	available_slots = sh.get_ctx().has_trans ? 0x1F : 0x0F;
	interp_param = 0;

	chan_count[0] = 0;
	chan_count[1] = 0;
	chan_count[2] = 0;
	chan_count[3] = 0;

	if (!keep_packed)
		packed_ops.clear();
}

void alu_group_tracker::update_flags(alu_node* n) {
	unsigned flags = n->bc.op_ptr->flags;
	has_kill |= (flags & AF_KILL);
	has_mova |= (flags & AF_MOVA);
	has_predset |= (flags & AF_ANY_PRED);
	uses_ar |= n->uses_ar();

	if (flags & AF_ANY_PRED) {
		if (n->dst[2] != NULL)
			updates_exec_mask = true;
	}
}
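
// The post-scheduler proper: basic blocks are scheduled bottom-up. For ALU
// clauses, ucm maps each op to the number of its not-yet-scheduled uses;
// ops whose count drops to zero are released to the ready list (or to
// ready_copies for coalescable copies), and prepare_alu_group() repeatedly
// fills groups from those lists.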
int post_scheduler::run() {
	return run_on(sh.root) ? 0 : 1;
}

bool post_scheduler::run_on(container_node* n) {
	bool r = true;
	for (node_riterator I = n->rbegin(), E = n->rend(); I != E; ++I) {
		if (I->is_container()) {
			if (I->subtype == NST_BB) {
				bb_node* bb = static_cast<bb_node*>(*I);
				r = schedule_bb(bb);
			} else {
				r = run_on(static_cast<container_node*>(*I));
			}
			if (!r)
				break;
		}
	}
	return r;
}

void post_scheduler::init_uc_val(container_node *c, value *v) {
	node *d = v->any_def();
	if (d && d->parent == c)
		++ucm[d];
}

void post_scheduler::init_uc_vec(container_node *c, vvec &vv, bool src) {
	for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
		value *v = *I;
		if (!v || v->is_readonly())
			continue;

		if (v->is_rel()) {
			init_uc_val(c, v->rel);
			init_uc_vec(c, v->muse, true);
		}
		// note: not an else-if - for rel values both the index (v->rel)
		// and the value itself get counted
		if (src) {
			init_uc_val(c, v);
		}
	}
}

unsigned post_scheduler::init_ucm(container_node *c, node *n) {
	init_uc_vec(c, n->src, true);
	init_uc_vec(c, n->dst, false);

	uc_map::iterator F = ucm.find(n);
	return F == ucm.end() ? 0 : F->second;
}

bool post_scheduler::schedule_bb(bb_node* bb) {
	PSC_DUMP(
		sblog << "scheduling BB " << bb->id << "\n";
		if (!pending.empty())
			dump::dump_op_list(&pending);
	);

	assert(pending.empty());
	assert(bb_pending.empty());
	assert(ready.empty());

	bb_pending.append_from(bb);
	cur_bb = bb;

	node *n;

	while ((n = bb_pending.back())) {

		PSC_DUMP(
			sblog << "post_sched_bb ";
			dump::dump_op(n);
			sblog << "\n";
		);

		// may require emitting ALU ops to load index registers
		if (n->is_fetch_clause()) {
			n->remove();
			process_fetch(static_cast<container_node *>(n));
			continue;
		}

		if (n->is_alu_clause()) {
			n->remove();
			bool r = process_alu(static_cast<container_node*>(n));
			if (r)
				continue;
			return false;
		}

		n->remove();
		bb->push_front(n);
	}

	this->cur_bb = NULL;
	return true;
}

void post_scheduler::init_regmap() {

	regmap.clear();

	PSC_DUMP(
		sblog << "init_regmap: live: ";
		dump::dump_set(sh, live);
		sblog << "\n";
	);

	for (val_set::iterator I = live.begin(sh), E = live.end(sh); I != E; ++I) {
		value *v = *I;
		assert(v);
		if (!v->is_sgpr() || !v->is_prealloc())
			continue;

		sel_chan r = v->gpr;

		PSC_DUMP(
			sblog << "init_regmap: " << r << " <= ";
			dump::dump_val(v);
			sblog << "\n";
		);

		assert(r);
		regmap[r] = v;
	}
}
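
// Index register loads. On Evergreen-class chips the index value is first
// loaded into AR with a MOVA, then SET_CF_IDX0/1 in a following group moves
// it into the CF index register; Cayman encodes the destination index
// register directly in the MOVA (see create_ar_load()), so the extra group
// is skipped there.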
static alu_node *create_set_idx(shader &sh, unsigned ar_idx) {
	alu_node *a = sh.create_alu();

	assert(ar_idx == V_SQ_CF_INDEX_0 || ar_idx == V_SQ_CF_INDEX_1);
	if (ar_idx == V_SQ_CF_INDEX_0)
		a->bc.set_op(ALU_OP0_SET_CF_IDX0);
	else
		a->bc.set_op(ALU_OP0_SET_CF_IDX1);
	a->bc.slot = SLOT_X;
	a->dst.resize(1); // dummy dest needed for recoloring

	PSC_DUMP(
		sblog << "created IDX load: ";
		dump::dump_op(a);
		sblog << "\n";
	);

	return a;
}

void post_scheduler::load_index_register(value *v, unsigned ar_idx)
{
	alu.reset();

	if (!sh.get_ctx().is_cayman()) {
		// Evergreen has to load the address register first, then use
		// SET_CF_IDX0/1 to move it to the CF index register
		alu_group_tracker &rt = alu.grp();
		alu_node *set_idx = create_set_idx(sh, ar_idx);
		if (!rt.try_reserve(set_idx)) {
			sblog << "can't emit SET_CF_IDX";
			dump::dump_op(set_idx);
			sblog << "\n";
		}
		process_group();

		if (!alu.check_clause_limits()) {
			// can't happen since the clause only contains MOVA/SET_CF_IDX0/1
		}
		alu.emit_group();
	}

	alu_group_tracker &rt = alu.grp();
	alu_node *a = alu.create_ar_load(v, ar_idx == V_SQ_CF_INDEX_1 ? SEL_Z : SEL_Y);

	if (!rt.try_reserve(a)) {
		sblog << "can't emit AR load : ";
		dump::dump_op(a);
		sblog << "\n";
	}

	process_group();

	if (!alu.check_clause_limits()) {
		// can't happen since the clause only contains MOVA/SET_CF_IDX0/1
	}

	alu.emit_group();
	alu.emit_clause(cur_bb);
}
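
// Fetch clauses normally pass through unchanged; the exception is a fetch
// using a sampler/resource index, which needs an ALU clause emitted in
// front of it to load the index register first.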
void post_scheduler::process_fetch(container_node *c) {
	if (c->empty())
		return;

	for (node_iterator N, I = c->begin(), E = c->end(); I != E; I = N) {
		N = I;
		++N;

		node *n = *I;

		fetch_node *f = static_cast<fetch_node*>(n);

		PSC_DUMP(
			sblog << "process_fetch ";
			dump::dump_op(n);
			sblog << " ";
		);

		// TODO: if the same index value is used again, reloading the index
		// register could be avoided
		if (f->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE ||
				f->bc.resource_index_mode != V_SQ_CF_INDEX_NONE) {
			unsigned index_mode = f->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE ?
				f->bc.sampler_index_mode : f->bc.resource_index_mode;

			// currently prior opt passes are required to use one TEX per
			// indexed op
			assert(f->parent->count() == 1);

			value *v = f->src.back(); // last src is the index offset
			assert(v);

			cur_bb->push_front(c);

			load_index_register(v, index_mode);
			f->src.pop_back(); // the index value is not needed anymore

			return;
		}
	}

	cur_bb->push_front(c);
}

bool post_scheduler::process_alu(container_node *c) {

	if (c->empty())
		return true;

	ucm.clear();
	alu.reset();

	live = c->live_after;

	init_globals(c->live_after, true);
	init_globals(c->live_before, true);

	init_regmap();

	update_local_interferences();

	for (node_riterator N, I = c->rbegin(), E = c->rend(); I != E; I = N) {
		N = I;
		++N;

		node *n = *I;
		unsigned uc = init_ucm(c, n);

		PSC_DUMP(
			sblog << "process_alu uc=" << uc << " ";
			dump::dump_op(n);
			sblog << " ";
		);

		if (uc) {
			n->remove();

			pending.push_back(n);
			PSC_DUMP( sblog << "pending\n"; );
		} else {
			release_op(n);
		}
	}

	return schedule_alu(c);
}

void post_scheduler::update_local_interferences() {

	PSC_DUMP(
		sblog << "update_local_interferences : ";
		dump::dump_set(sh, live);
		sblog << "\n";
	);

	for (val_set::iterator I = live.begin(sh), E = live.end(sh); I != E; ++I) {
		value *v = *I;
		if (v->is_prealloc())
			continue;

		v->interferences.add_set(live);
	}
}

void post_scheduler::update_live_src_vec(vvec &vv, val_set *born, bool src) {
	for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
		value *v = *I;

		if (!v)
			continue;

		if (src && v->is_any_gpr()) {
			if (live.add_val(v)) {
				if (!v->is_prealloc()) {
					if (!cleared_interf.contains(v)) {
						PSC_DUMP(
							sblog << "clearing interferences for " << *v << "\n";
						);
						v->interferences.clear();
						cleared_interf.add_val(v);
					}
				}
				if (born)
					born->add_val(v);
			}
		} else if (v->is_rel()) {
			if (!v->rel->is_any_gpr())
				live.add_val(v->rel);
			update_live_src_vec(v->muse, born, true);
		}
	}
}

void post_scheduler::update_live_dst_vec(vvec &vv) {
	for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
		value *v = *I;
		if (!v)
			continue;

		if (v->is_rel()) {
			update_live_dst_vec(v->mdef);
		} else if (v->is_any_gpr()) {
			if (!live.remove_val(v)) {
				PSC_DUMP(
					sblog << "failed to remove ";
					dump::dump_val(v);
					sblog << " from live : ";
					dump::dump_set(sh, live);
					sblog << "\n";
				);
			}
		}
	}
}

void post_scheduler::update_live(node *n, val_set *born) {
	update_live_dst_vec(n->dst);
	update_live_src_vec(n->src, born, true);
	update_live_src_vec(n->dst, born, false);
}

void post_scheduler::process_group() {
	alu_group_tracker &rt = alu.grp();

	val_set vals_born;

	recolor_locals();

	PSC_DUMP(
		sblog << "process_group: live_before : ";
		dump::dump_set(sh, live);
		sblog << "\n";
	);

	for (unsigned s = 0; s < ctx.num_slots; ++s) {
		alu_node *n = rt.slot(s);
		if (!n)
			continue;

		update_live(n, &vals_born);
	}

	PSC_DUMP(
		sblog << "process_group: live_after : ";
		dump::dump_set(sh, live);
		sblog << "\n";
	);

	update_local_interferences();

	for (unsigned i = 0; i < 5; ++i) {
		node *n = rt.slot(i);
		if (n && !n->is_mova()) {
			release_src_values(n);
		}
	}
}

void post_scheduler::init_globals(val_set &s, bool prealloc) {

	PSC_DUMP(
		sblog << "init_globals: ";
		dump::dump_set(sh, s);
		sblog << "\n";
	);

	for (val_set::iterator I = s.begin(sh), E = s.end(sh); I != E; ++I) {
		value *v = *I;
		if (v->is_sgpr() && !v->is_global()) {
			v->set_global();

			if (prealloc && v->is_fixed()) {
				v->set_prealloc();
			}
		}
	}
}

void post_scheduler::emit_index_registers() {
	for (unsigned i = 0; i < 2; i++) {
		if (alu.current_idx[i]) {
			regmap = prev_regmap;
			alu.discard_current_group();

			load_index_register(alu.current_idx[i], KC_INDEX_0 + i);
			alu.current_idx[i] = NULL;
		}
	}
}

void post_scheduler::emit_clause() {

	if (alu.current_ar) {
		emit_load_ar();
		process_group();
		if (!alu.check_clause_limits()) {
			// can't happen since the clause only contains MOVA/SET_CF_IDX0/1
		}
		alu.emit_group();
	}

	if (!alu.is_empty()) {
		alu.emit_clause(cur_bb);
	}

	emit_index_registers();
}

bool post_scheduler::schedule_alu(container_node *c) {

	assert(!ready.empty() || !ready_copies.empty());

	bool improving = true;
	int last_pending = pending.count();
	while (improving) {
		prev_regmap = regmap;
		if (!prepare_alu_group()) {

			int new_pending = pending.count();
			improving = (new_pending < last_pending) || (last_pending == 0);
			last_pending = new_pending;

			if (alu.current_idx[0] || alu.current_idx[1]) {
				regmap = prev_regmap;
				emit_clause();
				init_globals(live, false);

				continue;
			}

			if (alu.current_ar) {
				emit_load_ar();
				continue;
			} else
				break;
		}

		if (!alu.check_clause_limits()) {
			regmap = prev_regmap;
			emit_clause();
			init_globals(live, false);

			continue;
		}

		process_group();
		alu.emit_group();
	}

	if (!alu.is_empty()) {
		emit_clause();
	}

	if (!ready.empty()) {
		sblog << "##post_scheduler: unscheduled ready instructions :";
		dump::dump_op_list(&ready);
		assert(!"unscheduled ready instructions");
	}

	if (!pending.empty()) {
		sblog << "##post_scheduler: unscheduled pending instructions :";
		dump::dump_op_list(&pending);
		assert(!"unscheduled pending instructions");
	}
	return improving;
}

void post_scheduler::add_interferences(value *v, sb_bitset &rb, val_set &vs) {
	unsigned chan = v->gpr.chan();

	for (val_set::iterator I = vs.begin(sh), E = vs.end(sh);
			I != E; ++I) {
		value *vi = *I;
		sel_chan gpr = vi->get_final_gpr();

		if (vi->is_any_gpr() && gpr && vi != v &&
				(!v->chunk || v->chunk != vi->chunk) &&
				vi->is_fixed() && gpr.chan() == chan) {

			unsigned r = gpr.sel();

			PSC_DUMP(
				sblog << "\tadd_interferences: " << *vi << "\n";
			);

			if (rb.size() <= r)
				rb.resize(r + 32);
			rb.set(r);
		}
	}
}

void post_scheduler::set_color_local_val(value *v, sel_chan color) {
	v->gpr = color;

	PSC_DUMP(
		sblog << " recolored: ";
		dump::dump_val(v);
		sblog << "\n";
	);
}

void post_scheduler::set_color_local(value *v, sel_chan color) {
	if (v->chunk) {
		vvec &vv = v->chunk->values;
		for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
			value *v2 = *I;
			set_color_local_val(v2, color);
		}
		v->chunk->fix();
	} else {
		set_color_local_val(v, color);
		v->fix();
	}
}
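
// Recolor a local (non-preallocated) value: collect into a bitset the GPRs
// already taken on this value's channel by interfering fixed values, then
// scan for a free register - temporary GPRs first (unless the value is
// global), non-temp GPRs as the second pass.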
bool post_scheduler::recolor_local(value *v) {

	sb_bitset rb;

	assert(v->is_sgpr());
	assert(!v->is_prealloc());
	assert(v->gpr);

	unsigned chan = v->gpr.chan();

	PSC_DUMP(
		sblog << "recolor_local: ";
		dump::dump_val(v);
		sblog << " interferences: ";
		dump::dump_set(sh, v->interferences);
		sblog << "\n";
		if (v->chunk) {
			sblog << " in chunk: ";
			coalescer::dump_chunk(v->chunk);
			sblog << "\n";
		}
	);

	if (v->chunk) {
		for (vvec::iterator I = v->chunk->values.begin(),
				E = v->chunk->values.end(); I != E; ++I) {
			value *v2 = *I;

			PSC_DUMP( sblog << " add_interferences for " << *v2 << " :\n"; );

			add_interferences(v, rb, v2->interferences);
		}
	} else {
		add_interferences(v, rb, v->interferences);
	}

	PSC_DUMP(
		unsigned sz = rb.size();
		sblog << "registers bits: " << sz;
		for (unsigned r = 0; r < sz; ++r) {
			if ((r & 7) == 0)
				sblog << "\n " << r << " ";
			sblog << (rb.get(r) ? 1 : 0);
		}
	);

	bool no_temp_gprs = v->is_global();
	unsigned rs, re, pass = no_temp_gprs ? 1 : 0;

	while (pass < 2) {

		if (pass == 0) {
			rs = sh.first_temp_gpr();
			re = MAX_GPR;
		} else {
			rs = 0;
			re = sh.num_nontemp_gpr();
		}

		for (unsigned reg = rs; reg < re; ++reg) {
			if (reg >= rb.size() || !rb.get(reg)) {
				// color found
				set_color_local(v, sel_chan(reg, chan));
				return true;
			}
		}
		++pass;
	}

	assert(!"recolor_local failed");
	return true;
}

void post_scheduler::emit_load_ar() {

	regmap = prev_regmap;
	alu.discard_current_group();

	alu_group_tracker &rt = alu.grp();
	alu_node *a = alu.create_ar_load(alu.current_ar, SEL_X);

	if (!rt.try_reserve(a)) {
		sblog << "can't emit AR load : ";
		dump::dump_op(a);
		sblog << "\n";
	}

	alu.current_ar = 0;
}
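
// regmap tracks which value owns each preallocated GPR while a group is
// being built: unmap_dst*() releases the registers the group's destinations
// overwrite, map_src*() then claims the source registers and fails (so that
// the conflicting slot gets discarded) when a source would read a register
// the group has just repurposed.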
bool post_scheduler::unmap_dst_val(value *d) {

	if (d == alu.current_ar) {
		emit_load_ar();
		return false;
	}

	if (d->is_prealloc()) {
		sel_chan gpr = d->get_final_gpr();
		rv_map::iterator F = regmap.find(gpr);
		value *c = NULL;
		if (F != regmap.end())
			c = F->second;

		if (c && c != d && (!c->chunk || c->chunk != d->chunk)) {
			PSC_DUMP(
				sblog << "dst value conflict : ";
				dump::dump_val(d);
				sblog << " regmap contains ";
				dump::dump_val(c);
				sblog << "\n";
			);
			assert(!"scheduler error");
			return false;
		} else if (c) {
			regmap.erase(F);
		}
	}
	return true;
}

bool post_scheduler::unmap_dst(alu_node *n) {
	value *d = n->dst.empty() ? NULL : n->dst[0];

	if (!d)
		return true;

	if (!d->is_rel()) {
		if (d && d->is_any_reg()) {

			if (d->is_AR()) {
				if (alu.current_ar != d) {
					sblog << "loading wrong ar value\n";
					assert(0);
				} else {
					alu.current_ar = NULL;
				}

			} else if (d->is_any_gpr()) {
				if (!unmap_dst_val(d))
					return false;
			}
		}
	} else {
		for (vvec::iterator I = d->mdef.begin(), E = d->mdef.end();
				I != E; ++I) {
			d = *I;
			if (!d)
				continue;

			assert(d->is_any_gpr());

			if (!unmap_dst_val(d))
				return false;
		}
	}
	return true;
}

bool post_scheduler::map_src_val(value *v) {

	if (!v->is_prealloc())
		return true;

	sel_chan gpr = v->get_final_gpr();
	rv_map::iterator F = regmap.find(gpr);
	value *c = NULL;
	if (F != regmap.end()) {
		c = F->second;
		if (!v->v_equal(c)) {
			PSC_DUMP(
				sblog << "can't map src value ";
				dump::dump_val(v);
				sblog << ", regmap contains ";
				dump::dump_val(c);
				sblog << "\n";
			);
			return false;
		}
	} else {
		regmap.insert(std::make_pair(gpr, v));
	}
	return true;
}

bool post_scheduler::map_src_vec(vvec &vv, bool src) {
	if (src) {
		// Handle possible UBO indexing
		bool ubo_indexing[2] = { false, false };
		for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
			value *v = *I;
			if (!v)
				continue;

			if (v->is_kcache()) {
				unsigned index_mode = v->select.kcache_index_mode();
				if (index_mode == KC_INDEX_0 || index_mode == KC_INDEX_1) {
					ubo_indexing[index_mode - KC_INDEX_0] = true;
				}
			}
		}

		// idx values are stored at the end of the src vec, see
		// bc_parser::prepare_alu_group
		for (unsigned i = 2; i != 0; i--) {
			if (ubo_indexing[i-1]) {
				// TODO: skip adding the value to the kcache reservation
				// somehow, it causes unnecessary group breaks and cache
				// line locks
				value *v = vv.back();
				if (alu.current_idx[i-1] && alu.current_idx[i-1] != v) {
					PSC_DUMP(
						sblog << "IDX" << i-1 << " already set to " <<
							*alu.current_idx[i-1] << ", trying to set " << *v << "\n";
					);
					return false;
				}

				alu.current_idx[i-1] = v;
				PSC_DUMP( sblog << "IDX" << i-1 << " set to " << *v << "\n"; );
			}
		}
	}

	for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
		value *v = *I;
		if (!v)
			continue;

		if ((!v->is_any_gpr() || !v->is_fixed()) && !v->is_rel())
			continue;

		if (v->is_rel()) {
			value *rel = v->rel;
			assert(rel);

			if (!rel->is_const()) {
				if (!map_src_vec(v->muse, true))
					return false;

				if (rel != alu.current_ar) {
					if (alu.current_ar) {
						PSC_DUMP(
							sblog << " current_AR is " << *alu.current_ar
								<< " trying to use " << *rel << "\n";
						);
						return false;
					}

					alu.current_ar = rel;

					PSC_DUMP(
						sblog << " new current_AR assigned: " << *alu.current_ar
							<< "\n";
					);
				}
			}

		} else if (src) {
			if (!map_src_val(v)) {
				return false;
			}
		}
	}
	return true;
}

bool post_scheduler::map_src(alu_node *n) {
	if (!map_src_vec(n->dst, false))
		return false;

	if (!map_src_vec(n->src, true))
		return false;

	return true;
}

void post_scheduler::dump_regmap() {

	sblog << "# REGMAP :\n";

	for (rv_map::iterator I = regmap.begin(), E = regmap.end(); I != E; ++I) {
		sblog << " # " << I->first << " => " << *(I->second) << "\n";
	}

	if (alu.current_ar)
		sblog << " current_AR: " << *alu.current_ar << "\n";
	if (alu.current_pr)
		sblog << " current_PR: " << *alu.current_pr << "\n";
	if (alu.current_idx[0])
		sblog << " current IDX0: " << *alu.current_idx[0] << "\n";
	if (alu.current_idx[1])
		sblog << " current IDX1: " << *alu.current_idx[1] << "\n";
}

void post_scheduler::recolor_locals() {
	alu_group_tracker &rt = alu.grp();

	for (unsigned s = 0; s < ctx.num_slots; ++s) {
		alu_node *n = rt.slot(s);
		if (n) {
			value *d = n->dst[0];
			if (d && d->is_sgpr() && !d->is_prealloc()) {
				recolor_local(d);
			}
		}
	}
}

// returns true if the group was discarded (fully or partially) because of
// register interferences and has to be rebuilt
bool post_scheduler::check_interferences() {

	alu_group_tracker &rt = alu.grp();

	unsigned interf_slots;

	bool discarded = false;

	PSC_DUMP(
		sblog << "check_interferences: before: \n";
		dump_regmap();
	);

	do {

		interf_slots = 0;

		for (unsigned s = 0; s < ctx.num_slots; ++s) {
			alu_node *n = rt.slot(s);
			if (n) {
				if (!unmap_dst(n)) {
					return true;
				}
			}
		}

		for (unsigned s = 0; s < ctx.num_slots; ++s) {
			alu_node *n = rt.slot(s);
			if (n) {
				if (!map_src(n)) {
					interf_slots |= (1 << s);
				}
			}
		}

		PSC_DUMP(
			for (unsigned i = 0; i < 5; ++i) {
				if (interf_slots & (1 << i)) {
					sblog << "!!!!!! interf slot: " << i << " : ";
					dump::dump_op(rt.slot(i));
					sblog << "\n";
				}
			}
		);

		if (!interf_slots)
			break;

		PSC_DUMP( sblog << "ci: discarding slots " << interf_slots << "\n"; );

		rt.discard_slots(interf_slots, alu.conflict_nodes);
		regmap = prev_regmap;
		discarded = true;

	} while (1);

	PSC_DUMP(
		sblog << "check_interferences: after: \n";
		dump_regmap();
	);

	return discarded;
}

// add an instruction (alu_node or contents of alu_packed_node) to the
// current group; returns the number of added instructions on success
unsigned post_scheduler::try_add_instruction(node *n) {

	alu_group_tracker &rt = alu.grp();

	unsigned avail_slots = rt.avail_slots();

	// can't schedule in the same clause as instructions using this index value
	if (!n->dst.empty() && n->dst[0] &&
			(n->dst[0] == alu.current_idx[0] || n->dst[0] == alu.current_idx[1])) {
		PSC_DUMP( sblog << " CF_IDX source: " << *n->dst[0] << "\n"; );
		return 0;
	}

	if (n->is_alu_packed()) {
		alu_packed_node *p = static_cast<alu_packed_node*>(n);
		unsigned slots = p->get_slot_mask();
		unsigned cnt = __builtin_popcount(slots);

		if ((slots & avail_slots) != slots) {
			PSC_DUMP( sblog << " no slots \n"; );
			return 0;
		}

		p->update_packed_items(ctx);

		if (!rt.try_reserve(p)) {
			PSC_DUMP( sblog << " reservation failed \n"; );
			return 0;
		}

		p->remove();
		return cnt;

	} else {
		alu_node *a = static_cast<alu_node*>(n);
		value *d = a->dst.empty() ? NULL : a->dst[0];

		if (d && d->is_special_reg()) {
			assert((a->bc.op_ptr->flags & AF_MOVA) || d->is_geometry_emit());
			d = NULL;
		}

		unsigned allowed_slots = ctx.alu_slots_mask(a->bc.op_ptr);
		unsigned slot;

		allowed_slots &= avail_slots;

		if (!allowed_slots)
			return 0;

		if (d) {
			slot = d->get_final_chan();
			a->bc.dst_chan = slot;
			allowed_slots &= (1 << slot) | 0x10;
		} else {
			if (a->bc.op_ptr->flags & AF_MOVA) {
				if (a->bc.slot_flags & AF_V)
					allowed_slots &= (1 << SLOT_X);
				else
					allowed_slots &= (1 << SLOT_TRANS);
			}
		}

		// FIXME: workaround for some problems with MULADD in the trans slot
		// on r700 (is it really needed on r600?)
		if ((a->bc.op == ALU_OP3_MULADD || a->bc.op == ALU_OP3_MULADD_IEEE) &&
				!ctx.is_egcm()) {
			allowed_slots &= 0x0F;
		}

		if (!allowed_slots) {
			PSC_DUMP( sblog << " no suitable slots\n"; );
			return 0;
		}

		slot = __builtin_ctz(allowed_slots);
		a->bc.slot = slot;

		PSC_DUMP( sblog << "slot: " << slot << "\n"; );

		if (!rt.try_reserve(a)) {
			PSC_DUMP( sblog << " reservation failed\n"; );
			return 0;
		}

		a->remove();
		return 1;
	}
}

bool post_scheduler::check_copy(node *n) {
	if (!n->is_copy_mov())
		return false;

	value *s = n->src[0];
	value *d = n->dst[0];

	if (!s->is_sgpr() || !d->is_sgpr())
		return false;

	if (!s->is_prealloc()) {
		recolor_local(s);

		if (!s->chunk || s->chunk != d->chunk)
			return false;
	}

	if (s->gpr == d->gpr) {

		PSC_DUMP(
			sblog << "check_copy: ";
			dump::dump_op(n);
			sblog << "\n";
		);

		rv_map::iterator F = regmap.find(d->gpr);
		bool gpr_free = (F == regmap.end());

		if (d->is_prealloc()) {
			if (gpr_free) {
				PSC_DUMP( sblog << " copy not ready...\n"; );
				return true;
			}

			value *rv = F->second;
			if (rv != d && (!rv->chunk || rv->chunk != d->chunk)) {
				PSC_DUMP( sblog << " copy not ready(2)...\n"; );
				return true;
			}

			unmap_dst(static_cast<alu_node*>(n));
		}

		if (s->is_prealloc() && !map_src_val(s))
			return true;

		update_live(n, NULL);

		release_src_values(n);
		n->remove();
		PSC_DUMP( sblog << " copy coalesced...\n"; );
		return true;
	}
	return false;
}

void post_scheduler::dump_group(alu_group_tracker &rt) {
	for (unsigned i = 0; i < 5; ++i) {
		node *n = rt.slot(i);
		if (n) {
			sblog << "slot " << i << " : ";
			dump::dump_op(n);
			sblog << "\n";
		}
	}
}

void post_scheduler::process_ready_copies() {

	node *last;

	do {
		last = ready_copies.back();

		for (node_iterator N, I = ready_copies.begin(), E = ready_copies.end();
				I != E; I = N) {
			N = I; ++N;

			node *n = *I;

			if (!check_copy(n)) {
				n->remove();
				ready.push_back(n);
			}
		}
	} while (last != ready_copies.back());

	update_local_interferences();
}
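
// Build one ALU group: coalesce ready copies, greedily place ready ops into
// free slots, then resolve register interferences; conflicting ops are
// moved to alu.conflict_nodes and retried on a later iteration. Returns
// true if at least one instruction was placed into the group.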
bool post_scheduler::prepare_alu_group() {

	alu_group_tracker &rt = alu.grp();

	unsigned i1 = 0;

	PSC_DUMP(
		sblog << "prepare_alu_group: starting...\n";
		dump_group(rt);
	);

	ready.append_from(&alu.conflict_nodes);

	// FIXME: rework this loop

	do {

		process_ready_copies();

		++i1;

		for (node_iterator N, I = ready.begin(), E = ready.end(); I != E;
				I = N) {
			N = I; ++N;
			node *n = *I;

			PSC_DUMP(
				sblog << "p_a_g: ";
				dump::dump_op(n);
				sblog << "\n";
			);

			unsigned cnt = try_add_instruction(n);

			if (!cnt)
				continue;

			PSC_DUMP(
				sblog << "current group:\n";
				dump_group(rt);
			);

			if (rt.inst_count() == ctx.num_slots) {
				PSC_DUMP( sblog << " all slots used\n"; );
				break;
			}
		}

		if (!check_interferences())
			break;

		// don't try to add more instructions to a group with mova if this
		// can lead to breaking the clause slot count limit - we don't want
		// mova to end up at the end of the new clause instead of the
		// beginning of the current one
		if (rt.has_ar_load() && alu.total_slots() > 121)
			break;

		if (rt.inst_count() && i1 > 50)
			break;

		regmap = prev_regmap;

	} while (1);

	PSC_DUMP(
		sblog << " prepare_alu_group done, " << rt.inst_count()
			<< " slot(s) \n";

		sblog << "$$$$$$$$PAG i1=" << i1
				<< " ready " << ready.count()
				<< " pending " << pending.count()
				<< " conflicting " << alu.conflict_nodes.count()
				<< "\n";
	);

	return rt.inst_count();
}

void post_scheduler::release_src_values(node* n) {
	release_src_vec(n->src, true);
	release_src_vec(n->dst, false);
}

void post_scheduler::release_op(node *n) {
	PSC_DUMP(
		sblog << "release_op ";
		dump::dump_op(n);
		sblog << "\n";
	);

	n->remove();

	if (n->is_copy_mov()) {
		ready_copies.push_back(n);
	} else if (n->is_mova() || n->is_pred_set()) {
		ready.push_front(n);
	} else {
		ready.push_back(n);
	}
}

void post_scheduler::release_src_val(value *v) {
	node *d = v->any_def();
	if (d) {
		if (!--ucm[d])
			release_op(d);
	}
}

void post_scheduler::release_src_vec(vvec& vv, bool src) {

	for (vvec::iterator I = vv.begin(), E = vv.end(); I != E; ++I) {
		value *v = *I;
		if (!v || v->is_readonly())
			continue;

		if (v->is_rel()) {
			release_src_val(v->rel);
			release_src_vec(v->muse, true);

		} else if (src) {
			release_src_val(v);
		}
	}
}

void literal_tracker::reset() {
	memset(lt, 0, sizeof(lt));
	memset(uc, 0, sizeof(uc));
}

void rp_gpr_tracker::reset() {
	memset(rp, 0, sizeof(rp));
	memset(uc, 0, sizeof(uc));
}

void rp_kcache_tracker::reset() {
	memset(rp, 0, sizeof(rp));
	memset(uc, 0, sizeof(uc));
}

void alu_kcache_tracker::reset() {
	memset(kc, 0, sizeof(kc));
	lines.clear();
}

void alu_clause_tracker::reset() {
	group = 0;
	slot_count = 0;
	grp0.reset();
	grp1.reset();
}

alu_clause_tracker::alu_clause_tracker(shader &sh)
	: sh(sh), kt(sh.get_ctx().hw_class), slot_count(),
	  grp0(sh), grp1(sh),
	  group(), clause(),
	  push_exec_mask(),
	  current_ar(), current_pr(), current_idx() {}

void alu_clause_tracker::emit_group() {

	assert(grp().inst_count());

	alu_group_node *g = grp().emit();

	if (grp().has_update_exec_mask()) {
		assert(!push_exec_mask);
		push_exec_mask = true;
	}

	assert(g);

	if (!clause) {
		clause = sh.create_clause(NST_ALU_CLAUSE);
	}

	clause->push_front(g);

	slot_count += grp().slot_count();

	new_group();

	PSC_DUMP( sblog << " #### group emitted\n"; );
}

void alu_clause_tracker::emit_clause(container_node *c) {
	assert(clause);

	kt.init_clause(clause->bc);

	assert(!current_ar);
	assert(!current_pr);

	if (push_exec_mask)
		clause->bc.set_op(CF_OP_ALU_PUSH_BEFORE);

	c->push_front(clause);

	clause = NULL;
	push_exec_mask = false;
	slot_count = 0;
	kt.reset();

	PSC_DUMP( sblog << "######### ALU clause emitted\n"; );
}
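
// Clause-level limits: a clause may hold at most MAX_ALU_SLOTS slots, minus
// the slots reserved here for the AR/PR/index-register reloads that would
// still have to be emitted inside this clause, and all kcache lines used by
// the group must fit into the clause's kcache lock set.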
bool alu_clause_tracker::check_clause_limits() {

	alu_group_tracker &gt = grp();

	unsigned slots = gt.slot_count();

	// reserve slots to load the AR and PR values...
	unsigned reserve_slots = (current_ar ? 1 : 0) + (current_pr ? 1 : 0);
	// ...and the index registers
	reserve_slots += (current_idx[0] != NULL) + (current_idx[1] != NULL);

	if (slot_count + slots > MAX_ALU_SLOTS - reserve_slots)
		return false;

	if (!kt.try_reserve(gt))
		return false;

	return true;
}

void alu_clause_tracker::new_group() {
	group = !group;
	grp().reset();
}

bool alu_clause_tracker::is_empty() {
	return clause == NULL;
}

void literal_tracker::init_group_literals(alu_group_node* g) {

	g->literals.clear();
	for (unsigned i = 0; i < 4; ++i) {
		if (!lt[i])
			break;

		g->literals.push_back(lt[i]);

		PSC_DUMP(
			sblog << "literal emitted: " << lt[i].f;
			sblog.print_zw_hex(lt[i].u, 8);
			sblog << " " << lt[i].i << "\n";
		);
	}
}

bool alu_kcache_tracker::try_reserve(alu_group_tracker& gt) {
	rp_kcache_tracker &kt = gt.kcache();

	if (!kt.num_sels())
		return true;

	sb_set<unsigned> group_lines;

	unsigned nl = kt.get_lines(group_lines);
	assert(nl);

	sb_set<unsigned> clause_lines(lines);
	lines.add_set(group_lines);

	if (clause_lines.size() == lines.size())
		return true;

	if (update_kc())
		return true;

	lines = clause_lines;

	return false;
}

unsigned rp_kcache_tracker::get_lines(kc_lines& lines) {
	unsigned cnt = 0;

	for (unsigned i = 0; i < sel_count; ++i) {
		unsigned line = rp[i] & 0x1fffffffu;
		unsigned index_mode = rp[i] >> 29;

		if (!line)
			return cnt;

		--line;
		line = (sel_count == 2) ? line >> 5 : line >> 6;
		line |= index_mode << 29;

		if (lines.insert(line).second)
			++cnt;
	}
	return cnt;
}
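
// Translate the collected kcache lines into the clause's (at most max_kcs)
// bank locks: two adjacent lines in the same bank with the same index mode
// merge into one KC_LOCK_2 (two-line) lock, any other line gets its own
// KC_LOCK_1 lock; fails and restores the previous state when more locks
// would be needed than are available.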
bool alu_kcache_tracker::update_kc() {
	unsigned c = 0;

	bc_kcache old_kc[4];
	memcpy(old_kc, kc, sizeof(kc));

	for (kc_lines::iterator I = lines.begin(), E = lines.end(); I != E; ++I) {
		unsigned index_mode = *I >> 29;
		unsigned line = *I & 0x1fffffffu;
		unsigned bank = line >> 8;

		assert(index_mode <= KC_INDEX_INVALID);
		line &= 0xFF;

		if (c && (bank == kc[c-1].bank) && (kc[c-1].addr + 1 == line) &&
				kc[c-1].index_mode == index_mode)
		{
			kc[c-1].mode = KC_LOCK_2;
		} else {
			if (c == max_kcs) {
				memcpy(kc, old_kc, sizeof(kc));
				return false;
			}

			kc[c].mode = KC_LOCK_1;

			kc[c].bank = bank;
			kc[c].addr = line;
			kc[c].index_mode = index_mode;
			++c;
		}
	}
	return true;
}
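
// Create the MOVA op for an AR or index load. Chips flagged uses_mova_gpr
// need MOVA_GPR_INT in the trans slot, the others use MOVA_INT in slot X;
// on Cayman an index load (SEL_Y/SEL_Z channel) is encoded through the
// special CM_V_SQ_MOVA_DST_CF_IDX0/1 destinations instead of a later
// SET_CF_IDX op.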
alu_node* alu_clause_tracker::create_ar_load(value *v, chan_select ar_channel) {
	alu_node *a = sh.create_alu();

	if (sh.get_ctx().uses_mova_gpr) {
		a->bc.set_op(ALU_OP1_MOVA_GPR_INT);
		a->bc.slot = SLOT_TRANS;
	} else {
		a->bc.set_op(ALU_OP1_MOVA_INT);
		a->bc.slot = SLOT_X;
	}
	a->bc.dst_chan = ar_channel;
	if (ar_channel != SEL_X && sh.get_ctx().is_cayman()) {
		a->bc.dst_gpr = ar_channel == SEL_Y ? CM_V_SQ_MOVA_DST_CF_IDX0
				: CM_V_SQ_MOVA_DST_CF_IDX1;
	}

	a->dst.resize(1);
	a->src.push_back(v);

	PSC_DUMP(
		sblog << "created AR load: ";
		dump::dump_op(a);
		sblog << "\n";
	);

	return a;
}

void alu_clause_tracker::discard_current_group() {
	PSC_DUMP( sblog << "act::discard_current_group\n"; );
	grp().discard_all_slots(conflict_nodes);
}

void rp_gpr_tracker::dump() {
	sblog << "=== gpr_tracker dump:\n";
	for (int c = 0; c < 3; ++c) {
		sblog << "cycle " << c << " ";
		for (int h = 0; h < 4; ++h) {
			sblog << rp[c][h] << ":" << uc[c][h] << " ";
		}
		sblog << "\n";
	}
}

} // namespace r600_sb