/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define BC_FINALIZER_DEBUG 0

#if BC_FINALIZER_DEBUG
#define FBC_DUMP(q) do { q } while (0)
#else
#define FBC_DUMP(q)
#endif

#include "sb_bc.h"
#include "sb_shader.h"
#include "sb_pass.h"

namespace r600_sb {
int bc_finalizer::run() {

	run_on(sh.root);

	regions_vec &rv = sh.get_regions();
	for (regions_vec::reverse_iterator I = rv.rbegin(), E = rv.rend(); I != E;
			++I) {
		region_node *r = *I;

		assert(r);

		if (r->is_loop()) {
			finalize_loop(r);
		} else if (r->first->is_container()) {
			container_node *repdep1 = static_cast<container_node*>(r->first);
			assert(repdep1->is_depart() || repdep1->is_repeat());
			if_node *n_if = static_cast<if_node*>(repdep1->first);
			if (n_if && n_if->is_if())
				finalize_if(r);
		}

		r->expand();
	}

	cf_peephole();

	// workaround for some problems on r6xx/7xx
	// add ALU NOP to each vertex shader
	if (!ctx.is_egcm() && (sh.target == TARGET_VS || sh.target == TARGET_ES)) {
		cf_node *c = sh.create_clause(NST_ALU_CLAUSE);

		alu_group_node *g = sh.create_alu_group();

		alu_node *a = sh.create_alu();
		a->bc.set_op(ALU_OP0_NOP);
		a->bc.last = 1;

		g->push_back(a);
		c->push_back(g);

		sh.root->push_back(c);

		c = sh.create_cf(CF_OP_NOP);
		sh.root->push_back(c);

		last_cf = c;
	}

	if (!ctx.is_cayman() && last_cf->bc.op_ptr->flags & CF_ALU) {
		last_cf = sh.create_cf(CF_OP_NOP);
		sh.root->push_back(last_cf);
	}

	if (ctx.is_cayman()) {
		if (!last_cf) {
			cf_node *c = sh.create_cf(CF_OP_CF_END);
			sh.root->push_back(c);
		} else
			last_cf->insert_after(sh.create_cf(CF_OP_CF_END));
	} else
		last_cf->bc.end_of_program = 1;

	for (unsigned t = EXP_PIXEL; t < EXP_TYPE_COUNT; ++t) {
		cf_node *le = last_export[t];
		if (le)
			le->bc.set_op(CF_OP_EXPORT_DONE);
	}

	sh.ngpr = ngpr;
	sh.nstack = nstack;

	return 0;
}
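
// A rough sketch (not dumped output) of the CF stream run() leaves for a
// pre-evergreen vertex shader after the NOP workaround above:
//
//   ALU clause { NOP }      <- workaround clause appended to sh.root
//   CF NOP                  <- becomes last_cf
//
// afterwards the last export of each type is rewritten to EXPORT_DONE, and
// either end_of_program is set on the last CF (r6xx..evergreen) or an
// explicit CF_END is appended (cayman).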
void bc_finalizer::finalize_loop(region_node* r) {

	update_nstack(r);

	cf_node *loop_start = sh.create_cf(CF_OP_LOOP_START_DX10);
	cf_node *loop_end = sh.create_cf(CF_OP_LOOP_END);
	bool has_instr = false;

	// skip the jump-target setup below for loops with an empty body
	for (depart_vec::iterator I = r->departs.begin(), E = r->departs.end();
			I != E && !has_instr; ++I) {
		depart_node *dep = *I;
		if (!dep->empty())
			has_instr = true;
	}

	if (has_instr) {
		loop_start->jump_after(loop_end);
		loop_end->jump_after(loop_start);
	}

	for (depart_vec::iterator I = r->departs.begin(), E = r->departs.end();
			I != E; ++I) {
		depart_node *dep = *I;
		cf_node *loop_break = sh.create_cf(CF_OP_LOOP_BREAK);
		loop_break->jump(loop_end);
		dep->push_back(loop_break);
		dep->expand();
	}

	// FIXME produces unnecessary LOOP_CONTINUE
	for (repeat_vec::iterator I = r->repeats.begin(), E = r->repeats.end();
			I != E; ++I) {
		repeat_node *rep = *I;
		if (!(rep->parent == r && rep->prev == NULL)) {
			cf_node *loop_cont = sh.create_cf(CF_OP_LOOP_CONTINUE);
			loop_cont->jump(loop_end);
			rep->push_back(loop_cont);
		}
		rep->expand();
	}

	r->push_front(loop_start);
	r->push_back(loop_end);
}
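
// Sketch of the CF layout finalize_loop() produces (arrows are jump
// targets; illustration only):
//
//   LOOP_START_DX10 --> first instruction after LOOP_END   (loop exit)
//     ...body...
//     LOOP_BREAK    --> LOOP_END    (appended to each depart)
//     LOOP_CONTINUE --> LOOP_END    (per repeat, except the trailing
//                                    repeat of this region)
//   LOOP_END        --> first instruction after LOOP_START (next iteration)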
void bc_finalizer::finalize_if(region_node* r) {

	update_nstack(r);

	// expecting the following control flow structure here:
	//   - region
	//     {
	//       - depart/repeat 1 (it may be depart/repeat for some outer region)
	//         {
	//           - if
	//             {
	//               - depart/repeat 2 (possibly for outer region)
	//                 {
	//                   - some optional code
	//                 }
	//             }
	//           - optional <else> code ...
	//         }
	//     }

	container_node *repdep1 = static_cast<container_node*>(r->first);
	assert(repdep1->is_depart() || repdep1->is_repeat());

	if_node *n_if = static_cast<if_node*>(repdep1->first);

	if (n_if) {

		assert(n_if->is_if());

		container_node *repdep2 = static_cast<container_node*>(n_if->first);
		assert(repdep2->is_depart() || repdep2->is_repeat());

		cf_node *if_jump = sh.create_cf(CF_OP_JUMP);
		cf_node *if_pop = sh.create_cf(CF_OP_POP);

		if_pop->bc.pop_count = 1;
		if_pop->jump_after(if_pop);

		r->push_front(if_jump);
		r->push_back(if_pop);

		bool has_else = n_if->next;

		if (has_else) {
			cf_node *nelse = sh.create_cf(CF_OP_ELSE);
			n_if->insert_after(nelse);
			if_jump->jump(nelse);
			nelse->jump_after(if_pop);
			nelse->bc.pop_count = 1;
		} else {
			if_jump->jump_after(if_pop);
			if_jump->bc.pop_count = 1;
		}

		n_if->expand();
	}

	for (depart_vec::iterator I = r->departs.begin(), E = r->departs.end();
			I != E; ++I) {
		(*I)->expand();
	}
	r->departs.clear();
	assert(r->repeats.empty());
}
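
// Sketch of the CF layout finalize_if() produces (illustration only):
//
//   with else:                          without else:
//     JUMP --> ELSE                       JUMP --> after POP, pop_count=1
//       <if body>                           <if body>
//     ELSE --> after POP, pop_count=1     POP, pop_count=1
//       <else body>
//     POP, pop_count=1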
void bc_finalizer::run_on(container_node* c) {

	for (node_iterator I = c->begin(), E = c->end(); I != E; ++I) {
		node *n = *I;

		if (n->is_alu_group()) {
			finalize_alu_group(static_cast<alu_group_node*>(n));
		} else {
			if (n->is_alu_clause()) {
				cf_node *c = static_cast<cf_node*>(n);

				if (c->bc.op == CF_OP_ALU_PUSH_BEFORE && ctx.is_egcm()) {
					if (ctx.stack_workaround_8xx) {
						region_node *r = c->get_parent_region();
						if (r) {
							unsigned loops = 0;
							unsigned ifs = 0;
							unsigned elems = get_stack_depth(r, loops, ifs);
							unsigned dmod1 = elems % ctx.stack_entry_size;
							unsigned dmod2 = (elems + 1) % ctx.stack_entry_size;

							if (elems && (!dmod1 || !dmod2))
								c->flags |= NF_ALU_STACK_WORKAROUND;
						}
					} else if (ctx.stack_workaround_9xx) {
						region_node *r = c->get_parent_region();
						if (r) {
							unsigned loops = 0;
							unsigned ifs = 0;
							get_stack_depth(r, loops, ifs);
							if (loops >= 2)
								c->flags |= NF_ALU_STACK_WORKAROUND;
						}
					}
				}
			} else if (n->is_fetch_inst()) {
				finalize_fetch(static_cast<fetch_node*>(n));
			} else if (n->is_cf_inst()) {
				finalize_cf(static_cast<cf_node*>(n));
			}

			if (n->is_container())
				run_on(static_cast<container_node*>(n));
		}
	}
}
void bc_finalizer::finalize_alu_group(alu_group_node* g) {

	alu_node *last = NULL;

	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		alu_node *n = static_cast<alu_node*>(*I);
		unsigned slot = n->bc.slot;

		value *d = n->dst.empty() ? NULL : n->dst[0];

		if (d && d->is_special_reg()) {
			assert(n->bc.op_ptr->flags & AF_MOVA);
			d = NULL;
		}

		sel_chan fdst = d ? d->get_final_gpr() : sel_chan(0, 0);

		if (d) {
			assert(fdst.chan() == slot || slot == SLOT_TRANS);
		}

		n->bc.dst_gpr = fdst.sel();
		n->bc.dst_chan = d ? fdst.chan() : slot < SLOT_TRANS ? slot : 0;

		if (d && d->is_rel() && d->rel && !d->rel->is_const()) {
			n->bc.dst_rel = 1;
			update_ngpr(d->array->gpr.sel() + d->array->array_size -1);
		} else {
			n->bc.dst_rel = 0;
		}

		n->bc.write_mask = d != NULL;
		n->bc.last = 0;

		if (n->bc.op_ptr->flags & AF_PRED) {
			n->bc.update_pred = (n->dst[1] != NULL);
			n->bc.update_exec_mask = (n->dst[2] != NULL);
		}

		// FIXME handle predication here
		n->bc.pred_sel = PRED_SEL_OFF;

		update_ngpr(n->bc.dst_gpr);

		finalize_alu_src(g, n);

		last = n;
	}

	last->bc.last = 1;
}
void bc_finalizer::finalize_alu_src(alu_group_node* g, alu_node* a) {
	vvec &sv = a->src;

	FBC_DUMP(
		sblog << "finalize_alu_src: ";
		dump::dump_op(a);
		sblog << "\n";
	);

	unsigned si = 0;

	for (vvec::iterator I = sv.begin(), E = sv.end(); I != E; ++I, ++si) {
		value *v = *I;
		assert(v);

		bc_alu_src &src = a->bc.src[si];
		sel_chan sc;
		src.rel = 0;

		sel_chan gpr;

		switch (v->kind) {
		case VLK_REL_REG:
			sc = v->get_final_gpr();
			src.sel = sc.sel();
			src.chan = sc.chan();
			if (!v->rel->is_const()) {
				src.rel = 1;
				update_ngpr(v->array->gpr.sel() + v->array->array_size -1);
			} else
				src.rel = 0;

			break;
		case VLK_REG:
			gpr = v->get_final_gpr();
			src.sel = gpr.sel();
			src.chan = gpr.chan();
			update_ngpr(src.sel);
			break;
		case VLK_TEMP:
			src.sel = v->gpr.sel();
			src.chan = v->gpr.chan();
			update_ngpr(src.sel);
			break;
		case VLK_UNDEF:
		case VLK_CONST: {
			literal lv = v->literal_value;
			src.chan = 0;

			if (lv == literal(0))
				src.sel = ALU_SRC_0;
			else if (lv == literal(0.5f))
				src.sel = ALU_SRC_0_5;
			else if (lv == literal(1.0f))
				src.sel = ALU_SRC_1;
			else if (lv == literal(1))
				src.sel = ALU_SRC_1_INT;
			else if (lv == literal(-1))
				src.sel = ALU_SRC_M_1_INT;
			else {
				src.sel = ALU_SRC_LITERAL;
				src.chan = g->literal_chan(lv);
				src.value = lv;
			}
			break;
		}
		case VLK_KCACHE: {
			cf_node *clause = static_cast<cf_node*>(g->parent);
			assert(clause->is_alu_clause());
			sel_chan k = translate_kcache(clause, v);

			assert(k && "kcache translation failed");

			src.sel = k.sel();
			src.chan = k.chan();
			break;
		}
		case VLK_PARAM:
		case VLK_SPECIAL_CONST:
			src.sel = v->select.sel();
			src.chan = v->select.chan();
			break;
		default:
			assert(!"unknown value kind");
			break;
		}
	}

	while (si < 3) {
		a->bc.src[si++].sel = 0;
	}
}
void bc_finalizer::copy_fetch_src(fetch_node &dst, fetch_node &src, unsigned arg_start)
{
	int reg = -1;

	for (unsigned chan = 0; chan < 4; ++chan) {

		dst.bc.dst_sel[chan] = SEL_MASK;

		unsigned sel = SEL_MASK;

		value *v = src.src[arg_start + chan];

		if (!v || v->is_undef()) {
			sel = SEL_MASK;
		} else if (v->is_const()) {
			literal l = v->literal_value;
			if (l == literal(0))
				sel = SEL_0;
			else if (l == literal(1.0f))
				sel = SEL_1;
			else {
				sblog << "invalid fetch constant operand " << chan << " ";
				dump::dump_op(&src);
				sblog << "\n";
				abort();
			}

		} else if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch source operand " << chan << " ";
				dump::dump_op(&src);
				sblog << "\n";
				abort();
			}

			sel = vchan;

		} else {
			sblog << "invalid fetch source operand " << chan << " ";
			dump::dump_op(&src);
			sblog << "\n";
			abort();
		}

		dst.bc.src_sel[chan] = sel;
	}

	if (reg >= 0)
		update_ngpr(reg);

	dst.bc.src_gpr = reg >= 0 ? reg : 0;
}
void bc_finalizer::emit_set_grad(fetch_node* f) {

	assert(f->src.size() == 12);
	unsigned ops[2] = { FETCH_OP_SET_GRADIENTS_V, FETCH_OP_SET_GRADIENTS_H };

	unsigned arg_start = 0;

	for (unsigned op = 0; op < 2; ++op) {
		fetch_node *n = sh.create_fetch();
		n->bc.set_op(ops[op]);

		arg_start += 4;

		copy_fetch_src(*n, *f, arg_start);

		f->insert_before(n);
	}
}
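
// Reading of the operand layout implied by the code above: arg_start is
// advanced by 4 before each copy, so SET_GRADIENTS_V takes src[4..7] and
// SET_GRADIENTS_H takes src[8..11], while src[0..3] stay with the sample
// instruction itself as the coordinates.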
void bc_finalizer::emit_set_texture_offsets(fetch_node &f) {
	assert(f.src.size() == 8);

	fetch_node *n = sh.create_fetch();

	n->bc.set_op(FETCH_OP_SET_TEXTURE_OFFSETS);

	copy_fetch_src(*n, f, 4);

	f.insert_before(n);
}
void bc_finalizer::finalize_fetch(fetch_node* f) {

	int reg = -1;

	// src

	unsigned src_count = 4;

	unsigned flags = f->bc.op_ptr->flags;

	if (flags & FF_VTX) {
		src_count = 1;
	} else if (flags & FF_USEGRAD) {
		emit_set_grad(f);
	} else if (flags & FF_USE_TEXTURE_OFFSETS) {
		emit_set_texture_offsets(*f);
	}

	for (unsigned chan = 0; chan < src_count; ++chan) {

		unsigned sel = f->bc.src_sel[chan];

		if (sel > SEL_W)
			continue;

		value *v = f->src[chan];

		if (v->is_undef()) {
			sel = SEL_MASK;
		} else if (v->is_const()) {
			literal l = v->literal_value;
			if (l == literal(0))
				sel = SEL_0;
			else if (l == literal(1.0f))
				sel = SEL_1;
			else {
				sblog << "invalid fetch constant operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

		} else if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch source operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

			sel = vchan;

		} else {
			sblog << "invalid fetch source operand " << chan << " ";
			dump::dump_op(f);
			sblog << "\n";
			abort();
		}

		f->bc.src_sel[chan] = sel;
	}

	if (reg >= 0)
		update_ngpr(reg);

	f->bc.src_gpr = reg >= 0 ? reg : 0;

	// dst

	reg = -1;

	unsigned dst_swz[4] = {SEL_MASK, SEL_MASK, SEL_MASK, SEL_MASK};

	for (unsigned chan = 0; chan < 4; ++chan) {

		unsigned sel = f->bc.dst_sel[chan];

		if (sel == SEL_MASK)
			continue;

		value *v = f->dst[chan];
		if (!v)
			continue;

		if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch dst operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

			dst_swz[vchan] = sel;

		} else {
			sblog << "invalid fetch dst operand " << chan << " ";
			dump::dump_op(f);
			sblog << "\n";
			abort();
		}
	}

	for (unsigned i = 0; i < 4; ++i)
		f->bc.dst_sel[i] = dst_swz[i];

	if (reg >= 0)
		update_ngpr(reg);

	f->bc.dst_gpr = reg >= 0 ? reg : 0;
}
void bc_finalizer::finalize_cf(cf_node* c) {

	unsigned flags = c->bc.op_ptr->flags;

	c->bc.end_of_program = 0;
	last_cf = c;

	if (flags & CF_EXP) {
		c->bc.set_op(CF_OP_EXPORT);
		last_export[c->bc.type] = c;

		int reg = -1;

		for (unsigned chan = 0; chan < 4; ++chan) {

			unsigned sel = c->bc.sel[chan];

			if (sel > SEL_W)
				continue;

			value *v = c->src[chan];

			if (v->is_undef()) {
				sel = SEL_MASK;
			} else if (v->is_const()) {
				literal l = v->literal_value;
				if (l == literal(0))
					sel = SEL_0;
				else if (l == literal(1.0f))
					sel = SEL_1;
				else {
					sblog << "invalid export constant operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}

			} else if (v->is_any_gpr()) {
				unsigned vreg = v->gpr.sel();
				unsigned vchan = v->gpr.chan();

				if (reg == -1)
					reg = vreg;
				else if ((unsigned)reg != vreg) {
					sblog << "invalid export source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}

				sel = vchan;

			} else {
				sblog << "invalid export source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}

			c->bc.sel[chan] = sel;
		}

		if (reg >= 0)
			update_ngpr(reg);

		c->bc.rw_gpr = reg >= 0 ? reg : 0;

	} else if (flags & CF_MEM) {

		int reg = -1;
		unsigned mask = 0;

		for (unsigned chan = 0; chan < 4; ++chan) {
			value *v = c->src[chan];
			if (!v || v->is_undef())
				continue;

			if (!v->is_any_gpr() || v->gpr.chan() != chan) {
				sblog << "invalid source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}
			unsigned vreg = v->gpr.sel();
			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}

			mask |= (1 << chan);
		}

		assert(reg >= 0 && mask);

		if (reg >= 0)
			update_ngpr(reg);

		c->bc.rw_gpr = reg >= 0 ? reg : 0;
		c->bc.comp_mask = mask;

		if (((flags & CF_RAT) || (!(flags & CF_STRM))) && (c->bc.type & 1)) {

			reg = -1;

			for (unsigned chan = 0; chan < 4; ++chan) {
				value *v = c->src[4 + chan];
				if (!v || v->is_undef())
					continue;

				if (!v->is_any_gpr() || v->gpr.chan() != chan) {
					sblog << "invalid source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}
				unsigned vreg = v->gpr.sel();
				if (reg == -1)
					reg = vreg;
				else if ((unsigned)reg != vreg) {
					sblog << "invalid source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}
			}

			if (reg >= 0)
				update_ngpr(reg);

			c->bc.index_gpr = reg >= 0 ? reg : 0;
		}
	} else if (flags & CF_CALL) {
		update_nstack(c->get_parent_region(), ctx.wavefront_size == 16 ? 2 : 1);
	}
}
sel_chan bc_finalizer::translate_kcache(cf_node* alu, value* v) {
	unsigned sel = v->select.sel();
	unsigned bank = sel >> 12;
	unsigned chan = v->select.chan();
	static const unsigned kc_base[] = {128, 160, 256, 288};

	sel &= 4095;

	unsigned line = sel >> 4;

	for (unsigned k = 0; k < 4; ++k) {
		bc_kcache &kc = alu->bc.kc[k];

		if (kc.mode == KC_LOCK_NONE)
			break;

		if (kc.bank == bank && (kc.addr == line ||
				(kc.mode == KC_LOCK_2 && kc.addr + 1 == line))) {

			sel = kc_base[k] + (sel - (kc.addr << 4));

			return sel_chan(sel, chan);
		}
	}

	assert(!"kcache translation error");
	return 0;
}
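
// Worked example for translate_kcache(), with assumed input values:
// v->select.sel() == 0x1023 gives bank = 1, sel &= 4095 leaves 0x23 (35),
// line = 35 >> 4 = 2. If kc[0] has locked bank 1 at addr 2 (KC_LOCK_1),
// the match rewrites sel to kc_base[0] + (35 - (2 << 4)) = 128 + 3 = 131,
// i.e. the constant is addressed through kcache slot 131 in this clause.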
void bc_finalizer::update_ngpr(unsigned gpr) {
	if (gpr < MAX_GPR - ctx.alu_temp_gprs && gpr >= ngpr)
		ngpr = gpr + 1;
}
unsigned bc_finalizer::get_stack_depth(node *n, unsigned &loops,
                                       unsigned &ifs, unsigned add) {
	unsigned stack_elements = add;
	bool has_non_wqm_push = (add != 0);
	region_node *r = n->is_region() ?
			static_cast<region_node*>(n) : n->get_parent_region();

	loops = 0;
	ifs = 0;

	while (r) {
		if (r->is_loop()) {
			++loops;
		} else {
			++ifs;
			has_non_wqm_push = true;
		}
		r = r->get_parent_region();
	}

	stack_elements += (loops * ctx.stack_entry_size) + ifs;

	// reserve additional elements in some cases
	switch (ctx.hw_class) {
	case HW_CLASS_R600:
	case HW_CLASS_R700:
		// If any non-WQM push is invoked, 2 elements should be reserved.
		if (has_non_wqm_push)
			stack_elements += 2;
		break;
	case HW_CLASS_CAYMAN:
		// If any stack operation is invoked, 2 elements should be reserved
		if (stack_elements)
			stack_elements += 2;
		break;
	case HW_CLASS_EVERGREEN:
		// According to the docs we need to reserve 1 element for each of the
		// following cases:
		//   1) non-WQM push is used with WQM/LOOP frames on stack
		//   2) ALU_ELSE_AFTER is used at the point of max stack usage
		//
		// It was found that the conditions above are not sufficient, there are
		// other cases where we also need to reserve stack space, that's why
		// we always reserve 1 stack element if we have non-WQM push on stack.
		// Condition 2 is ignored for now because we don't use this instruction.
		if (has_non_wqm_push)
			++stack_elements;
		break;
	case HW_CLASS_UNKNOWN:
		assert(0);
	}
	return stack_elements;
}
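
// Worked example for get_stack_depth(), assuming ctx.stack_entry_size == 4
// (the real value comes from the context): a node nested in one loop and
// one if gives loops = 1, ifs = 1, so stack_elements = 1 * 4 + 1 = 5; the
// if counts as a non-WQM push, so evergreen reserves one extra element (6)
// and r6xx/r7xx reserve two (7).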
void bc_finalizer::update_nstack(region_node* r, unsigned add) {
	unsigned loops = 0;
	unsigned ifs = 0;
	unsigned elems = r ? get_stack_depth(r, loops, ifs, add) : add;

	// XXX all chips expect this value to be computed using 4 as entry size,
	// not the real entry size
	unsigned stack_entries = (elems + 3) >> 2;

	if (nstack < stack_entries)
		nstack = stack_entries;
}
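
// Example of the rounding above: elems = 5 gives stack_entries =
// (5 + 3) >> 2 = 2, i.e. the stack size is reported in 4-element entries
// regardless of the real hw entry size, as the XXX note says.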
void bc_finalizer::cf_peephole() {
	// insert an explicit PUSH for ALU_PUSH_BEFORE clauses flagged by the
	// stack workarounds, turning the clause into a plain ALU one
	if (ctx.stack_workaround_8xx || ctx.stack_workaround_9xx) {
		for (node_iterator N, I = sh.root->begin(), E = sh.root->end(); I != E;
				I = N) {
			N = I; ++N;
			cf_node *c = static_cast<cf_node*>(*I);

			if (c->bc.op == CF_OP_ALU_PUSH_BEFORE &&
					(c->flags & NF_ALU_STACK_WORKAROUND)) {
				cf_node *push = sh.create_cf(CF_OP_PUSH);
				c->insert_before(push);
				push->jump(c);
				c->bc.set_op(CF_OP_ALU);
			}
		}
	}

	for (node_iterator N, I = sh.root->begin(), E = sh.root->end(); I != E;
			I = N) {
		N = I; ++N;

		cf_node *c = static_cast<cf_node*>(*I);

		if (c->jump_after_target) {
			c->jump_target = static_cast<cf_node*>(c->jump_target->next);
			c->jump_after_target = false;
		}

		if (c->is_cf_op(CF_OP_POP)) {
			node *p = c->prev;
			if (p->is_alu_clause()) {
				cf_node *a = static_cast<cf_node*>(p);

				if (a->bc.op == CF_OP_ALU) {
					a->bc.set_op(CF_OP_ALU_POP_AFTER);
					c->remove();
				}
			}
		} else if (c->is_cf_op(CF_OP_JUMP) && c->jump_target == c->next) {
			// if JUMP is immediately followed by its jump target,
			// then JUMP is useless and we can eliminate it
			c->remove();
		}
	}
}

} // namespace r600_sb