/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define FBC_DEBUG 0

#if FBC_DEBUG
#define FBC_DUMP(q) do { q } while (0)
#else
#define FBC_DUMP(q)
#endif

#include "sb_bc.h"
#include "sb_shader.h"
#include "sb_pass.h"

namespace r600_sb {
// Insert an ALU NOP group before group b4 to satisfy the rv6xx
// GPR-index (AR load) hazard: a group that reads a GPR through the
// address register must not immediately follow the group that wrote it.
void bc_finalizer::insert_rv6xx_load_ar_workaround(alu_group_node *b4) {

	alu_group_node *g = sh.create_alu_group();
	alu_node *a = sh.create_alu();

	a->bc.set_op(ALU_OP0_NOP);
	a->bc.last = 1;

	g->push_back(a);
	b4->insert_before(g);
}
int bc_finalizer::run() {

	run_on(sh.root);

	regions_vec &rv = sh.get_regions();
	for (regions_vec::reverse_iterator I = rv.rbegin(), E = rv.rend(); I != E;
			++I) {
		region_node *r = *I;

		assert(r);

		bool loop = r->is_loop();

		if (loop)
			finalize_loop(r);
		else
			finalize_if(r);

		r->expand();
	}

	cf_peephole();
	// workaround for some problems on r6xx/7xx:
	// add an ALU NOP clause to each vertex shader
	if (!ctx.is_egcm() && (sh.target == TARGET_VS || sh.target == TARGET_ES)) {
		cf_node *c = sh.create_clause(NST_ALU_CLAUSE);

		alu_group_node *g = sh.create_alu_group();

		alu_node *a = sh.create_alu();
		a->bc.set_op(ALU_OP0_NOP);
		a->bc.last = 1;

		g->push_back(a);
		c->push_back(g);

		sh.root->push_back(c);

		c = sh.create_cf(CF_OP_NOP);
		sh.root->push_back(c);

		last_cf = c;
	}
	if (!ctx.is_cayman() && last_cf->bc.op_ptr->flags & CF_ALU) {
		last_cf = sh.create_cf(CF_OP_NOP);
		sh.root->push_back(last_cf);
	}
	if (ctx.is_cayman()) {
		if (!last_cf) {
			cf_node *c = sh.create_cf(CF_OP_CF_END);
			sh.root->push_back(c);
		} else
			last_cf->insert_after(sh.create_cf(CF_OP_CF_END));
	} else
		last_cf->bc.end_of_program = 1;
	for (unsigned t = EXP_PIXEL; t < EXP_TYPE_COUNT; ++t) {
		cf_node *le = last_export[t];
		if (le)
			le->bc.set_op(CF_OP_EXPORT_DONE);
	}

	sh.ngpr = ngpr;
	sh.nstack = nstack;

	return 0;
}
void bc_finalizer::finalize_loop(region_node* r) {

	update_nstack(r);

	cf_node *loop_start = sh.create_cf(CF_OP_LOOP_START_DX10);
	cf_node *loop_end = sh.create_cf(CF_OP_LOOP_END);

	// Update last_cf, but don't overwrite it if it's outside the current
	// loop nest, since it may point to a cf that comes later in program
	// order. Checking a single parent level is sufficient because
	// finalize_loop() is processed in reverse order, from the innermost to
	// the outermost loop nest level.
	if (!last_cf || last_cf->get_parent_region() == r) {
		last_cf = loop_end;
	}

	loop_start->jump_after(loop_end);
	loop_end->jump_after(loop_start);
	for (depart_vec::iterator I = r->departs.begin(), E = r->departs.end();
			I != E; ++I) {
		depart_node *dep = *I;
		cf_node *loop_break = sh.create_cf(CF_OP_LOOP_BREAK);
		loop_break->jump(loop_end);
		dep->push_back(loop_break);
		dep->expand();
	}
	// FIXME produces unnecessary LOOP_CONTINUE
	for (repeat_vec::iterator I = r->repeats.begin(), E = r->repeats.end();
			I != E; ++I) {
		repeat_node *rep = *I;
		if (!(rep->parent == r && rep->prev == NULL)) {
			cf_node *loop_cont = sh.create_cf(CF_OP_LOOP_CONTINUE);
			loop_cont->jump(loop_end);
			rep->push_back(loop_cont);
		}
		rep->expand();
	}
	r->push_front(loop_start);
	r->push_back(loop_end);
}
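// Illustrative sketch (not from the original source) of the CF layout
// emitted for a simple loop with one break, after finalize_loop():
//
//   LOOP_START_DX10  ADDR -> after LOOP_END
//     ... loop body ...
//     LOOP_BREAK     ADDR -> LOOP_END
//     ... loop body ...
//   LOOP_END         ADDR -> after LOOP_START_DX10
//
// jump_after() targets are resolved to the following cf node later, in
// cf_peephole(), via the jump_after_target flag.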
void bc_finalizer::finalize_if(region_node* r) {

	update_nstack(r);

	// expecting the following control flow structure here:
	//
	// - region
	//   {
	//     - depart/repeat 1 (it may be depart/repeat for some outer region)
	//       {
	//         - if
	//           {
	//             - depart/repeat 2 (possibly for outer region)
	//               {
	//                 - some optional code
	//               }
	//           }
	//         - optional <else> code ...
	//       }
	//   }

	container_node *repdep1 = static_cast<container_node*>(r->first);
	assert(repdep1->is_depart() || repdep1->is_repeat());

	if_node *n_if = static_cast<if_node*>(repdep1->first);

	if (n_if) {

		assert(n_if->is_if());

		container_node *repdep2 = static_cast<container_node*>(n_if->first);
		assert(repdep2->is_depart() || repdep2->is_repeat());
		cf_node *if_jump = sh.create_cf(CF_OP_JUMP);
		cf_node *if_pop = sh.create_cf(CF_OP_POP);

		if (!last_cf || last_cf->get_parent_region() == r) {
			last_cf = if_pop;
		}

		if_pop->bc.pop_count = 1;
		if_pop->jump_after(if_pop);

		r->push_front(if_jump);
		r->push_back(if_pop);
		bool has_else = n_if->next;

		if (has_else) {
			cf_node *nelse = sh.create_cf(CF_OP_ELSE);
			n_if->insert_after(nelse);
			if_jump->jump(nelse);
			nelse->jump_after(if_pop);
			nelse->bc.pop_count = 1;

		} else {
			if_jump->jump_after(if_pop);
			if_jump->bc.pop_count = 1;
		}

		n_if->expand();
	}
	for (depart_vec::iterator I = r->departs.begin(), E = r->departs.end();
			I != E; ++I) {
		(*I)->expand();
	}
	r->departs.clear();
	assert(r->repeats.empty());
}
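// Illustrative sketch (not from the original source) of the CF emitted
// for an if/else after finalize_if():
//
//   JUMP  ADDR -> ELSE        (or after POP, pop_count = 1, if no else)
//     ... <then> code ...
//   ELSE  ADDR -> after POP, pop_count = 1
//     ... <else> code ...
//   POP   pop_count = 1, ADDR -> after POP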
void bc_finalizer::run_on(container_node* c) {
	node *prev_node = NULL;
	for (node_iterator I = c->begin(), E = c->end(); I != E; ++I) {
		node *n = *I;

		if (n->is_alu_group()) {
			finalize_alu_group(static_cast<alu_group_node*>(n), prev_node);
		} else {
			if (n->is_alu_clause()) {
				cf_node *c = static_cast<cf_node*>(n);

				if (c->bc.op == CF_OP_ALU_PUSH_BEFORE && ctx.is_egcm()) {
					if (ctx.stack_workaround_8xx) {
						region_node *r = c->get_parent_region();
						if (r) {
							unsigned loops = 0;
							unsigned ifs = 0;
							unsigned elems = get_stack_depth(r, loops, ifs);
							unsigned dmod1 = elems % ctx.stack_entry_size;
							unsigned dmod2 = (elems + 1) % ctx.stack_entry_size;

							if (elems && (!dmod1 || !dmod2))
								c->flags |= NF_ALU_STACK_WORKAROUND;
						}
					} else if (ctx.stack_workaround_9xx) {
						region_node *r = c->get_parent_region();
						if (r) {
							unsigned loops = 0;
							unsigned ifs = 0;
							get_stack_depth(r, loops, ifs);
							if (loops >= 2)
								c->flags |= NF_ALU_STACK_WORKAROUND;
						}
					}
				}
			}
			else if (n->is_fetch_inst()) {
				finalize_fetch(static_cast<fetch_node*>(n));
			} else if (n->is_cf_inst()) {
				finalize_cf(static_cast<cf_node*>(n));
			}

			if (n->is_container())
				run_on(static_cast<container_node*>(n));
		}

		prev_node = n;
	}
}
void bc_finalizer::finalize_alu_group(alu_group_node* g, node *prev_node) {

	alu_node *last = NULL;
	alu_group_node *prev_g = NULL;
	bool add_nop = false;

	if (prev_node && prev_node->is_alu_group()) {
		prev_g = static_cast<alu_group_node*>(prev_node);
	}
	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		alu_node *n = static_cast<alu_node*>(*I);
		unsigned slot = n->bc.slot;
		value *d = n->dst.empty() ? NULL : n->dst[0];

		if (d && d->is_special_reg()) {
			assert((n->bc.op_ptr->flags & AF_MOVA) || d->is_geometry_emit());
			d = NULL;
		}

		sel_chan fdst = d ? d->get_final_gpr() : sel_chan(0, 0);

		if (d) {
			assert(fdst.chan() == slot || slot == SLOT_TRANS);
		}

		n->bc.dst_gpr = fdst.sel();
		n->bc.dst_chan = d ? fdst.chan() : slot < SLOT_TRANS ? slot : 0;
		if (d && d->is_rel() && d->rel && !d->rel->is_const()) {
			n->bc.dst_rel = 1;
			update_ngpr(d->array->gpr.sel() + d->array->array_size - 1);
		} else {
			n->bc.dst_rel = 0;
		}

		n->bc.write_mask = d != NULL;
		n->bc.last = 0;
		if (n->bc.op_ptr->flags & AF_PRED) {
			n->bc.update_pred = (n->dst[1] != NULL);
			n->bc.update_exec_mask = (n->dst[2] != NULL);
		}

		// FIXME handle predication here
		n->bc.pred_sel = PRED_SEL_OFF;

		update_ngpr(n->bc.dst_gpr);

		add_nop |= finalize_alu_src(g, n, prev_g);

		last = n;
	}
	if (add_nop) {
		if (sh.get_ctx().r6xx_gpr_index_workaround) {
			insert_rv6xx_load_ar_workaround(g);
		}
	}

	last->bc.last = 1;
}
bool bc_finalizer::finalize_alu_src(alu_group_node* g, alu_node* a,
                                    alu_group_node *prev) {
	vvec &sv = a->src;
	bool add_nop = false;

	FBC_DUMP(
		sblog << "finalize_alu_src: ";
		dump::dump_op(a);
		sblog << "\n";
	);

	unsigned si = 0;

	for (vvec::iterator I = sv.begin(), E = sv.end(); I != E; ++I, ++si) {
		value *v = *I;
		assert(v);

		bc_alu_src &src = a->bc.src[si];
		sel_chan sc;
		src.rel = 0;

		sel_chan gpr;

		switch (v->kind) {
		case VLK_REL_REG:
			sc = v->get_final_gpr();
			src.sel = sc.sel();
			src.chan = sc.chan();

			if (!v->rel->is_const()) {
				src.rel = 1;
				update_ngpr(v->array->gpr.sel() + v->array->array_size - 1);

				// rv6xx: a relative read of a GPR written by the previous
				// group needs a NOP group in between, so request one
				if (prev && !add_nop) {
					for (node_iterator pI = prev->begin(), pE = prev->end();
							pI != pE; ++pI) {
						alu_node *pn = static_cast<alu_node*>(*pI);
						if (pn->bc.dst_gpr == src.sel) {
							add_nop = true;
							break;
						}
					}
				}
			} else
				src.rel = 0;

			break;
		case VLK_REG:
			gpr = v->get_final_gpr();
			src.sel = gpr.sel();
			src.chan = gpr.chan();
			update_ngpr(src.sel);
			break;
		case VLK_TEMP:
			src.sel = v->gpr.sel();
			src.chan = v->gpr.chan();
			update_ngpr(src.sel);
			break;
		case VLK_UNDEF:
		case VLK_CONST: {
			literal lv = v->literal_value;
			src.chan = 0;

			if (lv == literal(0))
				src.sel = ALU_SRC_0;
			else if (lv == literal(0.5f))
				src.sel = ALU_SRC_0_5;
			else if (lv == literal(1.0f))
				src.sel = ALU_SRC_1;
			else if (lv == literal(1))
				src.sel = ALU_SRC_1_INT;
			else if (lv == literal(-1))
				src.sel = ALU_SRC_M_1_INT;
			else {
				src.sel = ALU_SRC_LITERAL;
				src.chan = g->literal_chan(lv);
				src.value = lv;
			}
			break;
		}
		case VLK_KCACHE: {
			cf_node *clause = static_cast<cf_node*>(g->parent);
			assert(clause->is_alu_clause());
			sel_chan k = translate_kcache(clause, v);

			assert(k && "kcache translation failed");

			src.sel = k.sel();
			src.chan = k.chan();
			break;
		}
		case VLK_PARAM:
		case VLK_SPECIAL_CONST:
			src.sel = v->select.sel();
			src.chan = v->select.chan();
			break;
		default:
			assert(!"unknown value kind");
			break;
		}
		if (prev && !add_nop) {
			for (node_iterator pI = prev->begin(), pE = prev->end();
					pI != pE; ++pI) {
				alu_node *pn = static_cast<alu_node*>(*pI);
				if (pn->bc.dst_rel) {
					if (pn->bc.dst_gpr == src.sel) {
						add_nop = true;
						break;
					}
				}
			}
		}
	}
	while (si < 3) {
		a->bc.src[si++].sel = 0;
	}

	return add_nop;
}
void bc_finalizer::copy_fetch_src(fetch_node &dst, fetch_node &src,
                                  unsigned arg_start)
{
	int reg = -1;

	for (unsigned chan = 0; chan < 4; ++chan) {

		dst.bc.dst_sel[chan] = SEL_MASK;

		unsigned sel = SEL_MASK;

		value *v = src.src[arg_start + chan];

		if (!v || v->is_undef()) {
			sel = SEL_MASK;
		} else if (v->is_const()) {
			literal l = v->literal_value;
			if (l == literal(0))
				sel = SEL_0;
			else if (l == literal(1.0f))
				sel = SEL_1;
			else {
				sblog << "invalid fetch constant operand " << chan << " ";
				dump::dump_op(&src);
				sblog << "\n";
				abort();
			}

		} else if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			// all gpr source operands must live in the same register
			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch source operand " << chan << " ";
				dump::dump_op(&src);
				sblog << "\n";
				abort();
			}

			sel = vchan;

		} else {
			sblog << "invalid fetch source operand " << chan << " ";
			dump::dump_op(&src);
			sblog << "\n";
			abort();
		}

		dst.bc.src_sel[chan] = sel;
	}

	if (reg >= 0)
		update_ngpr(reg);

	dst.bc.src_gpr = reg >= 0 ? reg : 0;
}
void bc_finalizer::emit_set_grad(fetch_node* f) {

	assert(f->src.size() == 12);
	unsigned ops[2] = { FETCH_OP_SET_GRADIENTS_V, FETCH_OP_SET_GRADIENTS_H };

	unsigned arg_start = 0;

	for (unsigned op = 0; op < 2; ++op) {
		fetch_node *n = sh.create_fetch();
		n->bc.set_op(ops[op]);

		arg_start += 4;

		copy_fetch_src(*n, *f, arg_start);

		f->insert_before(n);
	}
}
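// Note on the source layout consumed above (as implied by the
// arg_start increments, not stated in the original source): the 12
// fetch sources appear to be laid out as [coords 0-3][V gradients
// 4-7][H gradients 8-11], so arg_start advances to 4 and then 8
// before each copy_fetch_src() call.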
void bc_finalizer::emit_set_texture_offsets(fetch_node &f) {
	assert(f.src.size() == 8);

	fetch_node *n = sh.create_fetch();

	n->bc.set_op(FETCH_OP_SET_TEXTURE_OFFSETS);

	copy_fetch_src(*n, f, 4);

	f.insert_before(n);
}
void bc_finalizer::finalize_fetch(fetch_node* f) {

	int reg = -1;

	// src

	unsigned src_count = 4;

	unsigned flags = f->bc.op_ptr->flags;

	if (flags & FF_VTX) {
		src_count = 1;
	} else if (flags & FF_USEGRAD) {
		emit_set_grad(f);
	} else if (flags & FF_USE_TEXTURE_OFFSETS) {
		emit_set_texture_offsets(*f);
	}

	for (unsigned chan = 0; chan < src_count; ++chan) {

		unsigned sel = f->bc.src_sel[chan];

		if (sel > SEL_W)
			continue;

		value *v = f->src[chan];

		if (v->is_undef()) {
			sel = SEL_MASK;
		} else if (v->is_const()) {
			literal l = v->literal_value;
			if (l == literal(0))
				sel = SEL_0;
			else if (l == literal(1.0f))
				sel = SEL_1;
			else {
				sblog << "invalid fetch constant operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

		} else if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch source operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

			sel = vchan;

		} else {
			sblog << "invalid fetch source operand " << chan << " ";
			dump::dump_op(f);
			sblog << "\n";
			abort();
		}

		f->bc.src_sel[chan] = sel;
	}

	if (reg >= 0)
		update_ngpr(reg);

	f->bc.src_gpr = reg >= 0 ? reg : 0;
	// dst

	reg = -1;

	unsigned dst_swz[4] = {SEL_MASK, SEL_MASK, SEL_MASK, SEL_MASK};

	for (unsigned chan = 0; chan < 4; ++chan) {

		unsigned sel = f->bc.dst_sel[chan];

		if (sel == SEL_MASK)
			continue;

		value *v = f->dst[chan];
		if (!v)
			continue;

		if (v->is_any_gpr()) {
			unsigned vreg = v->gpr.sel();
			unsigned vchan = v->gpr.chan();

			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid fetch dst operand " << chan << " ";
				dump::dump_op(f);
				sblog << "\n";
				abort();
			}

			dst_swz[vchan] = sel;

		} else {
			sblog << "invalid fetch dst operand " << chan << " ";
			dump::dump_op(f);
			sblog << "\n";
			abort();
		}
	}

	for (unsigned i = 0; i < 4; ++i)
		f->bc.dst_sel[i] = dst_swz[i];

	if (reg >= 0)
		update_ngpr(reg);

	f->bc.dst_gpr = reg >= 0 ? reg : 0;
}
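// Illustrative example for the dst swizzle remap above: if the value
// for conceptual output channel 0 carries component sel == SEL_X but
// was register-allocated to GPR channel 2 (vchan == 2), then
// dst_swz[2] = SEL_X, i.e. the hardware writes the fetched X
// component into channel 2 of dst_gpr.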
void bc_finalizer::finalize_cf(cf_node* c) {

	unsigned flags = c->bc.op_ptr->flags;

	c->bc.end_of_program = 0;
	last_cf = c;

	if (flags & CF_EXP) {
		c->bc.set_op(CF_OP_EXPORT);
		last_export[c->bc.type] = c;

		int reg = -1;

		for (unsigned chan = 0; chan < 4; ++chan) {

			unsigned sel = c->bc.sel[chan];

			if (sel > SEL_W)
				continue;

			value *v = c->src[chan];

			if (!v || v->is_undef()) {
				sel = SEL_MASK;
			} else if (v->is_const()) {
				literal l = v->literal_value;
				if (l == literal(0))
					sel = SEL_0;
				else if (l == literal(1.0f))
					sel = SEL_1;
				else {
					sblog << "invalid export constant operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}

			} else if (v->is_any_gpr()) {
				unsigned vreg = v->gpr.sel();
				unsigned vchan = v->gpr.chan();

				if (reg == -1)
					reg = vreg;
				else if ((unsigned)reg != vreg) {
					sblog << "invalid export source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}

				sel = vchan;

			} else {
				sblog << "invalid export source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}

			c->bc.sel[chan] = sel;
		}

		if (reg >= 0)
			update_ngpr(reg);

		c->bc.rw_gpr = reg >= 0 ? reg : 0;
	} else if (flags & CF_MEM) {

		int reg = -1;
		unsigned mask = 0;

		for (unsigned chan = 0; chan < 4; ++chan) {
			value *v = c->src[chan];
			if (!v || v->is_undef())
				continue;

			if (!v->is_any_gpr() || v->gpr.chan() != chan) {
				sblog << "invalid source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}
			unsigned vreg = v->gpr.sel();
			if (reg == -1)
				reg = vreg;
			else if ((unsigned)reg != vreg) {
				sblog << "invalid source operand " << chan << " ";
				dump::dump_op(c);
				sblog << "\n";
				abort();
			}

			mask |= (1 << chan);
		}

		assert(reg >= 0 && mask);

		if (reg >= 0)
			update_ngpr(reg);

		c->bc.rw_gpr = reg >= 0 ? reg : 0;
		c->bc.comp_mask = mask;

		if (((flags & CF_RAT) || (!(flags & CF_STRM))) && (c->bc.type & 1)) {

			reg = -1;

			for (unsigned chan = 0; chan < 4; ++chan) {
				value *v = c->src[4 + chan];
				if (!v || v->is_undef())
					continue;

				if (!v->is_any_gpr() || v->gpr.chan() != chan) {
					sblog << "invalid source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}
				unsigned vreg = v->gpr.sel();
				if (reg == -1)
					reg = vreg;
				else if ((unsigned)reg != vreg) {
					sblog << "invalid source operand " << chan << " ";
					dump::dump_op(c);
					sblog << "\n";
					abort();
				}
			}

			if (reg >= 0)
				update_ngpr(reg);

			c->bc.index_gpr = reg >= 0 ? reg : 0;
		}
	} else if (flags & CF_CALL) {
		update_nstack(c->get_parent_region(), ctx.wavefront_size == 16 ? 2 : 1);
	}
}
sel_chan bc_finalizer::translate_kcache(cf_node* alu, value* v) {
	unsigned sel = v->select.sel();
	unsigned bank = sel >> 12;
	unsigned chan = v->select.chan();
	static const unsigned kc_base[] = {128, 160, 256, 288};

	sel &= 4095;

	unsigned line = sel >> 4;

	for (unsigned k = 0; k < 4; ++k) {
		bc_kcache &kc = alu->bc.kc[k];

		if (kc.mode == KC_LOCK_NONE)
			break;

		if (kc.bank == bank && (kc.addr == line ||
				(kc.mode == KC_LOCK_2 && kc.addr + 1 == line))) {

			sel = kc_base[k] + (sel - (kc.addr << 4));

			return sel_chan(sel, chan);
		}
	}

	assert(!"kcache translation error");
	return 0;
}
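// Worked example (illustrative): v->select.sel() == 0x1015 encodes
// bank 1 (0x1015 >> 12), offset 0x15 after masking with 4095, and
// kcache line 1 (0x15 >> 4). If kc[0] has bank == 1, addr == 1 and
// mode == KC_LOCK_1, the translated sel is
// kc_base[0] + (0x15 - (1 << 4)) == 128 + 5 == 133.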
void bc_finalizer::update_ngpr(unsigned gpr) {
	if (gpr < MAX_GPR - ctx.alu_temp_gprs && gpr >= ngpr)
		ngpr = gpr + 1;
}
unsigned bc_finalizer::get_stack_depth(node *n, unsigned &loops,
                                       unsigned &ifs, unsigned add) {
	unsigned stack_elements = add;
	bool has_non_wqm_push = (add != 0);
	region_node *r = n->is_region() ?
			static_cast<region_node*>(n) : n->get_parent_region();

	loops = 0;
	ifs = 0;

	while (r) {
		if (r->is_loop()) {
			++loops;
		} else {
			++ifs;
			has_non_wqm_push = true;
		}
		r = r->get_parent_region();
	}
	stack_elements += (loops * ctx.stack_entry_size) + ifs;

	// reserve additional elements in some cases
	switch (ctx.hw_class) {
	case HW_CLASS_R600:
	case HW_CLASS_R700:
		// If any non-WQM push is invoked, 2 elements should be reserved.
		if (has_non_wqm_push)
			stack_elements += 2;
		break;
	case HW_CLASS_CAYMAN:
		// If any stack operation is invoked, 2 elements should be reserved.
		if (stack_elements)
			stack_elements += 2;
		break;
	case HW_CLASS_EVERGREEN:
		// According to the docs we need to reserve 1 element for each of the
		// following cases:
		//   1) a non-WQM push is used with WQM/LOOP frames on the stack
		//   2) ALU_ELSE_AFTER is used at the point of max stack usage
		//
		// It was found that the conditions above are not sufficient; there
		// are other cases where we also need to reserve stack space. That's
		// why we always reserve 1 stack element if we have a non-WQM push on
		// the stack. Condition 2 is ignored for now because we don't use
		// this instruction.
		if (has_non_wqm_push)
			++stack_elements;
		break;
	case HW_CLASS_UNKNOWN:
		assert(0);
	}
	return stack_elements;
}
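// Worked example (illustrative; stack_entry_size == 1 is an assumed
// value here): for a node nested in two loops and one if, loops == 2
// and ifs == 1, so stack_elements == 2 * 1 + 1 == 3. The if counts as
// a non-WQM push, so Evergreen reserves one extra element (4 total),
// while r6xx/r7xx would reserve two (5 total).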
void bc_finalizer::update_nstack(region_node* r, unsigned add) {
	unsigned loops = 0;
	unsigned ifs = 0;
	unsigned elems = r ? get_stack_depth(r, loops, ifs, add) : add;

	// XXX all chips expect this value to be computed using 4 as the entry
	// size, not the real entry size
	unsigned stack_entries = (elems + 3) >> 2;

	if (nstack < stack_entries)
		nstack = stack_entries;
}
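// E.g. elems == 5 gives stack_entries == (5 + 3) >> 2 == 2: the
// element count is rounded up to whole 4-element stack entries.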
void bc_finalizer::cf_peephole() {
	// If the 8xx/9xx stack workaround was flagged in run_on(), split
	// ALU_PUSH_BEFORE into an explicit PUSH + plain ALU clause.
	if (ctx.stack_workaround_8xx || ctx.stack_workaround_9xx) {
		for (node_iterator N, I = sh.root->begin(), E = sh.root->end(); I != E;
				I = N) {
			N = I; ++N;
			cf_node *c = static_cast<cf_node*>(*I);

			if (c->bc.op == CF_OP_ALU_PUSH_BEFORE &&
					(c->flags & NF_ALU_STACK_WORKAROUND)) {
				cf_node *push = sh.create_cf(CF_OP_PUSH);
				c->insert_before(push);
				push->jump(c);
				c->bc.set_op(CF_OP_ALU);
			}
		}
	}
	for (node_iterator N, I = sh.root->begin(), E = sh.root->end(); I != E;
			I = N) {
		N = I; ++N;

		cf_node *c = static_cast<cf_node*>(*I);

		if (c->jump_after_target) {
			// resolve jump_after() targets to the following cf node
			c->jump_target = static_cast<cf_node*>(c->jump_target->next);
			c->jump_after_target = false;
		}

		if (c->is_cf_op(CF_OP_POP)) {
			node *p = c->prev;
			if (p->is_alu_clause()) {
				cf_node *a = static_cast<cf_node*>(p);

				if (a->bc.op == CF_OP_ALU) {
					// merge the POP into the preceding ALU clause
					a->bc.set_op(CF_OP_ALU_POP_AFTER);
					c->remove();
				}
			}
		} else if (c->is_cf_op(CF_OP_JUMP) && c->jump_target == c->next) {
			// if JUMP is immediately followed by its jump target,
			// then JUMP is useless and we can eliminate it
			c->remove();
		}
	}
}
} // namespace r600_sb