2 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #define FBC_DUMP(q) do { q } while (0)
39 #include "sb_shader.h"
47 int bc_finalizer::run() {
49 regions_vec
&rv
= sh
.get_regions();
51 for (regions_vec::reverse_iterator I
= rv
.rbegin(), E
= rv
.rend(); I
!= E
;
57 bool loop
= r
->is_loop();
71 // workaround for some problems on r6xx/7xx
72 // add ALU NOP to each vertex shader
73 if (!ctx
.is_egcm() && sh
.target
== TARGET_VS
) {
74 cf_node
*c
= sh
.create_clause(NST_ALU_CLAUSE
);
76 alu_group_node
*g
= sh
.create_alu_group();
78 alu_node
*a
= sh
.create_alu();
79 a
->bc
.set_op(ALU_OP0_NOP
);
85 sh
.root
->push_back(c
);
87 c
= sh
.create_cf(CF_OP_NOP
);
88 sh
.root
->push_back(c
);
93 if (last_cf
->bc
.op_ptr
->flags
& CF_ALU
) {
94 last_cf
= sh
.create_cf(CF_OP_NOP
);
95 sh
.root
->push_back(last_cf
);
99 last_cf
->insert_after(sh
.create_cf(CF_OP_CF_END
));
101 last_cf
->bc
.end_of_program
= 1;
103 for (unsigned t
= EXP_PIXEL
; t
< EXP_TYPE_COUNT
; ++t
) {
104 cf_node
*le
= last_export
[t
];
106 le
->bc
.set_op(CF_OP_EXPORT_DONE
);
// finalize_loop(): brackets region r with LOOP_START_DX10 / LOOP_END and
// materializes LOOP_BREAK for departs and LOOP_CONTINUE for repeats.
114 void bc_finalizer::finalize_loop(region_node
* r
) {
116 cf_node
*loop_start
= sh
.create_cf(CF_OP_LOOP_START_DX10
);
117 cf_node
*loop_end
= sh
.create_cf(CF_OP_LOOP_END
);
// The start/end pair point past each other (jump_after resolved later
// by cf_peephole into a direct target).
119 loop_start
->jump_after(loop_end
);
120 loop_end
->jump_after(loop_start
);
// Each depart from this region becomes a LOOP_BREAK targeting loop_end.
122 for (depart_vec::iterator I
= r
->departs
.begin(), E
= r
->departs
.end();
124 depart_node
*dep
= *I
;
125 cf_node
*loop_break
= sh
.create_cf(CF_OP_LOOP_BREAK
);
126 loop_break
->jump(loop_end
);
127 dep
->push_back(loop_break
);
131 // FIXME produces unnecessary LOOP_CONTINUE
132 for (repeat_vec::iterator I
= r
->repeats
.begin(), E
= r
->repeats
.end();
134 repeat_node
*rep
= *I
;
// Skip the trivial repeat that sits first in the loop region itself;
// only other repeats need an explicit LOOP_CONTINUE.
135 if (!(rep
->parent
== r
&& rep
->prev
== NULL
)) {
136 cf_node
*loop_cont
= sh
.create_cf(CF_OP_LOOP_CONTINUE
);
137 loop_cont
->jump(loop_end
);
138 rep
->push_back(loop_cont
);
// Finally insert the loop start/end CF nodes around the region body.
143 r
->push_front(loop_start
);
144 r
->push_back(loop_end
);
// finalize_if(): lowers an if region into JUMP / (ELSE) / POP control flow.
147 void bc_finalizer::finalize_if(region_node
* r
) {
151 // expecting the following control flow structure here:
154 // - depart/repeat 1 (it may be depart/repeat for some outer region)
158 // - depart/repeat 2 (possibly for outer region)
160 // - some optional code
163 // - optional <else> code> ...
167 container_node
*repdep1
= static_cast<container_node
*>(r
->first
);
168 assert(repdep1
->is_depart() || repdep1
->is_repeat());
170 if_node
*n_if
= static_cast<if_node
*>(repdep1
->first
);
175 assert(n_if
->is_if());
177 container_node
*repdep2
= static_cast<container_node
*>(n_if
->first
);
178 assert(repdep2
->is_depart() || repdep2
->is_repeat());
180 cf_node
*if_jump
= sh
.create_cf(CF_OP_JUMP
);
181 cf_node
*if_pop
= sh
.create_cf(CF_OP_POP
);
// POP restores one stack level and transfers control past itself.
183 if_pop
->bc
.pop_count
= 1;
184 if_pop
->jump_after(if_pop
);
186 r
->push_front(if_jump
);
187 r
->push_back(if_pop
);
// An else branch exists iff the if node has a following sibling.
189 bool has_else
= n_if
->next
;
192 cf_node
*nelse
= sh
.create_cf(CF_OP_ELSE
);
193 n_if
->insert_after(nelse
);
194 if_jump
->jump(nelse
);
195 nelse
->jump_after(if_pop
);
196 nelse
->bc
.pop_count
= 1;
// No else branch: JUMP goes past the POP and pops one level itself.
199 if_jump
->jump_after(if_pop
);
200 if_jump
->bc
.pop_count
= 1;
206 for (depart_vec::iterator I
= r
->departs
.begin(), E
= r
->departs
.end();
// If regions must not contain repeats (those belong to loop regions).
211 assert(r
->repeats
.empty());
// run_on(): dispatches per-node finalization over the children of c,
// recursing into any nested container nodes.
214 void bc_finalizer::run_on(container_node
* c
) {
216 for (node_iterator I
= c
->begin(), E
= c
->end(); I
!= E
; ++I
) {
// NOTE(review): the declaration of 'n' (presumably node *n = *I;) is on
// a line missing from this chunk — confirm against the full file.
219 if (n
->is_alu_group()) {
220 finalize_alu_group(static_cast<alu_group_node
*>(n
));
222 if (n
->is_fetch_inst()) {
223 finalize_fetch(static_cast<fetch_node
*>(n
));
224 } else if (n
->is_cf_inst()) {
225 finalize_cf(static_cast<cf_node
*>(n
));
226 } else if (n
->is_alu_clause()) {
228 } else if (n
->is_fetch_clause()) {
// Any other instruction kind at this level is a pass bug.
231 assert(!"unexpected node");
// Recurse into nested containers (regions, clauses, ...).
234 if (n
->is_container())
235 run_on(static_cast<container_node
*>(n
));
// finalize_alu_group(): assigns final dst gpr/chan for each ALU instruction
// in the group, sets write/predicate masks, and finalizes source operands.
240 void bc_finalizer::finalize_alu_group(alu_group_node
* g
) {
242 alu_node
*last
= NULL
;
244 for (node_iterator I
= g
->begin(), E
= g
->end(); I
!= E
; ++I
) {
245 alu_node
*n
= static_cast<alu_node
*>(*I
);
246 unsigned slot
= n
->bc
.slot
;
// First (possibly only) destination value, or NULL when there is none.
248 value
*d
= n
->dst
.empty() ? NULL
: n
->dst
[0];
// Special-register destinations are only legal for MOVA-type ops.
250 if (d
&& d
->is_special_reg()) {
251 assert(n
->bc
.op_ptr
->flags
& AF_MOVA
);
255 sel_chan fdst
= d
? d
->get_final_gpr() : sel_chan(0, 0);
// The dest channel must match the slot, except in the trans slot.
258 assert(fdst
.chan() == slot
|| slot
== SLOT_TRANS
);
261 n
->bc
.dst_gpr
= fdst
.sel();
262 n
->bc
.dst_chan
= d
? fdst
.chan() : slot
< SLOT_TRANS
? slot
: 0;
// Relative (indexed) destination: reserve the whole array in ngpr.
265 if (d
&& d
->is_rel() && d
->rel
&& !d
->rel
->is_const()) {
267 update_ngpr(d
->array
->gpr
.sel() + d
->array
->array_size
-1);
272 n
->bc
.write_mask
= d
!= NULL
;
// Predicate-setting ops update pred/exec masks iff those dsts exist.
275 if (n
->bc
.op_ptr
->flags
& AF_PRED
) {
276 n
->bc
.update_pred
= (n
->dst
[1] != NULL
);
277 n
->bc
.update_exec_mask
= (n
->dst
[2] != NULL
);
280 // FIXME handle predication here
281 n
->bc
.pred_sel
= PRED_SEL_OFF
;
283 update_ngpr(n
->bc
.dst_gpr
);
285 finalize_alu_src(g
, n
);
// finalize_alu_src(): translates each source value of ALU node a into the
// final bytecode source encoding — gpr, inline constant, literal, kcache
// or special constant. NOTE(review): the switch header over the value kind
// is on lines missing from this chunk.
293 void bc_finalizer::finalize_alu_src(alu_group_node
* g
, alu_node
* a
) {
297 cerr
<< "finalize_alu_src: ";
304 for (vvec::iterator I
= sv
.begin(), E
= sv
.end(); I
!= E
; ++I
, ++si
) {
308 bc_alu_src
&src
= a
->bc
.src
[si
];
316 sc
= v
->get_final_gpr();
318 src
.chan
= sc
.chan();
// Non-const relative addressing: reserve the entire array in ngpr.
319 if (!v
->rel
->is_const()) {
321 update_ngpr(v
->array
->gpr
.sel() + v
->array
->array_size
-1);
327 gpr
= v
->get_final_gpr();
329 src
.chan
= gpr
.chan();
330 update_ngpr(src
.sel
);
333 src
.sel
= v
->gpr
.sel();
334 src
.chan
= v
->gpr
.chan();
335 update_ngpr(src
.sel
);
// Literal sources: prefer the hardware inline constants when possible.
339 literal lv
= v
->literal_value
;
342 if (lv
== literal(0))
344 else if (lv
== literal(0.5f
))
345 src
.sel
= ALU_SRC_0_5
;
346 else if (lv
== literal(1.0f
))
348 else if (lv
== literal(1))
349 src
.sel
= ALU_SRC_1_INT
;
350 else if (lv
== literal(-1))
351 src
.sel
= ALU_SRC_M_1_INT
;
// Otherwise emit a real literal; its channel is allocated per group.
353 src
.sel
= ALU_SRC_LITERAL
;
354 src
.chan
= g
->literal_chan(lv
);
// Kcache constants are translated relative to the enclosing ALU clause.
360 cf_node
*clause
= static_cast<cf_node
*>(g
->parent
);
361 assert(clause
->is_alu_clause());
362 sel_chan k
= translate_kcache(clause
, v
);
364 assert(k
&& "kcache translation failed");
371 case VLK_SPECIAL_CONST
:
372 src
.sel
= v
->select
.sel();
373 src
.chan
= v
->select
.chan();
376 assert(!"unknown value kind");
// Zero the sel of remaining source slots (loop bound not visible here).
382 a
->bc
.src
[si
++].sel
= 0;
// emit_set_grad(): emits SET_GRADIENTS_V / SET_GRADIENTS_H fetch
// instructions built from the 12 gradient source values of fetch node f.
// NOTE(review): the declaration/updates of 'reg' are on missing lines.
386 void bc_finalizer::emit_set_grad(fetch_node
* f
) {
388 assert(f
->src
.size() == 12);
389 unsigned ops
[2] = { FETCH_OP_SET_GRADIENTS_V
, FETCH_OP_SET_GRADIENTS_H
};
391 unsigned arg_start
= 0;
// One fetch instruction per gradient set (V first, then H).
393 for (unsigned op
= 0; op
< 2; ++op
) {
394 fetch_node
*n
= sh
.create_fetch();
395 n
->bc
.set_op(ops
[op
]);
397 // FIXME extract this loop into a separate method and reuse it
403 for (unsigned chan
= 0; chan
< 4; ++chan
) {
// Gradient setup writes nothing back, so all dst channels are masked.
405 n
->bc
.dst_sel
[chan
] = SEL_MASK
;
407 unsigned sel
= SEL_MASK
;
409 value
*v
= f
->src
[arg_start
+ chan
];
411 if (!v
|| v
->is_undef()) {
413 } else if (v
->is_const()) {
// Only inline constants (0 / 1.0) are representable as fetch operands.
414 literal l
= v
->literal_value
;
417 else if (l
== literal(1.0f
))
420 cerr
<< "invalid fetch constant operand " << chan
<< " ";
426 } else if (v
->is_any_gpr()) {
427 unsigned vreg
= v
->gpr
.sel();
428 unsigned vchan
= v
->gpr
.chan();
// All gpr operands of one fetch must come from the same register.
432 else if ((unsigned)reg
!= vreg
) {
433 cerr
<< "invalid fetch source operand " << chan
<< " ";
442 cerr
<< "invalid fetch source operand " << chan
<< " ";
448 n
->bc
.src_sel
[chan
] = sel
;
454 n
->bc
.src_gpr
= reg
>= 0 ? reg
: 0;
// finalize_fetch(): finalizes the source register/swizzle and destination
// register/swizzle of a fetch instruction. NOTE(review): 'reg' declaration
// and several branches are on lines missing from this chunk.
461 void bc_finalizer::finalize_fetch(fetch_node
* f
) {
467 unsigned src_count
= 4;
469 unsigned flags
= f
->bc
.op_ptr
->flags
;
471 if (flags
& FF_VTX
) {
473 } else if (flags
& FF_USEGRAD
) {
// --- source operands ---
477 for (unsigned chan
= 0; chan
< src_count
; ++chan
) {
479 unsigned sel
= f
->bc
.src_sel
[chan
];
484 value
*v
= f
->src
[chan
];
488 } else if (v
->is_const()) {
// Only inline constants (0 / 1.0) are representable as fetch operands.
489 literal l
= v
->literal_value
;
492 else if (l
== literal(1.0f
))
495 cerr
<< "invalid fetch constant operand " << chan
<< " ";
501 } else if (v
->is_any_gpr()) {
502 unsigned vreg
= v
->gpr
.sel();
503 unsigned vchan
= v
->gpr
.chan();
// All gpr sources of one fetch must live in the same register.
507 else if ((unsigned)reg
!= vreg
) {
508 cerr
<< "invalid fetch source operand " << chan
<< " ";
517 cerr
<< "invalid fetch source operand " << chan
<< " ";
523 f
->bc
.src_sel
[chan
] = sel
;
529 f
->bc
.src_gpr
= reg
>= 0 ? reg
: 0;
// --- destination operands: rebuild the dst swizzle per hardware chan ---
535 unsigned dst_swz
[4] = {SEL_MASK
, SEL_MASK
, SEL_MASK
, SEL_MASK
};
537 for (unsigned chan
= 0; chan
< 4; ++chan
) {
539 unsigned sel
= f
->bc
.dst_sel
[chan
];
544 value
*v
= f
->dst
[chan
];
548 if (v
->is_any_gpr()) {
549 unsigned vreg
= v
->gpr
.sel();
550 unsigned vchan
= v
->gpr
.chan();
// All gpr dests must target the same register as well.
554 else if ((unsigned)reg
!= vreg
) {
555 cerr
<< "invalid fetch dst operand " << chan
<< " ";
561 dst_swz
[vchan
] = sel
;
564 cerr
<< "invalid fetch dst operand " << chan
<< " ";
572 for (unsigned i
= 0; i
< 4; ++i
)
573 f
->bc
.dst_sel
[i
] = dst_swz
[i
];
580 f
->bc
.dst_gpr
= reg
>= 0 ? reg
: 0;
// finalize_cf(): finalizes a CF instruction — call stack accounting,
// exports, memory/RAT writes, and valid_pixel_mode for branches/loops.
// NOTE(review): 'reg'/'mask' declarations are on missing lines.
583 void bc_finalizer::finalize_cf(cf_node
* c
) {
585 unsigned flags
= c
->bc
.op_ptr
->flags
;
// Calls consume extra stack entries (1 on cayman, 2 on other chips).
587 if (flags
& CF_CALL
) {
588 update_nstack(c
->get_parent_region(), ctx
.is_cayman() ? 1 : 2);
591 c
->bc
.end_of_program
= 0;
594 if (flags
& CF_EXP
) {
// Exports default to CF_OP_EXPORT; run() later promotes the last export
// of each type (tracked in last_export[]) to *_DONE.
595 c
->bc
.set_op(CF_OP_EXPORT
);
596 last_export
[c
->bc
.type
] = c
;
600 for (unsigned chan
= 0; chan
< 4; ++chan
) {
602 unsigned sel
= c
->bc
.sel
[chan
];
607 value
*v
= c
->src
[chan
];
611 } else if (v
->is_const()) {
// Only inline constants (0 / 1.0) are representable in exports.
612 literal l
= v
->literal_value
;
615 else if (l
== literal(1.0f
))
618 cerr
<< "invalid export constant operand " << chan
<< " ";
624 } else if (v
->is_any_gpr()) {
625 unsigned vreg
= v
->gpr
.sel();
626 unsigned vchan
= v
->gpr
.chan();
// All gpr sources of one export must come from the same register.
630 else if ((unsigned)reg
!= vreg
) {
631 cerr
<< "invalid export source operand " << chan
<< " ";
640 cerr
<< "invalid export source operand " << chan
<< " ";
646 c
->bc
.sel
[chan
] = sel
;
652 c
->bc
.rw_gpr
= reg
>= 0 ? reg
: 0;
654 } else if (flags
& CF_MEM
) {
// Memory exports: each used source must be a gpr in its matching chan.
659 for (unsigned chan
= 0; chan
< 4; ++chan
) {
660 value
*v
= c
->src
[chan
];
661 if (!v
|| v
->is_undef())
664 if (!v
->is_any_gpr() || v
->gpr
.chan() != chan
) {
665 cerr
<< "invalid source operand " << chan
<< " ";
670 unsigned vreg
= v
->gpr
.sel();
673 else if ((unsigned)reg
!= vreg
) {
674 cerr
<< "invalid source operand " << chan
<< " ";
683 assert(reg
>= 0 && mask
);
688 c
->bc
.rw_gpr
= reg
>= 0 ? reg
: 0;
689 c
->bc
.comp_mask
= mask
;
// Indexed RAT instructions (type bit 0 set) carry a second operand group
// in src[4..7] that supplies the index gpr.
691 if ((flags
& CF_RAT
) && (c
->bc
.type
& 1)) {
695 for (unsigned chan
= 0; chan
< 4; ++chan
) {
696 value
*v
= c
->src
[4 + chan
];
697 if (!v
|| v
->is_undef())
700 if (!v
->is_any_gpr() || v
->gpr
.chan() != chan
) {
701 cerr
<< "invalid source operand " << chan
<< " ";
706 unsigned vreg
= v
->gpr
.sel();
709 else if ((unsigned)reg
!= vreg
) {
710 cerr
<< "invalid source operand " << chan
<< " ";
722 c
->bc
.index_gpr
= reg
>= 0 ? reg
: 0;
// Branches/loops may use valid_pixel_mode when no gradients are used.
730 if ((flags
& (CF_BRANCH
| CF_LOOP
)) && !sh
.uses_gradients
) {
731 c
->bc
.valid_pixel_mode
= 1;
// translate_kcache(): maps a kcache constant (bank/line encoded in
// v->select) to its final sel_chan using the clause's locked kcache sets.
738 sel_chan
bc_finalizer::translate_kcache(cf_node
* alu
, value
* v
) {
739 unsigned sel
= v
->select
.sel();
// High bits of sel encode the constant buffer (bank); low bits the address.
740 unsigned bank
= sel
>> 12;
741 unsigned chan
= v
->select
.chan();
// Base select values for the four kcache sets.
742 static const unsigned kc_base
[] = {128, 160, 256, 288};
746 unsigned line
= sel
>> 4;
// Find the kcache set that has this bank/line locked.
748 for (unsigned k
= 0; k
< 4; ++k
) {
749 bc_kcache
&kc
= alu
->bc
.kc
[k
];
751 if (kc
.mode
== KC_LOCK_NONE
)
754 if (kc
.bank
== bank
&& (kc
.addr
== line
||
// KC_LOCK_2 sets cover two consecutive cache lines.
755 (kc
.mode
== KC_LOCK_2
&& kc
.addr
+ 1 == line
))) {
757 sel
= kc_base
[k
] + (sel
- (kc
.addr
<< 4));
759 return sel_chan(sel
, chan
);
// The scheduler must have locked every constant referenced here.
763 assert(!"kcache translation error");
// update_ngpr(): grows the tracked gpr count when 'gpr' exceeds it,
// excluding the ALU temp gprs reserved at the top of the register file.
// NOTE(review): the assignment body is on a line missing from this chunk.
767 void bc_finalizer::update_ngpr(unsigned gpr
) {
768 if (gpr
< MAX_GPR
- ctx
.alu_temp_gprs
&& gpr
>= ngpr
)
// update_nstack(): recomputes the worst-case CF stack depth from the
// loop/if nesting around region r plus 'add' extra entries.
// NOTE(review): the loop that counts 'loops'/'ifs' is on missing lines.
772 void bc_finalizer::update_nstack(region_node
* r
, unsigned add
) {
782 r
= r
->get_parent_region();
// Loops cost stack_entry_size elements each; ifs cost one element each.
785 unsigned stack_elements
= (loops
* ctx
.stack_entry_size
) + ifs
+ add
;
787 // FIXME calculate more precisely
788 if (ctx
.is_evergreen()) {
// Four stack elements per hardware stack entry, rounded up.
796 unsigned stack_entries
= (stack_elements
+ 3) >> 2;
798 if (nstack
< stack_entries
)
799 nstack
= stack_entries
;
// cf_peephole(): post-pass over the root CF list — resolves deferred
// jump_after targets, folds POP into a preceding plain ALU clause
// (ALU_POP_AFTER), and removes JUMPs to the immediately following node.
// NOTE(review): 'p' declaration and node removals are on missing lines.
802 void bc_finalizer::cf_peephole() {
804 for (node_iterator N
, I
= sh
.root
->begin(), E
= sh
.root
->end(); I
!= E
;
808 cf_node
*c
= static_cast<cf_node
*>(*I
);
// Resolve 'jump after X' into the direct target X->next.
810 if (c
->jump_after_target
) {
811 c
->jump_target
= static_cast<cf_node
*>(c
->jump_target
->next
);
812 c
->jump_after_target
= false;
// POP preceded by a plain CF_OP_ALU clause folds into ALU_POP_AFTER.
815 if (c
->is_cf_op(CF_OP_POP
)) {
817 if (p
->is_alu_clause()) {
818 cf_node
*a
= static_cast<cf_node
*>(p
);
820 if (a
->bc
.op
== CF_OP_ALU
) {
821 a
->bc
.set_op(CF_OP_ALU_POP_AFTER
);
825 } else if (c
->is_cf_op(CF_OP_JUMP
) && c
->jump_target
== c
->next
) {
826 // if JUMP is immediately followed by its jump target,
827 // then JUMP is useless and we can eliminate it
833 } // namespace r600_sb