/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
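// Statement-like debug dump helper: the do { ... } while (0) wrapper makes
// BCP_DUMP(...) safe to use as a single statement, e.g. inside an unbraced
// if/else.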
#define BCP_DUMP(q) do { q } while (0)
#include "r600_pipe.h"
#include "r600_shader.h"

#include "sb_shader.h"

namespace r600_sb {
int bc_parser::decode() {

	dec = new bc_decoder(ctx, dw, bc_ndw);

	shader_target t = TARGET_UNKNOWN;
	switch (bc->type) {
	case TGSI_PROCESSOR_FRAGMENT: t = TARGET_PS; break;
	case TGSI_PROCESSOR_VERTEX: t = TARGET_VS; break;
	case TGSI_PROCESSOR_COMPUTE: t = TARGET_COMPUTE; break;
	default: assert(!"unknown shader target"); return -1; break;
	}
	if (bc->type == TGSI_PROCESSOR_COMPUTE)
		t = TARGET_COMPUTE;
	sh = new shader(ctx, t, bc->debug_id);
	int r = decode_shader();
	sh->nstack = bc->nstack;

	return r;
}
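// Note on CF addressing in the loop below: every CF instruction occupies two
// dwords, so a dword offset i corresponds to CF index (i >> 1). Decoding
// continues past an end-of-program marker while (i >> 1) <= max_cf, so that
// CF instructions reachable only as branch targets are still parsed.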
int bc_parser::decode_shader() {
	int r = 0;
	unsigned i = 0;
	bool eop = false;

	do {
		if ((r = decode_cf(i, eop)))
			return r;
	} while (!eop || (i >> 1) <= max_cf);

	return 0;
}
int bc_parser::prepare() {
	int r = 0;
	if ((r = parse_decls()))
		return r;
	if ((r = prepare_ir()))
		return r;
	return 0;
}
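// A sketch of the intended call sequence (hypothetical driver code, assuming
// a bc_parser constructed elsewhere from the context, bytecode and pshader):
//
//   bc_parser parser(ctx, bc, pshader);
//   if (parser.decode() || parser.prepare())
//       return -1;   // translation to the SB IR failed
//
// decode() builds the raw node tree from the bytecode; prepare() then parses
// the declarations and rewrites the tree into the structured IR.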
int bc_parser::parse_decls() {

	sh->add_gpr_array(0, bc->ngpr, 0x0F);

	// compute shaders have some values preloaded in R0, R1
	sh->add_input(0 /* GPR */, true /* preloaded */, 0x0F /* mask */);
	sh->add_input(1 /* GPR */, true /* preloaded */, 0x0F /* mask */);
	if (pshader->indirect_files & ~(1 << TGSI_FILE_CONSTANT)) {

		assert(pshader->num_arrays);

		if (pshader->num_arrays) {
			for (unsigned i = 0; i < pshader->num_arrays; ++i) {
				r600_shader_array &a = pshader->arrays[i];
				sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
			}
		} else {
			sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
		}
	}
	if (sh->target == TARGET_VS)
		sh->add_input(0, 1, 0x0F);
	bool ps_interp = ctx.hw_class >= HW_CLASS_EVERGREEN
			&& sh->target == TARGET_PS;
	unsigned linear = 0, persp = 0, centroid = 1;
	for (unsigned i = 0; i < pshader->ninput; ++i) {
		r600_shader_io & in = pshader->input[i];
		bool preloaded = sh->target == TARGET_PS &&
				!(ps_interp && in.spi_sid);
		sh->add_input(in.gpr, preloaded, /*in.write_mask*/ 0x0F);
		if (ps_interp && in.spi_sid) {
			if (in.interpolate == TGSI_INTERPOLATE_LINEAR ||
					in.interpolate == TGSI_INTERPOLATE_COLOR)
				linear = 1;
			else if (in.interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
				persp = 1;
		}
	}
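	// Each enabled interpolation mode (linear, perspective) needs an I/J
	// pair, i.e. two preloaded components, doubled again when centroid
	// sampling is also used; that is the component count behind the mask
	// computed below.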
	unsigned mask = (1 << (2 * (linear + persp) * centroid)) - 1;
	sh->add_input(gpr, true, mask & 0x0F);

	return 0;
}
int bc_parser::decode_cf(unsigned &i, bool &eop) {

	int r;
	cf_node *cf = sh->create_cf();
	sh->root->push_back(cf);
	unsigned id = i >> 1;
	if (cf_map.size() < id + 1)
		cf_map.resize(id + 1);
	if ((r = dec->decode_cf(i, cf->bc)))
		return r;
	cf_op_flags flags = (cf_op_flags)cf->bc.op_ptr->flags;
	if (flags & CF_ALU) {
		if ((r = decode_alu_clause(cf)))
			return r;
	} else if (flags & CF_FETCH) {
		if ((r = decode_fetch_clause(cf)))
			return r;
	} else if (flags & CF_EXP) {
		assert(!cf->bc.rw_rel);
	} else if (flags & (CF_STRM | CF_RAT)) {
		assert(!cf->bc.rw_rel);
	} else if (flags & CF_BRANCH) {
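		// A branch may target a CF instruction beyond the last one decoded
		// so far; remember the highest target so decode_shader() keeps
		// decoding until all reachable CF instructions are covered.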
		if (cf->bc.addr > max_cf)
			max_cf = cf->bc.addr;
	}
	eop = cf->bc.end_of_program || cf->bc.op == CF_OP_CF_END ||
			cf->bc.op == CF_OP_RET;

	return 0;
}
int bc_parser::decode_alu_clause(cf_node* cf) {
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1, gcnt;
	cf->subtype = NST_ALU_CLAUSE;
	memset(slots[0], 0, 5 * sizeof(slots[0][0]));
	while (cnt) {
		decode_alu_group(cf, i, gcnt);
		cnt -= gcnt;
	}

	return 0;
}
int bc_parser::decode_alu_group(cf_node* cf, unsigned &i, unsigned &gcnt) {

	int r;
	alu_node *n;
	alu_group_node *g = sh->create_alu_group();
	memset(slots[cgroup], 0, 5 * sizeof(slots[0][0]));
	unsigned literal_mask = 0;

	gcnt = 0;

	do {
		n = sh->create_alu();
		g->push_back(n);
		if ((r = dec->decode_alu(i, n->bc)))
			return r;
		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}
		++gcnt;

	} while (gcnt <= 5 && !n->bc.last);
	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		n = static_cast<alu_node*>(*I);
		for (int k = 0; k < n->bc.op_ptr->src_count; ++k) {
			bc_alu_src &src = n->bc.src[k];
			if (src.sel == ALU_SRC_LITERAL) {
				literal_mask |= (1 << src.chan);
				src.value.u = dw[i + src.chan];
			}
		}
	}
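	// ALU literal constants are stored in the dwords that follow the group,
	// one dword per used channel; the mask collected above tells which
	// channels (and therefore how many trailing dwords) are occupied.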
	unsigned literal_ndw = 0;
	while (literal_mask) {
		g->literals.push_back(dw[i + literal_ndw]);
		++literal_ndw;
		literal_mask >>= 1;
	}
	literal_ndw = (literal_ndw + 1) & ~1u;
	i += literal_ndw;
	gcnt += literal_ndw >> 1;

	return 0;
}
int bc_parser::prepare_alu_clause(cf_node* cf) {
	// loop over alu groups
	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
		assert(I->subtype == NST_ALU_GROUP);
		alu_group_node *g = static_cast<alu_group_node*>(*I);
		prepare_alu_group(cf, g);
	}

	return 0;
}
int bc_parser::prepare_alu_group(cf_node* cf, alu_group_node *g) {

	alu_node *n;
	memset(slots[cgroup], 0, 5 * sizeof(slots[0][0]));
	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		n = static_cast<alu_node*>(*I);
		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}
		unsigned src_count = n->bc.op_ptr->src_count;
		if (ctx.alu_slots(n->bc.op) & AF_4SLOT)
			n->flags |= NF_ALU_4SLOT;
		n->src.resize(src_count);
		unsigned flags = n->bc.op_ptr->flags;
		if (flags & AF_PRED) {
			if (n->bc.update_pred)
				n->dst[1] = sh->get_special_value(SV_ALU_PRED);
			if (n->bc.update_exec_mask)
				n->dst[2] = sh->get_special_value(SV_EXEC_MASK);
			n->flags |= NF_DONT_HOIST;
		} else if (flags & AF_KILL) {
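			// KILL-class instructions update the valid (kill) mask as a side
			// effect, so they must keep their position and may not be
			// duplicated or dropped by the optimizer.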
			n->dst[1] = sh->get_special_value(SV_VALID_MASK);
			n->flags |= NF_DONT_HOIST | NF_DONT_MOVE |
					NF_DONT_KILL | NF_SCHEDULE_EARLY;
		}
		if (flags & AF_MOVA) {
			n->dst[0] = sh->get_special_value(SV_AR_INDEX);
			n->flags |= NF_DONT_HOIST;
		} else if (n->bc.op_ptr->src_count == 3 || n->bc.write_mask) {
			assert(!n->bc.dst_rel || n->bc.index_mode == INDEX_AR_X);
			value *v = sh->get_gpr_value(false, n->bc.dst_gpr, n->bc.dst_chan,
					n->bc.dst_rel);
			n->dst[0] = v;
		}
		if (n->bc.pred_sel) {
			sh->has_alu_predication = true;
			n->pred = sh->get_special_value(SV_ALU_PRED);
		}
		for (unsigned s = 0; s < src_count; ++s) {
			bc_alu_src &src = n->bc.src[s];
			if (src.sel == ALU_SRC_LITERAL) {
				n->src[s] = sh->get_const_value(src.value);
			} else if (src.sel == ALU_SRC_PS || src.sel == ALU_SRC_PV) {
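				// PV/PS refer to the results of the previous ALU group:
				// slots[] keeps the instructions of the current and previous
				// group per slot (x, y, z, w, trans), so PS maps to the trans
				// slot and PV.chan to the corresponding vector slot of the
				// previous group.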
				unsigned pgroup = !cgroup, prev_slot = src.sel == ALU_SRC_PS ?
						SLOT_TRANS : src.chan;
				alu_node *prev_alu = slots[pgroup][prev_slot];
				if (!prev_alu->dst[0]) {
					value *t = sh->create_temp_value();
					prev_alu->dst[0] = t;
				}
				value *d = prev_alu->dst[0];
				if (d->is_rel())
					d = sh->get_gpr_value(true, prev_alu->bc.dst_gpr,
							prev_alu->bc.dst_chan,
							prev_alu->bc.dst_rel);

				n->src[s] = d;
			} else if (ctx.is_kcache_sel(src.sel)) {
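				// Kcache constants are encoded in the source sel: bits 5 and
				// 7 select one of the four kcache sets bound to this CF
				// (cf->bc.kc), the low 5 bits give the offset within the set,
				// and kc.addr is the set's base address in units of 16
				// constants (hence << 4).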
				unsigned sel = src.sel, kc_addr;
				unsigned kc_set = ((sel >> 7) & 2) + ((sel >> 5) & 1);
				bc_kcache &kc = cf->bc.kc[kc_set];
				kc_addr = (kc.addr << 4) + (sel & 0x1F);
				n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan);
			} else if (src.sel < MAX_GPR) {
				value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);

				n->src[s] = v;
			} else if (src.sel >= ALU_SRC_PARAM_OFFSET) {
				// using slot for value channel because in fact the slot
				// determines the channel that is loaded by INTERP_LOAD_P0
				// (and maybe some others).
				// otherwise GVN will consider INTERP_LOAD_P0s with the same
				// param index as equal instructions and leave only one of them
				n->src[s] = sh->get_special_ro_value(sel_chan(src.sel,
						n->bc.slot));
			} else {
				switch (src.sel) {
				case ALU_SRC_0:
					n->src[s] = sh->get_const_value(0);
					break;
				case ALU_SRC_0_5:
					n->src[s] = sh->get_const_value(0.5f);
					break;
				case ALU_SRC_1:
					n->src[s] = sh->get_const_value(1.0f);
					break;
				case ALU_SRC_1_INT:
					n->src[s] = sh->get_const_value(1);
					break;
				case ALU_SRC_M_1_INT:
					n->src[s] = sh->get_const_value(-1);
					break;
				default:
					n->src[s] = sh->get_special_ro_value(src.sel);
					break;
				}
			}
		}
	}
	// pack multislot instructions into alu_packed_node
	alu_packed_node *p = NULL;
	for (node_iterator N, I = g->begin(), E = g->end(); I != E; I = N) {
		N = I + 1;
		alu_node *a = static_cast<alu_node*>(*I);
		unsigned sflags = a->bc.slot_flags;
		if (sflags == AF_4V || (ctx.is_cayman() && sflags == AF_S)) {
			p = sh->create_alu_packed();
			if (p->count() == 3 && ctx.is_cayman()) {
				// cayman's scalar instruction that can use 3 or 4 slots

				// FIXME for simplicity we always add the 4th slot,
				// but we might instead always remove the 4th slot and make
				// sure that regalloc won't choose the 'w' component for dst
				alu_node *f = static_cast<alu_node*>(p->first);
				alu_node *a = sh->create_alu();
				a->dst.resize(f->dst.size());
			}
		}
	}

	return 0;
}
int bc_parser::decode_fetch_clause(cf_node* cf) {
	int r;
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1;
	cf->subtype = NST_TEX_CLAUSE;

	while (cnt--) {
		fetch_node *n = sh->create_fetch();
		cf->push_back(n);
		if ((r = dec->decode_fetch(i, n->bc)))
			return r;
		if (n->bc.src_rel || n->bc.dst_rel)
			gpr_reladdr = true;
	}

	return 0;
}
int bc_parser::prepare_fetch_clause(cf_node *cf) {

	vvec grad_v, grad_h;
	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
		fetch_node *n = static_cast<fetch_node*>(*I);
		assert(n->is_valid());
		unsigned flags = n->bc.op_ptr->flags;
		unsigned vtx = flags & FF_VTX;
		unsigned num_src = vtx ? ctx.vtx_src_num : 4;
		if (flags & (FF_SETGRAD | FF_USEGRAD | FF_GETGRAD)) {
			sh->uses_gradients = true;
		}
		if (flags & FF_SETGRAD) {

			vvec *grad = NULL;

			switch (n->bc.op) {
			case FETCH_OP_SET_GRADIENTS_V:
				grad = &grad_v;
				break;
			case FETCH_OP_SET_GRADIENTS_H:
				grad = &grad_h;
				break;
			default:
				assert(!"unexpected SET_GRAD instruction");
				return -1;
			}
			for (unsigned s = 0; s < 4; ++s) {
				unsigned sw = n->bc.src_sel[s];
				if (sw <= SEL_W)
					(*grad)[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							sw, false);
				else if (sw == SEL_0)
					(*grad)[s] = sh->get_const_value(0.0f);
				else if (sw == SEL_1)
					(*grad)[s] = sh->get_const_value(1.0f);
			}
		}
		if (flags & FF_USEGRAD) {
			n->src.resize(12);
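			// Fetch sources are laid out as coords in src[0..3], vertical
			// gradients in src[4..7] and horizontal gradients in src[8..11],
			// which is what the +4 / +8 offsets below encode.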
			std::copy(grad_v.begin(), grad_v.end(), n->src.begin() + 4);
			std::copy(grad_h.begin(), grad_h.end(), n->src.begin() + 8);
		} else {
			n->src.resize(num_src);
		}
		for (int s = 0; s < 4; ++s) {
			if (n->bc.dst_sel[s] != SEL_MASK)
				n->dst[s] = sh->get_gpr_value(false, n->bc.dst_gpr, s, false);
			// NOTE: it doesn't matter here which components of the result we
			// are using, but the original n->bc.dst_sel should be taken into
			// account when building the bytecode
		}
		for (unsigned s = 0; s < num_src; ++s) {
			if (n->bc.src_sel[s] <= SEL_W)
				n->src[s] = sh->get_gpr_value(true, n->bc.src_gpr,
						n->bc.src_sel[s], false);
		}
	}

	return 0;
}
int bc_parser::prepare_ir() {
	for (id_cf_map::iterator I = cf_map.begin(), E = cf_map.end(); I != E;
			++I) {
		cf_node *c = *I;

		if (!c)
			continue;
		unsigned flags = c->bc.op_ptr->flags;
		if (flags & CF_ALU) {
			prepare_alu_clause(c);
		} else if (flags & CF_FETCH) {
			prepare_fetch_clause(c);
		} else if (c->bc.op == CF_OP_CALL_FS) {
			c->flags |= NF_SCHEDULE_EARLY | NF_DONT_MOVE;
		} else if (flags & CF_LOOP_START) {
			prepare_loop(c);
		} else if (c->bc.op == CF_OP_JUMP) {
			prepare_if(c);
		} else if (c->bc.op == CF_OP_LOOP_END) {
			loop_stack.pop();
		} else if (c->bc.op == CF_OP_LOOP_CONTINUE) {
			assert(!loop_stack.empty());
			repeat_node *rep = sh->create_repeat(loop_stack.top());
			if (c->parent->first != c)
				rep->move(c->parent->first, c);
			c->replace_with(rep);
			sh->simplify_dep_rep(rep);
		} else if (c->bc.op == CF_OP_LOOP_BREAK) {
			assert(!loop_stack.empty());
			depart_node *dep = sh->create_depart(loop_stack.top());
			if (c->parent->first != c)
				dep->move(c->parent->first, c);
			c->replace_with(dep);
			sh->simplify_dep_rep(dep);
		} else if (flags & CF_EXP) {
			// unroll burst exports
			assert(c->bc.op == CF_OP_EXPORT || c->bc.op == CF_OP_EXPORT_DONE);
			c->bc.set_op(CF_OP_EXPORT);
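			// A burst export writes several consecutive locations from
			// consecutive GPRs; it is unrolled here into individual
			// CF_OP_EXPORT nodes (incrementing rw_gpr/array_base for each
			// one) so that every export has explicit source values in the IR.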
			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;
			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				for (int s = 0; s < 4; ++s) {
					switch (c->bc.sel[s]) {
					case SEL_0:
						c->src[s] = sh->get_const_value(0.0f);
						break;
					case SEL_1:
						c->src[s] = sh->get_const_value(1.0f);
						break;
					default:
						if (c->bc.sel[s] <= SEL_W)
							c->src[s] = sh->get_gpr_value(true, c->bc.rw_gpr,
									c->bc.sel[s], false);
						else
							assert(!"invalid src_sel for export");
					}
				}

				if (!burst_count--)
					break;
				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;
				++cf_next->bc.array_base;
				c->insert_after(cf_next);
				c = cf_next;

			} while (1);
			c->bc.end_of_program = eop;
		} else if (flags & (CF_STRM | CF_RAT)) {
			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;
			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				for (int s = 0; s < 4; ++s) {
					if (c->bc.comp_mask & (1 << s))
						c->src[s] =
								sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
				}
				if ((flags & CF_RAT) && (c->bc.type & 1)) { // indexed write
					for (int s = 0; s < 3; ++s) {
						c->src[4 + s] =
								sh->get_gpr_value(true, c->bc.index_gpr, s, false);
					}
				}
				// FIXME we can probably relax this a bit
				c->flags |= NF_DONT_HOIST | NF_DONT_MOVE;

				if (!burst_count--)
					break;
				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;
				// FIXME is this correct?
				cf_next->bc.array_base += cf_next->bc.elem_size + 1;
				c->insert_after(cf_next);
				c = cf_next;

			} while (1);
			c->bc.end_of_program = eop;
		}
	}
	assert(loop_stack.empty());

	return 0;
}
int bc_parser::prepare_loop(cf_node* c) {
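	// The loop body [LOOP_START .. LOOP_END] is wrapped into a region with a
	// repeat node, and the region is pushed on loop_stack so that the
	// LOOP_BREAK / LOOP_CONTINUE cases in prepare_ir() can attach their
	// depart/repeat nodes to the innermost enclosing loop.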
	cf_node *end = cf_map[c->bc.addr - 1];
	assert(end->bc.op == CF_OP_LOOP_END);
	assert(c->parent == end->parent);
	region_node *reg = sh->create_region();
	repeat_node *rep = sh->create_repeat(reg);
	reg->push_back(rep);
	c->insert_before(reg);
	rep->move(c, end->next);
	loop_stack.push(reg);

	return 0;
}
int bc_parser::prepare_if(cf_node* c) {
	cf_node *c_else = NULL, *end = cf_map[c->bc.addr];
	sblog << "parsing JUMP @" << c->bc.id;
	if (end->bc.op == CF_OP_ELSE) {
		sblog << " found ELSE : ";
		c_else = end;
		end = cf_map[c_else->bc.addr];
	} else {
		sblog << " no else\n";
	}
	if (c_else->parent != c->parent)
		c_else = NULL;
	if (end->parent != c->parent)
		end = NULL;
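	// The JUMP/ELSE pair is restructured into a region with two depart nodes
	// and an if node whose condition is the current exec mask; the exact
	// nesting is built below (n_if inside dep, dep2 inside n_if).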
	region_node *reg = sh->create_region();
	depart_node *dep2 = sh->create_depart(reg);
	depart_node *dep = sh->create_depart(reg);
	if_node *n_if = sh->create_if();
	c->insert_before(reg);
	dep->move(c_else, end);
	dep->push_front(n_if);
	n_if->push_back(dep2);
	n_if->cond = sh->get_special_value(SV_EXEC_MASK);

	return 0;
}
} // namespace r600_sb