/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define BCP_DUMP(q) do { q } while (0)
#include "r600_pipe.h"
#include "r600_shader.h"

#include "sb_shader.h"

namespace r600_sb {
int bc_parser::decode() {

	dw = bc->bytecode;
	bc_ndw = bc->ndw;

	dec = new bc_decoder(ctx, dw, bc_ndw);

	shader_target t = TARGET_UNKNOWN;

	if (pshader) {
		switch (bc->type) {
		case TGSI_PROCESSOR_FRAGMENT: t = TARGET_PS; break;
		case TGSI_PROCESSOR_VERTEX: t = TARGET_VS; break;
		case TGSI_PROCESSOR_COMPUTE: t = TARGET_COMPUTE; break;
		default: assert(!"unknown shader target"); return -1; break;
		}
	} else {
		if (bc->type == TGSI_PROCESSOR_COMPUTE)
			t = TARGET_COMPUTE;
	}

	sh = new shader(ctx, t, bc->debug_id);
	sh->safe_math = sb_context::safe_math || (t == TARGET_COMPUTE);

	int r = decode_shader();

	sh->nstack = bc->nstack;

	return r;
}
int bc_parser::decode_shader() {
	int r = 0;
	unsigned i = 0;
	bool eop = false;

	do {
		if ((r = decode_cf(i, eop)))
			return r;
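		// CF instructions are 64 bits wide while i counts 32-bit dwords, so
		// (i >> 1) is the index of the next CF instruction. Decoding keeps
		// going past an end-of-program marker until every CF instruction
		// reachable through a branch target (max_cf, updated in decode_cf)
		// has been decoded.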
	} while (!eop || (i >> 1) <= max_cf);

	return 0;
}
int bc_parser::prepare() {
	int r = 0;
	if ((r = parse_decls()))
		return r;
	if ((r = prepare_ir()))
		return r;
	return 0;
}
int bc_parser::parse_decls() {

	if (!pshader) {
		sh->add_gpr_array(0, bc->ngpr, 0x0F);

		// compute shaders have some values preloaded in R0, R1
		sh->add_input(0 /* GPR */, true /* preloaded */, 0x0F /* mask */);
		sh->add_input(1 /* GPR */, true /* preloaded */, 0x0F /* mask */);
		return 0;
	}

	if (pshader->indirect_files & ~(1 << TGSI_FILE_CONSTANT)) {

		assert(pshader->num_arrays);

		if (pshader->num_arrays) {
			for (unsigned i = 0; i < pshader->num_arrays; ++i) {
				r600_shader_array &a = pshader->arrays[i];
				sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
			}
		} else {
			sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
		}
	}

	if (sh->target == TARGET_VS)
		sh->add_input(0, 1, 0x0F);

	bool ps_interp = ctx.hw_class >= HW_CLASS_EVERGREEN
			&& sh->target == TARGET_PS;

	unsigned linear = 0, persp = 0, centroid = 1;
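	// scan the PS inputs to find out which barycentric sets the shader
	// needs: linear vs. perspective interpolation, and whether a centroid
	// variant is used. On evergreen these inputs are interpolated by
	// instructions in the shader itself, so they are not preloaded.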
	for (unsigned i = 0; i < pshader->ninput; ++i) {
		r600_shader_io & in = pshader->input[i];
		bool preloaded = sh->target == TARGET_PS && !(ps_interp && in.spi_sid);
		sh->add_input(in.gpr, preloaded, /*in.write_mask*/ 0x0F);
		if (ps_interp && in.spi_sid) {
			if (in.interpolate == TGSI_INTERPOLATE_LINEAR ||
					in.interpolate == TGSI_INTERPOLATE_COLOR)
				linear = 1;
			else if (in.interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
				persp = 1;
			if (in.centroid)
				centroid = 2;
		}
	}

	if (ps_interp) {
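		// each enabled mode (linear, perspective) comes with an ij pair of
		// barycentric coordinates occupying two GPR channels, and centroid
		// use doubles the number of preloaded pairs, so 2 * (linear + persp)
		// * centroid channels are reserved for interpolators from GPR0 up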
		unsigned mask = (1 << (2 * (linear + persp) * centroid)) - 1;
		unsigned gpr = 0;

		while (mask) {
			sh->add_input(gpr, true, mask & 0x0F);
			mask >>= 4;
			++gpr;
		}
	}

	return 0;
}
int bc_parser::decode_cf(unsigned &i, bool &eop) {

	int r;

	cf_node *cf = sh->create_cf();
	sh->root->push_back(cf);

	unsigned id = i >> 1;

	cf->bc.id = id;
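	// remember the node for each CF index so that branch targets
	// (JUMP/ELSE/LOOP addresses) can later be resolved to nodes in
	// prepare_if and prepare_loop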
	if (cf_map.size() < id + 1)
		cf_map.resize(id + 1);

	cf_map[id] = cf;

	if ((r = dec->decode_cf(i, cf->bc)))
		return r;

	cf_op_flags flags = (cf_op_flags)cf->bc.op_ptr->flags;

	if (flags & CF_ALU) {
		if ((r = decode_alu_clause(cf)))
			return r;
	} else if (flags & CF_FETCH) {
		if ((r = decode_fetch_clause(cf)))
			return r;
	} else if (flags & CF_EXP) {
		assert(!cf->bc.rw_rel);
	} else if (flags & (CF_STRM | CF_RAT)) {
		assert(!cf->bc.rw_rel);
	} else if (flags & CF_BRANCH) {
		if (cf->bc.addr > max_cf)
			max_cf = cf->bc.addr;
	}

	eop = cf->bc.end_of_program || cf->bc.op == CF_OP_CF_END ||
			cf->bc.op == CF_OP_RET;

	return 0;
}
int bc_parser::decode_alu_clause(cf_node* cf) {
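	// cf->bc.addr is counted in 64-bit instruction words while dw[] is
	// indexed in 32-bit dwords, hence the << 1; the clause size is encoded
	// biased by -1, so count + 1 is the real number of 64-bit ALU words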
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1, gcnt;

	cf->subtype = NST_ALU_CLAUSE;

	cgroup = 0;
	memset(slots[0], 0, 5*sizeof(slots[0][0]));

	do {
		decode_alu_group(cf, i, gcnt);
		assert(gcnt <= cnt);
		cnt -= gcnt;
	} while (cnt);

	return 0;
}
int bc_parser::decode_alu_group(cf_node* cf, unsigned &i, unsigned &gcnt) {
	int r;
	alu_node *n;
	alu_group_node *g = sh->create_alu_group();
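	// slots[] is double-buffered: slots[cgroup] collects the instructions
	// of the group being decoded while slots[!cgroup] still describes the
	// previous group, so PV/PS references to its results can be resolved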
	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));
	gcnt = 0;

	unsigned literal_mask = 0;

	do {
		n = sh->create_alu();
		g->push_back(n);

		if ((r = dec->decode_alu(i, n->bc)))
			return r;

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		++gcnt;

	} while (gcnt <= 5 && !n->bc.last);

	assert(n->bc.last);

	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		n = static_cast<alu_node*>(*I);

		for (int k = 0; k < n->bc.op_ptr->src_count; ++k) {
			bc_alu_src &src = n->bc.src[k];

			if (src.sel == ALU_SRC_LITERAL) {
				literal_mask |= (1 << src.chan);
				src.value.u = dw[i + src.chan];
			}
		}
	}
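	// literal constants occupy the dwords immediately following the group;
	// literal_mask has one bit per referenced literal channel, and since
	// the hardware stores literals as 64-bit pairs the count is rounded up
	// to an even number of dwords below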
	unsigned literal_ndw = 0;
	while (literal_mask) {
		g->literals.push_back(dw[i + literal_ndw]);
		++literal_ndw;
		literal_mask >>= 1;
	}

	literal_ndw = (literal_ndw + 1) & ~1u;

	i += literal_ndw;
	gcnt += literal_ndw >> 1;

	cf->push_back(g);
	return 0;
}
int bc_parser::prepare_alu_clause(cf_node* cf) {

	// loop over alu groups
	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
		assert(I->subtype == NST_ALU_GROUP);
		alu_group_node *g = static_cast<alu_group_node*>(*I);
		prepare_alu_group(cf, g);
	}

	return 0;
}
int bc_parser::prepare_alu_group(cf_node* cf, alu_group_node *g) {

	alu_node *n;

	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));

	for (node_iterator I = g->begin(), E = g->end();
			I != E; ++I) {
		n = static_cast<alu_node*>(*I);

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		unsigned src_count = n->bc.op_ptr->src_count;

		if (ctx.alu_slots(n->bc.op) & AF_4SLOT)
			n->flags |= NF_ALU_4SLOT;

		n->src.resize(src_count);

		unsigned flags = n->bc.op_ptr->flags;
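		// predicate-setting instructions may produce up to two extra
		// results: dst[1] is the predicate itself and dst[2] the updated
		// exec mask, both modeled as special values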
		if (flags & AF_PRED) {
			n->dst.resize(3);
			if (n->bc.update_pred)
				n->dst[1] = sh->get_special_value(SV_ALU_PRED);
			if (n->bc.update_exec_mask)
				n->dst[2] = sh->get_special_value(SV_EXEC_MASK);

			n->flags |= NF_DONT_HOIST;

		} else if (flags & AF_KILL) {

			n->dst.resize(2);
			n->dst[1] = sh->get_special_value(SV_VALID_MASK);

			n->flags |= NF_DONT_HOIST | NF_DONT_MOVE |
					NF_DONT_KILL | NF_SCHEDULE_EARLY;

		} else {
			n->dst.resize(1);
		}

		if (flags & AF_MOVA) {

			n->dst[0] = sh->get_special_value(SV_AR_INDEX);

			n->flags |= NF_DONT_HOIST;

		} else if (n->bc.op_ptr->src_count == 3 || n->bc.write_mask) {
			assert(!n->bc.dst_rel || n->bc.index_mode == INDEX_AR_X);

			value *v = sh->get_gpr_value(false, n->bc.dst_gpr, n->bc.dst_chan,
					n->bc.dst_rel);

			n->dst[0] = v;
		}

		if (n->bc.pred_sel) {
			sh->has_alu_predication = true;
			n->pred = sh->get_special_value(SV_ALU_PRED);
		}

		for (unsigned s = 0; s < src_count; ++s) {
			bc_alu_src &src = n->bc.src[s];

			if (src.sel == ALU_SRC_LITERAL) {
				n->src[s] = sh->get_const_value(src.value);
			} else if (src.sel == ALU_SRC_PS || src.sel == ALU_SRC_PV) {
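				// PV/PS read the results of the previous group: PV.chan
				// selects the vector slot with the same channel, PS selects
				// the trans slot; both are looked up in slots[!cgroup]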
				unsigned pgroup = !cgroup, prev_slot = src.sel == ALU_SRC_PS ?
						SLOT_TRANS : src.chan;

				// XXX shouldn't happen but llvm backend uses PS on cayman
				if (prev_slot == SLOT_TRANS && ctx.is_cayman())
					prev_slot = SLOT_X;

				alu_node *prev_alu = slots[pgroup][prev_slot];

				assert(prev_alu);

				if (!prev_alu->dst[0]) {
					value * t = sh->create_temp_value();
					prev_alu->dst[0] = t;
				}

				value *d = prev_alu->dst[0];

				if (d->is_rel()) {
					d = sh->get_gpr_value(true, prev_alu->bc.dst_gpr,
							prev_alu->bc.dst_chan,
							prev_alu->bc.dst_rel);
				}

				n->src[s] = d;
			} else if (ctx.is_kcache_sel(src.sel)) {
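				// constant-cache operand: the 2-bit kcache set index is
				// scattered over the sel bits, e.g. sel 288 gives
				// kc_set = ((288 >> 7) & 2) + ((288 >> 5) & 1) = 2 + 1 = 3.
				// kc.addr is counted in units of 16 constants (one locked
				// cache line), and the low 5 bits of sel address a constant
				// within the set's two lines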
				unsigned sel = src.sel, kc_addr;
				unsigned kc_set = ((sel >> 7) & 2) + ((sel >> 5) & 1);

				bc_kcache &kc = cf->bc.kc[kc_set];
				kc_addr = (kc.addr << 4) + (sel & 0x1F);
				n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan);
			} else if (src.sel < MAX_GPR) {
				value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);

				n->src[s] = v;
			} else if (src.sel >= ALU_SRC_PARAM_OFFSET) {
				// using slot for value channel because in fact the slot
				// determines the channel that is loaded by INTERP_LOAD_P0
				// (and maybe some others).
				// otherwise GVN will consider INTERP_LOAD_P0s with the same
				// param index as equal instructions and leave only one of them
				n->src[s] = sh->get_special_ro_value(sel_chan(src.sel,
						n->bc.slot));
			} else {
				switch (src.sel) {
				case ALU_SRC_0:
					n->src[s] = sh->get_const_value(0);
					break;
				case ALU_SRC_0_5:
					n->src[s] = sh->get_const_value(0.5f);
					break;
				case ALU_SRC_1:
					n->src[s] = sh->get_const_value(1.0f);
					break;
				case ALU_SRC_1_INT:
					n->src[s] = sh->get_const_value(1);
					break;
				case ALU_SRC_M_1_INT:
					n->src[s] = sh->get_const_value(-1);
					break;
				default:
					n->src[s] = sh->get_special_ro_value(src.sel);
					break;
				}
			}
		}
	}

	// pack multislot instructions into alu_packed_node
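	// AF_4V ops (e.g. DOT4) occupy all four vector slots, and on cayman
	// "scalar" AF_S ops are replicated over the vector slots as well, so
	// their per-slot nodes are collected into a single packed node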
	alu_packed_node *p = NULL;
	for (node_iterator N, I = g->begin(), E = g->end(); I != E; I = N) {
		N = I + 1;

		alu_node *a = static_cast<alu_node*>(*I);
		unsigned sflags = a->bc.slot_flags;

		if (sflags == AF_4V || (ctx.is_cayman() && sflags == AF_S)) {
			if (!p)
				p = sh->create_alu_packed();

			a->remove();
			p->push_back(a);
		}
	}

	if (p) {
		g->push_front(p);

		if (p->count() == 3 && ctx.is_cayman()) {
			// cayman's scalar instruction that can use 3 or 4 slots

			// FIXME for simplicity we'll always add 4th slot,
			// but probably we might want to always remove 4th slot and make
			// sure that regalloc won't choose 'w' component for dst

			alu_node *f = static_cast<alu_node*>(p->first);
			alu_node *a = sh->create_alu();
			a->bc = f->bc;
			a->bc.slot = SLOT_W;

			a->dst.resize(f->dst.size());
			a->src = f->src;
			p->push_back(a);
		}
	}

	return 0;
}
int bc_parser::decode_fetch_clause(cf_node* cf) {
	int r;
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1;

	cf->subtype = NST_TEX_CLAUSE;

	while (cnt--) {
		fetch_node *n = sh->create_fetch();
		cf->push_back(n);
		if ((r = dec->decode_fetch(i, n->bc)))
			return r;
		if (n->bc.src_rel || n->bc.dst_rel)
			gpr_reladdr = true;
	}
	return 0;
}
int bc_parser::prepare_fetch_clause(cf_node *cf) {

	vvec grad_v, grad_h;

	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {

		fetch_node *n = static_cast<fetch_node*>(*I);
		assert(n->is_valid());

		unsigned flags = n->bc.op_ptr->flags;

		unsigned vtx = flags & FF_VTX;
		unsigned num_src = vtx ? ctx.vtx_src_num : 4;

		n->dst.resize(4);

		if (flags & (FF_SETGRAD | FF_USEGRAD | FF_GETGRAD)) {
			sh->uses_gradients = true;
		}
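		// SET_GRADIENTS_H/V supply explicit derivatives for a following
		// sample instruction; capture their operands into grad_h/grad_v
		// here and attach them to the sampling instruction as extra
		// sources below, so the dependency is visible in the IR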
		if (flags & FF_SETGRAD) {

			vvec *grad = NULL;

			switch (n->bc.op) {
			case FETCH_OP_SET_GRADIENTS_V:
				grad = &grad_v;
				break;
			case FETCH_OP_SET_GRADIENTS_H:
				grad = &grad_h;
				break;
			default:
				assert(!"unexpected SET_GRAD instruction");
				return -1;
			}

			if (grad->empty())
				grad->resize(4);

			for(unsigned s = 0; s < 4; ++s) {
				unsigned sw = n->bc.src_sel[s];
				if (sw <= SEL_W)
					(*grad)[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							sw, false);
				else if (sw == SEL_0)
					(*grad)[s] = sh->get_const_value(0.0f);
				else if (sw == SEL_1)
					(*grad)[s] = sh->get_const_value(1.0f);
			}
		} else {

			if (flags & FF_USEGRAD) {
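				// a gradient-using fetch carries its coordinate in
				// src[0..3], the V gradients in src[4..7] and the H
				// gradients in src[8..11]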
				n->src.resize(12);
				std::copy(grad_v.begin(), grad_v.end(), n->src.begin() + 4);
				std::copy(grad_h.begin(), grad_h.end(), n->src.begin() + 8);
			} else {
				n->src.resize(4);
			}

			for(int s = 0; s < 4; ++s) {
				if (n->bc.dst_sel[s] != SEL_MASK)
					n->dst[s] = sh->get_gpr_value(false, n->bc.dst_gpr, s, false);
				// NOTE: it doesn't matter here which components of the result we
				// are using, but original n->bc.dst_sel should be taken into
				// account when building the bytecode
			}

			for(unsigned s = 0; s < num_src; ++s) {
				if (n->bc.src_sel[s] <= SEL_W)
					n->src[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							n->bc.src_sel[s], false);
			}
		}
	}

	return 0;
}
int bc_parser::prepare_ir() {

	for(id_cf_map::iterator I = cf_map.begin(), E = cf_map.end(); I != E; ++I) {
		cf_node *c = *I;

		if (!c)
			continue;

		unsigned flags = c->bc.op_ptr->flags;

		if (flags & CF_ALU) {
			prepare_alu_clause(c);
		} else if (flags & CF_FETCH) {
			prepare_fetch_clause(c);
		} else if (c->bc.op == CF_OP_CALL_FS) {
			c->flags |= NF_SCHEDULE_EARLY | NF_DONT_MOVE;
		} else if (flags & CF_LOOP_START) {
			prepare_loop(c);
		} else if (c->bc.op == CF_OP_JUMP) {
			prepare_if(c);
		} else if (c->bc.op == CF_OP_LOOP_END) {
			loop_stack.pop();
		} else if (c->bc.op == CF_OP_LOOP_CONTINUE) {
			assert(!loop_stack.empty());
			repeat_node *rep = sh->create_repeat(loop_stack.top());
			if (c->parent->first != c)
				rep->move(c->parent->first, c);
			c->replace_with(rep);
			sh->simplify_dep_rep(rep);
		} else if (c->bc.op == CF_OP_LOOP_BREAK) {
			assert(!loop_stack.empty());
			depart_node *dep = sh->create_depart(loop_stack.top());
			if (c->parent->first != c)
				dep->move(c->parent->first, c);
			c->replace_with(dep);
			sh->simplify_dep_rep(dep);
		} else if (flags & CF_EXP) {

			// unroll burst exports
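			// a burst of N exports writes N consecutive GPRs to N
			// consecutive target locations; rewrite it as N separate
			// CF_OP_EXPORT nodes, bumping rw_gpr and array_base for each
			// clone and moving the end_of_program flag to the last one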
			assert(c->bc.op == CF_OP_EXPORT || c->bc.op == CF_OP_EXPORT_DONE);

			c->bc.set_op(CF_OP_EXPORT);

			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				c->src.resize(4);

				for(int s = 0; s < 4; ++s) {
					switch (c->bc.sel[s]) {
					case SEL_0:
						c->src[s] = sh->get_const_value(0.0f);
						break;
					case SEL_1:
						c->src[s] = sh->get_const_value(1.0f);
						break;
					case SEL_MASK:
						break;
					default:
						if (c->bc.sel[s] <= SEL_W)
							c->src[s] = sh->get_gpr_value(true, c->bc.rw_gpr,
									c->bc.sel[s], false);
						else
							assert(!"invalid src_sel for export");
					}
				}

				if (!burst_count--)
					break;

				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;
				++cf_next->bc.array_base;

				c->insert_after(cf_next);
				c = cf_next;
			} while (1);

			c->bc.end_of_program = eop;
		} else if (flags & (CF_STRM | CF_RAT)) {

			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				c->src.resize(4);

				for(int s = 0; s < 4; ++s) {
					if (c->bc.comp_mask & (1 << s))
						c->src[s] =
								sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
				}

				if ((flags & CF_RAT) && (c->bc.type & 1)) { // indexed write
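					// for an indexed write the index is taken from the
					// first three channels of index_gpr, which therefore
					// become additional sources of the CF instruction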
					c->src.resize(7);
					for(int s = 0; s < 3; ++s) {
						c->src[4 + s] =
								sh->get_gpr_value(true, c->bc.index_gpr, s, false);
					}
				}

				// FIXME probably we can relax it a bit
				c->flags |= NF_DONT_HOIST | NF_DONT_MOVE;

				if (!burst_count--)
					break;

				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;

				// FIXME is it correct?
				cf_next->bc.array_base += cf_next->bc.elem_size + 1;

				c->insert_after(cf_next);
				c = cf_next;
			} while (1);

			c->bc.end_of_program = eop;
		}
	}

	assert(loop_stack.empty());
	return 0;
}
int bc_parser::prepare_loop(cf_node* c) {
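	// LOOP_START's addr points just past the matching LOOP_END, so
	// cf_map[addr - 1] is the end of the loop; the body is rewrapped into
	// region/repeat nodes, the structured control flow form used by the
	// later passes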
	cf_node *end = cf_map[c->bc.addr - 1];
	assert(end->bc.op == CF_OP_LOOP_END);
	assert(c->parent == end->parent);

	region_node *reg = sh->create_region();
	repeat_node *rep = sh->create_repeat(reg);

	reg->push_back(rep);
	c->insert_before(reg);
	rep->move(c, end->next);

	loop_stack.push(reg);
	return 0;
}
int bc_parser::prepare_if(cf_node* c) {
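	// a forward JUMP with an optional ELSE becomes structured control
	// flow: a region containing an if node conditioned on the exec mask,
	// plus two depart nodes implementing the branch paths; c->bc.addr
	// points at the ELSE (if any) or at the jump target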
	cf_node *c_else = NULL, *end = cf_map[c->bc.addr];

	BCP_DUMP(
		sblog << "parsing JUMP @" << c->bc.id;
		sblog << "\n";
	);

	if (end->bc.op == CF_OP_ELSE) {
		c_else = end;

		BCP_DUMP(
			sblog << " found ELSE : ";
			sblog << "\n";
		);

		end = cf_map[c_else->bc.addr];
	} else {
		BCP_DUMP(
			sblog << " no else\n";
		);

		c_else = end;
	}

	if (c_else->parent != c->parent)
		c_else = NULL;

	if (end->parent != c->parent)
		end = NULL;

	region_node *reg = sh->create_region();

	depart_node *dep2 = sh->create_depart(reg);
	depart_node *dep = sh->create_depart(reg);
	if_node *n_if = sh->create_if();

	c->insert_before(reg);

	if (c_else)
		dep->move(c_else, end);

	dep->push_front(n_if);
	n_if->push_back(dep2);

	n_if->cond = sh->get_special_value(SV_EXEC_MASK);

	return 0;
}

} // namespace r600_sb