/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define BCP_DUMP(q) do { q } while (0)

#include "r600_pipe.h"
#include "r600_shader.h"

#include "sb_shader.h"

namespace r600_sb {

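// bc_parser converts the final r600/evergreen hardware bytecode back into
// the sb IR: decode() walks the control-flow (CF), ALU and fetch streams and
// builds raw nodes, prepare() then resolves operands into sb values and
// restructures the flat CF list into loops and conditionals.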
int bc_parser::decode() {

	dec = new bc_decoder(ctx, dw, bc_ndw);

	shader_target t = TARGET_UNKNOWN;

	if (pshader) {
		switch (bc->type) {
		case TGSI_PROCESSOR_FRAGMENT: t = TARGET_PS; break;
		case TGSI_PROCESSOR_VERTEX:
			t = pshader->vs_as_es ? TARGET_ES : TARGET_VS;
			break;
		case TGSI_PROCESSOR_GEOMETRY: t = TARGET_GS; break;
		case TGSI_PROCESSOR_COMPUTE: t = TARGET_COMPUTE; break;
		default: assert(!"unknown shader target"); return -1; break;
		}
	} else {
		if (bc->type == TGSI_PROCESSOR_COMPUTE)
			t = TARGET_COMPUTE;
	}

	sh = new shader(ctx, t, bc->debug_id);
	sh->safe_math = sb_context::safe_math || (t == TARGET_COMPUTE);

	int r = decode_shader();

	sh->nstack = bc->nstack;

	return r;
}

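// Decode CF instructions (two dwords each) starting at dword 0; keep going
// until an end-of-program marker has been seen and everything up to the
// highest branch target recorded in max_cf has been decoded.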
int bc_parser::decode_shader() {
	int r = 0;
	unsigned i = 0;
	bool eop = false;

	do {
		if ((r = decode_cf(i, eop)))
			return r;

	} while (!eop || (i >> 1) <= max_cf);

	return 0;
}

int bc_parser::prepare() {
	int r = 0;
	if ((r = parse_decls()))
		return r;
	if ((r = prepare_ir()))
		return r;
	return 0;
}

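// Translate the r600_shader declarations into sb inputs and GPR arrays:
// mark the GPRs that the hardware preloads (R0/R1 for compute and GS, R0 for
// VS/ES, PS inputs and interpolants) as live inputs and register the
// indirectly addressed register ranges.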
int bc_parser::parse_decls() {

	if (!pshader) {
		sh->add_gpr_array(0, bc->ngpr, 0x0F);

		// compute shaders have some values preloaded in R0, R1
		sh->add_input(0 /* GPR */, true /* preloaded */, 0x0F /* mask */);
		sh->add_input(1 /* GPR */, true /* preloaded */, 0x0F /* mask */);
		return 0;
	}

	if (pshader->indirect_files & ~(1 << TGSI_FILE_CONSTANT)) {

		assert(pshader->num_arrays);

		if (pshader->num_arrays) {
			for (unsigned i = 0; i < pshader->num_arrays; ++i) {
				r600_shader_array &a = pshader->arrays[i];
				sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
			}
		} else {
			sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
		}
	}

	if (sh->target == TARGET_VS || sh->target == TARGET_ES)
		sh->add_input(0, 1, 0x0F);
	else if (sh->target == TARGET_GS) {
		sh->add_input(0, 1, 0x0F);
		sh->add_input(1, 1, 0x0F);
	}

	bool ps_interp = ctx.hw_class >= HW_CLASS_EVERGREEN
			&& sh->target == TARGET_PS;

	unsigned linear = 0, persp = 0, centroid = 1;

	for (unsigned i = 0; i < pshader->ninput; ++i) {
		r600_shader_io &in = pshader->input[i];
		bool preloaded = sh->target == TARGET_PS &&
				!(ps_interp && in.spi_sid);
		sh->add_input(in.gpr, preloaded, /*in.write_mask*/ 0x0F);
		if (ps_interp && in.spi_sid) {
			if (in.interpolate == TGSI_INTERPOLATE_LINEAR ||
					in.interpolate == TGSI_INTERPOLATE_COLOR)
				linear = 1;
			else if (in.interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
				persp = 1;
			if (in.centroid)
				centroid = 2;
		}
	}

	if (ps_interp) {
		unsigned mask = (1 << (2 * (linear + persp) * centroid)) - 1;
		unsigned gpr = 0;

		while (mask) {
			sh->add_input(gpr, true, mask & 0x0F);
			++gpr;
			mask >>= 4;
		}
	}

	return 0;
}

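// Decode a single CF instruction at dword index i, record it in cf_map by
// its CF id, and decode the attached ALU or fetch clause if it has one.
// Branch targets extend max_cf so decode_shader() keeps going past an early
// end-of-program marker.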
int bc_parser::decode_cf(unsigned &i, bool &eop) {

	int r;

	cf_node *cf = sh->create_cf();
	sh->root->push_back(cf);

	unsigned id = i >> 1;

	cf->bc.id = id;

	if (cf_map.size() < id + 1)
		cf_map.resize(id + 1);

	cf_map[id] = cf;

	if ((r = dec->decode_cf(i, cf->bc)))
		return r;

	cf_op_flags flags = (cf_op_flags)cf->bc.op_ptr->flags;

	if (flags & CF_ALU) {
		if ((r = decode_alu_clause(cf)))
			return r;
	} else if (flags & CF_FETCH) {
		if ((r = decode_fetch_clause(cf)))
			return r;
	} else if (flags & CF_EXP) {
		assert(!cf->bc.rw_rel);
	} else if (flags & CF_MEM) {
		assert(!cf->bc.rw_rel);
	} else if (flags & CF_BRANCH) {
		if (cf->bc.addr > max_cf)
			max_cf = cf->bc.addr;
	}

	eop = cf->bc.end_of_program || cf->bc.op == CF_OP_CF_END ||
			cf->bc.op == CF_OP_RET;

	return 0;
}

int bc_parser::decode_alu_clause(cf_node* cf) {
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1, gcnt;

	cf->subtype = NST_ALU_CLAUSE;

	cgroup = 0;
	memset(slots[0], 0, 5*sizeof(slots[0][0]));

	do {
		decode_alu_group(cf, i, gcnt);
		assert(gcnt <= cnt);
		cnt -= gcnt;
	} while (cnt);

	return 0;
}

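// Decode one ALU group: up to five instructions (slots x, y, z, w, trans)
// terminated by the 'last' bit, followed by the literal constants used by
// the group. Literals are stored as pairs of dwords after the instructions,
// so the used channels are collected in literal_mask and the count is
// rounded up to an even number of dwords.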
int bc_parser::decode_alu_group(cf_node* cf, unsigned &i, unsigned &gcnt) {
	int r;
	alu_node *n;
	alu_group_node *g = sh->create_alu_group();

	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));
	gcnt = 0;

	unsigned literal_mask = 0;

	do {
		n = sh->create_alu();
		g->push_back(n);

		if ((r = dec->decode_alu(i, n->bc)))
			return r;

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		gcnt++;

	} while (gcnt <= 5 && !n->bc.last);

	assert(n->bc.last);

	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		n = static_cast<alu_node*>(*I);

		for (int k = 0; k < n->bc.op_ptr->src_count; ++k) {
			bc_alu_src &src = n->bc.src[k];

			if (src.sel == ALU_SRC_LITERAL) {
				literal_mask |= (1 << src.chan);
				src.value.u = dw[i + src.chan];
			}
		}
	}

	unsigned literal_ndw = 0;
	while (literal_mask) {
		g->literals.push_back(dw[i + literal_ndw]);
		++literal_ndw;
		literal_mask >>= 1;
	}

	literal_ndw = (literal_ndw + 1) & ~1u;

	i += literal_ndw;
	gcnt += literal_ndw >> 1;

	cf->push_back(g);
	return 0;
}

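// Second pass over a decoded ALU clause: convert each group's operands and
// results into sb values.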
int bc_parser::prepare_alu_clause(cf_node* cf) {

	// loop over alu groups
	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
		assert(I->subtype == NST_ALU_GROUP);
		alu_group_node *g = static_cast<alu_group_node*>(*I);
		prepare_alu_group(cf, g);
	}

	return 0;
}

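// For every instruction in the group: re-run slot assignment (slots[] keeps
// the previous and current group so PV/PS references can be resolved),
// create dst/src values for GPRs, kcache and inline constants, and mark
// predicate/kill/MOVA instructions so later passes don't move them.
//
// Kcache addressing: a kcache sel selects one of the kcache sets bound to
// the CF clause and a 0..31 dword offset, so the final constant-buffer
// address is (kc.addr << 4) + (sel & 0x1F) within bank kc.bank.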
int bc_parser::prepare_alu_group(cf_node* cf, alu_group_node *g) {

	alu_node *n;

	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));

	for (node_iterator I = g->begin(), E = g->end();
			I != E; ++I) {

		n = static_cast<alu_node*>(*I);

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		unsigned src_count = n->bc.op_ptr->src_count;

		if (ctx.alu_slots(n->bc.op) & AF_4SLOT)
			n->flags |= NF_ALU_4SLOT;

		n->src.resize(src_count);

		unsigned flags = n->bc.op_ptr->flags;

		if (flags & AF_PRED) {
			n->dst.resize(3);
			if (n->bc.update_pred)
				n->dst[1] = sh->get_special_value(SV_ALU_PRED);
			if (n->bc.update_exec_mask)
				n->dst[2] = sh->get_special_value(SV_EXEC_MASK);

			n->flags |= NF_DONT_HOIST;

		} else if (flags & AF_KILL) {

			n->dst.resize(2);
			n->dst[1] = sh->get_special_value(SV_VALID_MASK);

			n->flags |= NF_DONT_HOIST | NF_DONT_MOVE |
					NF_DONT_KILL | NF_SCHEDULE_EARLY;

		} else {
			n->dst.resize(1);
		}

		if (flags & AF_MOVA) {

			n->dst[0] = sh->get_special_value(SV_AR_INDEX);

			n->flags |= NF_DONT_HOIST;

		} else if (n->bc.op_ptr->src_count == 3 || n->bc.write_mask) {
			assert(!n->bc.dst_rel || n->bc.index_mode == INDEX_AR_X);

			value *v = sh->get_gpr_value(false, n->bc.dst_gpr, n->bc.dst_chan,
					n->bc.dst_rel);

			n->dst[0] = v;
		}

		if (n->bc.pred_sel) {
			sh->has_alu_predication = true;
			n->pred = sh->get_special_value(SV_ALU_PRED);
		}

		for (unsigned s = 0; s < src_count; ++s) {
			bc_alu_src &src = n->bc.src[s];

			if (src.sel == ALU_SRC_LITERAL) {
				n->src[s] = sh->get_const_value(src.value);
			} else if (src.sel == ALU_SRC_PS || src.sel == ALU_SRC_PV) {
				unsigned pgroup = !cgroup, prev_slot = src.sel == ALU_SRC_PS ?
						SLOT_TRANS : src.chan;

				// XXX shouldn't happen but llvm backend uses PS on cayman
				if (prev_slot == SLOT_TRANS && ctx.is_cayman())
					prev_slot = SLOT_X;

				alu_node *prev_alu = slots[pgroup][prev_slot];
				assert(prev_alu);

				if (!prev_alu->dst[0]) {
					value *t = sh->create_temp_value();
					prev_alu->dst[0] = t;
				}

				value *d = prev_alu->dst[0];

				if (d->is_rel()) {
					d = sh->get_gpr_value(true, prev_alu->bc.dst_gpr,
							prev_alu->bc.dst_chan,
							prev_alu->bc.dst_rel);
				}

				n->src[s] = d;
			} else if (ctx.is_kcache_sel(src.sel)) {
				unsigned sel = src.sel, kc_addr;
				unsigned kc_set = ((sel >> 7) & 2) + ((sel >> 5) & 1);

				bc_kcache &kc = cf->bc.kc[kc_set];
				kc_addr = (kc.addr << 4) + (sel & 0x1F);
				n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan);
			} else if (src.sel < MAX_GPR) {
				value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);

				n->src[s] = v;

			} else if (src.sel >= ALU_SRC_PARAM_OFFSET) {
				// using slot for value channel because in fact the slot
				// determines the channel that is loaded by INTERP_LOAD_P0
				// (and maybe some others).
				// otherwise GVN will consider INTERP_LOAD_P0s with the same
				// param index as equal instructions and leave only one of them
				n->src[s] = sh->get_special_ro_value(sel_chan(src.sel,
						n->bc.slot));
			} else {
				switch (src.sel) {
				case ALU_SRC_0:
					n->src[s] = sh->get_const_value(0);
					break;
				case ALU_SRC_0_5:
					n->src[s] = sh->get_const_value(0.5f);
					break;
				case ALU_SRC_1:
					n->src[s] = sh->get_const_value(1.0f);
					break;
				case ALU_SRC_1_INT:
					n->src[s] = sh->get_const_value(1);
					break;
				case ALU_SRC_M_1_INT:
					n->src[s] = sh->get_const_value(-1);
					break;
				default:
					n->src[s] = sh->get_special_ro_value(src.sel);
					break;
				}
			}
		}
	}

	// pack multislot instructions into alu_packed_node

	alu_packed_node *p = NULL;
	for (node_iterator N, I = g->begin(), E = g->end(); I != E; I = N) {
		N = I + 1;
		alu_node *a = static_cast<alu_node*>(*I);
		unsigned sflags = a->bc.slot_flags;

		if (sflags == AF_4V || (ctx.is_cayman() && sflags == AF_S)) {
			if (!p) {
				p = sh->create_alu_packed();
				a->insert_before(p);
			}
			p->push_back(a);
		}
	}

	if (p) {
		if (p->count() == 3 && ctx.is_cayman()) {
			// cayman's scalar instruction that can use 3 or 4 slots

			// FIXME for simplicity we'll always add 4th slot,
			// but probably we might want to always remove 4th slot and make
			// sure that regalloc won't choose 'w' component for dst

			alu_node *f = static_cast<alu_node*>(p->first);
			alu_node *a = sh->create_alu();
			a->src = f->src;
			a->dst.resize(f->dst.size());
			a->bc = f->bc;
			a->bc.slot = SLOT_W;
			p->push_back(a);
		}
	}

	return 0;
}

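// Decode a texture/vertex fetch clause: cf->bc.count + 1 fetch instructions
// starting at the clause address.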
int bc_parser::decode_fetch_clause(cf_node* cf) {
	int r;
	unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1;

	cf->subtype = NST_TEX_CLAUSE;

	while (cnt--) {
		fetch_node *n = sh->create_fetch();
		cf->push_back(n);
		if ((r = dec->decode_fetch(i, n->bc)))
			return r;
		if (n->bc.src_rel || n->bc.dst_rel)
			gpr_reladdr = true;
	}
	return 0;
}

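// Build values for fetch instructions. SET_GRADIENTS_H/V don't sample
// anything themselves: their sources are remembered in grad_h/grad_v and
// appended to the source vector of the following gradient-using sample
// (sources 4..7 get the vertical, 8..11 the horizontal gradients, as laid
// out by the std::copy calls below).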
int bc_parser::prepare_fetch_clause(cf_node *cf) {

	vvec grad_v, grad_h;

	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {

		fetch_node *n = static_cast<fetch_node*>(*I);
		assert(n->is_valid());

		unsigned flags = n->bc.op_ptr->flags;

		unsigned vtx = flags & FF_VTX;
		unsigned num_src = vtx ? ctx.vtx_src_num : 4;

		n->dst.resize(4);

		if (flags & (FF_SETGRAD | FF_USEGRAD | FF_GETGRAD)) {
			sh->uses_gradients = true;
		}

		if (flags & FF_SETGRAD) {

			vvec *grad = NULL;

			switch (n->bc.op) {
			case FETCH_OP_SET_GRADIENTS_V:
				grad = &grad_v;
				break;
			case FETCH_OP_SET_GRADIENTS_H:
				grad = &grad_h;
				break;
			default:
				assert(!"unexpected SET_GRAD instruction");
				return -1;
			}

			if (grad->empty())
				grad->resize(4);

			for (unsigned s = 0; s < 4; ++s) {
				unsigned sw = n->bc.src_sel[s];
				if (sw <= SEL_W)
					(*grad)[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							sw, false);
				else if (sw == SEL_0)
					(*grad)[s] = sh->get_const_value(0.0f);
				else if (sw == SEL_1)
					(*grad)[s] = sh->get_const_value(1.0f);
			}

		} else {

			if (flags & FF_USEGRAD) {
				n->src.resize(12);
				std::copy(grad_v.begin(), grad_v.end(), n->src.begin() + 4);
				std::copy(grad_h.begin(), grad_h.end(), n->src.begin() + 8);
			} else {
				n->src.resize(4);
			}

			for (int s = 0; s < 4; ++s) {
				if (n->bc.dst_sel[s] != SEL_MASK)
					n->dst[s] = sh->get_gpr_value(false, n->bc.dst_gpr,
							s, false);
				// NOTE: it doesn't matter here which components of the result
				// we are using, but original n->bc.dst_sel should be taken
				// into account when building the bytecode
			}

			for (unsigned s = 0; s < num_src; ++s) {
				if (n->bc.src_sel[s] <= SEL_W)
					n->src[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							n->bc.src_sel[s], false);
			}
		}
	}

	return 0;
}

int bc_parser::prepare_ir() {

	for (id_cf_map::iterator I = cf_map.begin(), E = cf_map.end();
			I != E; ++I) {
		cf_node *c = *I;

		if (!c)
			continue;

		unsigned flags = c->bc.op_ptr->flags;

		if (flags & CF_ALU) {
			prepare_alu_clause(c);
		} else if (flags & CF_FETCH) {
			prepare_fetch_clause(c);
		} else if (c->bc.op == CF_OP_CALL_FS) {
			c->flags |= NF_SCHEDULE_EARLY | NF_DONT_MOVE;
		} else if (flags & CF_LOOP_START) {
			prepare_loop(c);
		} else if (c->bc.op == CF_OP_JUMP) {
			prepare_if(c);
		} else if (c->bc.op == CF_OP_LOOP_END) {
			loop_stack.pop();
		} else if (c->bc.op == CF_OP_LOOP_CONTINUE) {
			assert(!loop_stack.empty());
			repeat_node *rep = sh->create_repeat(loop_stack.top());
			if (c->parent->first != c)
				rep->move(c->parent->first, c);
			c->replace_with(rep);
			sh->simplify_dep_rep(rep);
		} else if (c->bc.op == CF_OP_LOOP_BREAK) {
			assert(!loop_stack.empty());
			depart_node *dep = sh->create_depart(loop_stack.top());
			if (c->parent->first != c)
				dep->move(c->parent->first, c);
			c->replace_with(dep);
			sh->simplify_dep_rep(dep);
		} else if (flags & CF_EXP) {

			// unroll burst exports

			assert(c->bc.op == CF_OP_EXPORT || c->bc.op == CF_OP_EXPORT_DONE);

			c->bc.set_op(CF_OP_EXPORT);

			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				c->src.resize(4);

				for (int s = 0; s < 4; ++s) {
					switch (c->bc.sel[s]) {
					case SEL_0:
						c->src[s] = sh->get_const_value(0.0f);
						break;
					case SEL_1:
						c->src[s] = sh->get_const_value(1.0f);
						break;
					case SEL_MASK:
						break;
					default:
						if (c->bc.sel[s] <= SEL_W)
							c->src[s] = sh->get_gpr_value(true, c->bc.rw_gpr,
									c->bc.sel[s], false);
						else
							assert(!"invalid src_sel for export");
					}
				}

				if (!burst_count--)
					break;

				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;
				++cf_next->bc.array_base;

				c->insert_after(cf_next);
				c = cf_next;

			} while (1);

			c->bc.end_of_program = eop;
		} else if (flags & CF_MEM) {

			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				c->src.resize(4);

				for (int s = 0; s < 4; ++s) {
					if (c->bc.comp_mask & (1 << s))
						c->src[s] =
								sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
				}

				if (((flags & CF_RAT) || (!(flags & CF_STRM))) &&
						(c->bc.type & 1)) { // indexed write
					c->src.resize(8);
					for (int s = 0; s < 3; ++s) {
						c->src[4 + s] =
								sh->get_gpr_value(true, c->bc.index_gpr,
										s, false);
					}
				}

				// FIXME probably we can relax it a bit
				c->flags |= NF_DONT_HOIST | NF_DONT_MOVE;

				if (!burst_count--)
					break;

				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;

				// FIXME is it correct?
				cf_next->bc.array_base += cf_next->bc.elem_size + 1;

				c->insert_after(cf_next);
				c = cf_next;

			} while (1);

			c->bc.end_of_program = eop;
		}
	}

	assert(loop_stack.empty());
	return 0;
}

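// LOOP_START's address points one past the matching LOOP_END, so
// cf_map[addr - 1] is the end of the loop body; wrap the body in a
// region+repeat and remember the region for BREAK/CONTINUE handling.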
int bc_parser::prepare_loop(cf_node* c) {

	cf_node *end = cf_map[c->bc.addr - 1];
	assert(end->bc.op == CF_OP_LOOP_END);
	assert(c->parent == end->parent);

	region_node *reg = sh->create_region();
	repeat_node *rep = sh->create_repeat(reg);

	reg->push_back(rep);
	c->insert_before(reg);
	rep->move(c, end->next);

	loop_stack.push(reg);
	return 0;
}

int bc_parser::prepare_if(cf_node* c) {
	cf_node *c_else = NULL, *end = cf_map[c->bc.addr];

	BCP_DUMP(
		sblog << "parsing JUMP @" << c->bc.id;
		sblog << "\n";
	);

	if (end->bc.op == CF_OP_ELSE) {
		BCP_DUMP(
			sblog << " found ELSE : ";
			sblog << "\n";
		);

		c_else = end;
		end = cf_map[c_else->bc.addr];
	} else {
		BCP_DUMP(
			sblog << " no else\n";
		);

		c_else = end;
	}

	if (c_else->parent != c->parent)
		c_else = NULL;

	if (end->parent != c->parent)
		end = NULL;

	region_node *reg = sh->create_region();

	depart_node *dep2 = sh->create_depart(reg);
	depart_node *dep = sh->create_depart(reg);
	if_node *n_if = sh->create_if();

	c->insert_before(reg);

	dep->move(c_else, end);

	dep->push_front(n_if);
	n_if->push_back(dep2);

	n_if->cond = sh->get_special_value(SV_EXEC_MASK);

	return 0;
}

} // namespace r600_sb