/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
23 #define NOUVEAU_DEBUG 1
26 #include "nvc0_program.h"
/* Check whether the value produced by @ld can be folded directly into
 * source slot @s of @nvi (as an immediate or memory operand).
 * NOTE(review): this chunk is extraction-garbled -- original line numbers
 * are fused into the text and interior lines (returns, braces) were
 * dropped; code kept byte-identical, comments only added. */
29 nvc0_insn_can_load(struct nv_instruction
*nvi
, int s
,
30 struct nv_instruction
*ld
)
34 if (ld
->opcode
== NV_OP_MOV
&& ld
->src
[0]->value
->reg
.file
== NV_FILE_IMM
) {
/* immediate source: the op must accept an immediate in slot s */
35 if (s
> 1 || !(nvc0_op_info_table
[nvi
->opcode
].immediate
& (1 << s
)))
/* flag 4 presumably means a full 32 bit immediate is allowed, otherwise
 * the low 12 bits must be clear -- TODO confirm against the table def */
37 if (!(nvc0_op_info_table
[nvi
->opcode
].immediate
& 4))
38 if (ld
->src
[0]->value
->reg
.imm
.u32
& 0xfff)
/* memory source: the op must accept a memory operand in slot s */
41 if (!(nvc0_op_info_table
[nvi
->opcode
].memory
& (1 << s
)))
/* indirect loads cannot be folded */
44 if (ld
->indirect
>= 0)
/* presumably: only one immediate source allowed per instruction */
47 for (i
= 0; i
< 3 && nvi
->src
[i
]; ++i
)
48 if (nvi
->src
[i
]->value
->reg
.file
== NV_FILE_IMM
)
54 /* Return whether this instruction can be executed conditionally. */
56 nvc0_insn_is_predicateable(struct nv_instruction
*nvi
)
58 if (nvi
->predicate
>= 0) /* already predicated */
/* not predicateable if the op neither supports a predicate source nor is
 * a pseudo op (resolved before emission) -- TODO confirm table semantics */
60 if (!nvc0_op_info_table
[nvi
->opcode
].predicate
&&
61 !nvc0_op_info_table
[nvi
->opcode
].pseudo
)
/* Sum the reference counts of all values defined by @nvi (up to 5 defs). */
67 nvc0_insn_refcount(struct nv_instruction
*nvi
)
71 for (i
= 0; i
< 5 && nvi
->def
[i
]; ++i
) {
74 rc
+= nvi
->def
[i
]->refc
;
/* Replace every reference to @old_val in the program with @new_val.
 * NOTE(review): extraction dropped interior lines (e.g. the return of the
 * replacement count suggested by n); code kept byte-identical. */
80 nvc0_pc_replace_value(struct nv_pc
*pc
,
81 struct nv_value
*old_val
,
82 struct nv_value
*new_val
)
86 if (old_val
== new_val
)
89 for (i
= 0, n
= 0; i
< pc
->num_refs
; ++i
) {
90 if (pc
->refs
[i
]->value
== old_val
) {
/* find which source slot of the using instruction holds this ref */
92 for (s
= 0; s
< 6 && pc
->refs
[i
]->insn
->src
[s
]; ++s
)
93 if (pc
->refs
[i
]->insn
->src
[s
] == pc
->refs
[i
])
96 nv_reference(pc
, pc
->refs
[i
]->insn
, s
, new_val
);
/* GPR 63 -- presumably the hardware zero register on nvc0; TODO confirm. */
102 static INLINE boolean
103 is_gpr63(struct nv_value
*val
)
105 return (val
->reg
.file
== NV_FILE_GPR
&& val
->reg
.id
== 63);
/* Follow MOV chains from @ref and return the ultimate source if it is a
 * constant: an immediate, $r63 (zero), or a load from constant space
 * c[0]..c[15]. NOTE(review): extraction dropped the initializer of src
 * and the return statements; code kept byte-identical. */
109 nvc0_pc_find_constant(struct nv_ref
*ref
)
111 struct nv_value
*src
;
117 while (src
->insn
&& src
->insn
->opcode
== NV_OP_MOV
) {
/* modifiers on the MOV source would change the value; must be none */
118 assert(!src
->insn
->src
[0]->mod
);
119 src
= src
->insn
->src
[0]->value
;
121 if ((src
->reg
.file
== NV_FILE_IMM
) || is_gpr63(src
) ||
123 src
->insn
->opcode
== NV_OP_LD
&&
124 src
->insn
->src
[0]->value
->reg
.file
>= NV_FILE_MEM_C(0) &&
125 src
->insn
->src
[0]->value
->reg
.file
<= NV_FILE_MEM_C(15)))
/* Like nvc0_pc_find_constant, but restricted to immediates ($r63 counts
 * as immediate zero). Returns NULL if the source is not an immediate. */
131 nvc0_pc_find_immediate(struct nv_ref
*ref
)
133 struct nv_value
*src
= nvc0_pc_find_constant(ref
);
135 return (src
&& (src
->reg
.file
== NV_FILE_IMM
|| is_gpr63(src
))) ? src
: NULL
;
/* Free the pc->refs storage; the stride of 64 suggests refs are pooled in
 * chunks of 64 -- presumably; TODO confirm against the allocation site. */
139 nv_pc_free_refs(struct nv_pc
*pc
)
142 for (i
= 0; i
< pc
->num_refs
; i
+= 64)
/* Human-readable name of a CFG edge type, for debug output only. */
148 edge_name(ubyte type
)
151 case CFG_EDGE_FORWARD
: return "forward";
152 case CFG_EDGE_BACK
: return "back";
153 case CFG_EDGE_LOOP_ENTER
: return "loop";
154 case CFG_EDGE_LOOP_LEAVE
: return "break";
155 case CFG_EDGE_FAKE
: return "fake";
/* Run pass function @f over each basic block reachable from @root, visiting
 * a block only after all its non-back-edge predecessors (b->priv counts
 * arrivals); loop-leave targets are deferred on the bbb stack.
 * NOTE(review): extraction dropped most of the worklist management; code
 * kept byte-identical, comments only added. */
162 nvc0_pc_pass_in_order(struct nv_basic_block
*root
, nv_pc_pass_func f
,
165 struct nv_basic_block
*bb
[64], *bbb
[16], *b
;
176 for (j
= 1; j
>= 0; --j
) {
180 switch (b
->out_kind
[j
]) {
183 case CFG_EDGE_FORWARD
:
/* push successor once all of its predecessors have been visited */
185 if (++b
->out
[j
]->priv
== b
->out
[j
]->num_in
)
188 case CFG_EDGE_LOOP_ENTER
:
191 case CFG_EDGE_LOOP_LEAVE
:
/* defer break targets until the loop body has been processed */
192 if (!b
->out
[j
]->priv
) {
193 bbb
[pp
++] = b
->out
[j
];
208 bb
[pp
- 1] = bbb
[pp
- 1];
/* Pass callback: print a basic block header, its outgoing edges, and all
 * of its instructions. NOTE(review): the conditionals guarding the two
 * edge printfs and the initialization of i were dropped by extraction. */
214 nv_do_print_function(void *priv
, struct nv_basic_block
*b
)
216 struct nv_instruction
*i
;
218 debug_printf("=== BB %i ", b
->id
);
220 debug_printf("[%s -> %i] ", edge_name(b
->out_kind
[0]), b
->out
[0]->id
);
222 debug_printf("[%s -> %i] ", edge_name(b
->out_kind
[1]), b
->out
[1]->id
);
223 debug_printf("===\n");
228 for (; i
; i
= i
->next
)
229 nvc0_print_instruction(i
);
/* Print a whole function (MAIN or a SUBROUTINE) in CFG pass order. */
233 nvc0_print_function(struct nv_basic_block
*root
)
235 if (root
->subroutine
)
236 debug_printf("SUBROUTINE %i\n", root
->subroutine
)
238 debug_printf("MAIN\n");
240 nvc0_pc_pass_in_order(root
, nv_do_print_function
, root
);
/* Print MAIN plus all subroutines of the program for debugging. */
244 nvc0_print_program(struct nv_pc
*pc
)
247 for (i
= 0; i
< pc
->num_subroutines
+ 1; ++i
)
249 nvc0_print_function(pc
->root
[i
]);
252 #if NOUVEAU_DEBUG > 1
/* Emit one basic block and its outgoing edges in graphviz dot syntax,
 * recursing into successors not yet visited this pass (pass_seq guard).
 * Edge styling: plain = forward, green = loop enter, red = loop leave,
 * dotted = presumably the fake-edge case (case labels for the last two
 * branches were dropped by extraction). */
254 nv_do_print_cfgraph(struct nv_pc
*pc
, FILE *f
, struct nv_basic_block
*b
)
258 b
->pass_seq
= pc
->pass_seq
;
260 fprintf(f
, "\t%i [shape=box]\n", b
->id
);
262 for (i
= 0; i
< 2; ++i
) {
265 switch (b
->out_kind
[i
]) {
266 case CFG_EDGE_FORWARD
:
267 fprintf(f
, "\t%i -> %i;\n", b
->id
, b
->out
[i
]->id
);
269 case CFG_EDGE_LOOP_ENTER
:
270 fprintf(f
, "\t%i -> %i [color=green];\n", b
->id
, b
->out
[i
]->id
);
272 case CFG_EDGE_LOOP_LEAVE
:
273 fprintf(f
, "\t%i -> %i [color=red];\n", b
->id
, b
->out
[i
]->id
);
276 fprintf(f
, "\t%i -> %i;\n", b
->id
, b
->out
[i
]->id
);
279 fprintf(f
, "\t%i -> %i [style=dotted];\n", b
->id
, b
->out
[i
]->id
);
/* recurse only into blocks not yet printed this pass */
285 if (b
->out
[i
]->pass_seq
< pc
->pass_seq
)
286 nv_do_print_cfgraph(pc
, f
, b
->out
[i
]);
290 /* Print the control flow graph of subroutine @subr (0 == MAIN) to a file. */
292 nv_print_cfgraph(struct nv_pc
*pc
, const char *filepath
, int subr
)
296 f
= fopen(filepath
, "a");
/* NOTE(review): the fopen failure check, pass_seq increment, closing
 * "}" fprintf and fclose were presumably in lines dropped by extraction */
300 fprintf(f
, "digraph G {\n");
304 nv_do_print_cfgraph(pc
, f
, pc
->root
[subr
]);
/* Dump the emitted machine code as pairs of 32 bit words (8 bytes per op). */
313 nvc0_pc_print_binary(struct nv_pc
*pc
)
317 NOUVEAU_DBG("nvc0_pc_print_binary(%u ops)\n", pc
->emit_size
/ 8);
319 for (i
= 0; i
< pc
->emit_size
/ 4; i
+= 2) {
320 debug_printf("0x%08x ", pc
->emit
[i
+ 0]);
321 debug_printf("0x%08x ", pc
->emit
[i
+ 1]);
/* Emit all basic blocks in bb_list order, then append the two-word program
 * terminator (0x00001de7 0x80000000 -- presumably the EXIT encoding;
 * TODO confirm against the nvc0 ISA docs). */
329 nvc0_emit_program(struct nv_pc
*pc
)
331 uint32_t *code
= pc
->emit
;
334 NOUVEAU_DBG("emitting program: size = %u\n", pc
->emit_size
);
337 for (n
= 0; n
< pc
->num_blocks
; ++n
) {
338 struct nv_instruction
*i
;
339 struct nv_basic_block
*b
= pc
->bb_list
[n
];
341 for (i
= b
->entry
; i
; i
= i
->next
) {
342 nvc0_emit_instruction(pc
, i
);
/* emission must have advanced pc->emit exactly emit_size bytes */
347 assert(pc
->emit
== &code
[pc
->emit_size
/ 4]);
349 pc
->emit
[0] = 0x00001de7;
350 pc
->emit
[1] = 0x80000000;
356 nvc0_pc_print_binary(pc
);
358 debug_printf("not printing binary\n");
/* Top-level code generation: translate TGSI to the nv IR, run the
 * optimization (pass0), register allocation (pass1) and pre-emission
 * (pass2) passes, emit the binary, and hand code/relocs over to the
 * nvc0_program in @ti. NOTE(review): the error-handling paths (ret checks,
 * goto-out cleanup, allocation failure checks) appear to have been dropped
 * by extraction; code kept byte-identical. */
364 nvc0_generate_code(struct nvc0_translation_info
*ti
)
370 pc
= CALLOC_STRUCT(nv_pc
);
374 pc
->is_fragprog
= ti
->prog
->type
== PIPE_SHADER_FRAGMENT
;
376 pc
->root
= CALLOC(ti
->num_subrs
+ 1, sizeof(pc
->root
[0]));
381 pc
->num_subroutines
= ti
->num_subrs
;
383 ret
= nvc0_tgsi_to_nc(pc
, ti
);
386 #if NOUVEAU_DEBUG > 1
387 nvc0_print_program(pc
);
/* reload elimination is presumably unsafe when the shader performs
 * stores -- TODO confirm the meaning of require_stores */
390 pc
->opt_reload_elim
= ti
->require_stores
? FALSE
: TRUE
;
393 ret
= nvc0_pc_exec_pass0(pc
);
397 nvc0_print_program(pc
);
400 /* register allocation */
401 ret
= nvc0_pc_exec_pass1(pc
);
404 #if NOUVEAU_DEBUG > 1
405 nvc0_print_program(pc
);
406 nv_print_cfgraph(pc
, "nvc0_shader_cfgraph.dot", 0);
409 /* prepare for emission */
410 ret
= nvc0_pc_exec_pass2(pc
);
413 assert(!(pc
->emit_size
% 8));
/* + 2 words of room for the appended program terminator */
415 pc
->emit
= CALLOC(pc
->emit_size
/ 4 + 2, 4);
420 ret
= nvc0_emit_program(pc
);
424 ti
->prog
->code
= pc
->emit
;
425 ti
->prog
->code_base
= 0;
426 ti
->prog
->code_size
= pc
->emit_size
;
427 ti
->prog
->parm_size
= 0;
429 ti
->prog
->max_gpr
= MAX2(4, pc
->max_reg
[NV_FILE_GPR
] + 1);
431 ti
->prog
->relocs
= pc
->reloc_entries
;
432 ti
->prog
->num_relocs
= pc
->num_relocs
;
434 NOUVEAU_DBG("SHADER TRANSLATION - %s\n", ret
? "failure" : "success");
439 for (i
= 0; i
< pc
->num_blocks
; ++i
)
440 FREE(pc
->bb_list
[i
]);
444 /* on success, these will be referenced by struct nvc0_program */
449 if (pc
->reloc_entries
)
450 FREE(pc
->reloc_entries
);
/* Insert PHI instruction @i into the PHI group at the head of block @b.
 * NOTE(review): heavily truncated by extraction -- the list-link updates
 * between the asserts were dropped; code kept byte-identical. */
457 nvbb_insert_phi(struct nv_basic_block
*b
, struct nv_instruction
*i
)
464 assert(!b
->entry
->prev
&& b
->exit
);
472 if (b
->entry
->opcode
== NV_OP_PHI
) { /* insert after entry */
473 assert(b
->entry
== b
->exit
);
478 } else { /* insert before entry */
479 assert(b
->entry
->prev
&& b
->exit
);
481 i
->prev
= b
->entry
->prev
;
/* Append instruction @i to block @b. PHIs go into the PHI group at the
 * top of the block; an instruction appended after a terminator is swapped
 * in front of it so the terminator stays last. */
489 nvc0_insn_append(struct nv_basic_block
*b
, struct nv_instruction
*i
)
491 if (i
->opcode
== NV_OP_PHI
) {
492 nvbb_insert_phi(b
, i
);
501 if (i
->prev
&& i
->prev
->opcode
== NV_OP_PHI
)
506 b
->num_instructions
++;
/* keep the block terminator as the last instruction */
508 if (i
->prev
&& i
->prev
->terminator
)
509 nvc0_insns_permute(i
->prev
, i
);
/* Insert @ni directly after @at. Falls back to nvc0_insn_append at block
 * end -- presumably; the guarding condition and the prev/next relinking
 * lines were dropped by extraction. */
513 nvc0_insn_insert_after(struct nv_instruction
*at
, struct nv_instruction
*ni
)
516 nvc0_insn_append(at
->bb
, ni
);
524 ni
->bb
->num_instructions
++;
/* Insert @ni directly before @at: insert after, then swap the two. */
528 nvc0_insn_insert_before(struct nv_instruction
*at
, struct nv_instruction
*ni
)
530 nvc0_insn_insert_after(at
, ni
);
531 nvc0_insns_permute(at
, ni
);
/* Unlink @nvi from its basic block, dropping all of its source references
 * and fixing up the block's entry/exit/phi pointers as needed.
 * NOTE(review): several branches (exit update, phi-pointer update) lost
 * lines to extraction; code kept byte-identical. */
535 nvc0_insn_delete(struct nv_instruction
*nvi
)
537 struct nv_basic_block
*b
= nvi
->bb
;
540 /* debug_printf("REM: "); nv_print_instruction(nvi); */
/* release all source references so value refcounts stay accurate */
542 for (s
= 0; s
< 6 && nvi
->src
[s
]; ++s
)
543 nv_reference(NULL
, nvi
, s
, NULL
);
546 nvi
->next
->prev
= nvi
->prev
;
548 assert(nvi
== b
->exit
);
553 nvi
->prev
->next
= nvi
->next
;
555 if (nvi
== b
->entry
) {
556 /* PHIs don't get hooked to b->entry */
557 b
->entry
= nvi
->next
;
558 assert(!nvi
->prev
|| nvi
->prev
->opcode
== NV_OP_PHI
);
562 if (nvi
->opcode
!= NV_OP_PHI
)
563 NOUVEAU_DBG("NOTE: b->phi points to non-PHI instruction\n");
566 if (!nvi
->next
|| nvi
->next
->opcode
!= NV_OP_PHI
)
/* Swap two adjacent non-PHI instructions; @i1 must immediately precede
 * @i2. NOTE(review): the pointer-relink body and the entry/exit fixups
 * were dropped by extraction; code kept byte-identical. */
574 nvc0_insns_permute(struct nv_instruction
*i1
, struct nv_instruction
*i2
)
576 struct nv_basic_block
*b
= i1
->bb
;
578 assert(i1
->opcode
!= NV_OP_PHI
&&
579 i2
->opcode
!= NV_OP_PHI
);
580 assert(i1
->next
== i2
);
/* Add a CFG edge of kind @edge_kind from @parent to @b, recording it both
 * in the parent's out[0..1] slots and the child's in[] list (a block can
 * have at most 8 predecessors, per the assert). */
600 nvc0_bblock_attach(struct nv_basic_block
*parent
,
601 struct nv_basic_block
*b
, ubyte edge_kind
)
603 assert(b
->num_in
< 8);
/* a block has at most 2 outgoing edges; fill slot 1 only if 0 is taken */
605 if (parent
->out
[0]) {
606 assert(!parent
->out
[1]);
608 parent
->out_kind
[1] = edge_kind
;
611 parent
->out_kind
[0] = edge_kind
;
614 b
->in
[b
->num_in
] = parent
;
615 b
->in_kind
[b
->num_in
++] = edge_kind
;
618 /* NOTE: all BRKs are treated as conditional, so there are 2 outgoing BBs */
/* Return whether @d dominates @b: every non-back-edge path into @b must
 * pass through @d. Recurses up the predecessor edges. */
621 nvc0_bblock_dominated_by(struct nv_basic_block
*b
, struct nv_basic_block
*d
)
628 for (j
= 0; j
< b
->num_in
; ++j
)
629 if ((b
->in_kind
[j
] != CFG_EDGE_BACK
) &&
630 !nvc0_bblock_dominated_by(b
->in
[j
], d
))
/* j == 0 means no predecessors were examined (entry block): not dominated */
633 return j
? TRUE
: FALSE
;
636 /* check if @bf (future) can be reached from @bp (past), stop at @bt */
638 nvc0_bblock_reachable_by(struct nv_basic_block
*bf
, struct nv_basic_block
*bp
,
639 struct nv_basic_block
*bt
)
641 struct nv_basic_block
*q
[NV_PC_MAX_BASIC_BLOCKS
], *b
;
/* breadth-first worklist walk over the CFG, marking b->priv as visited;
 * wall edges are never crossed. NOTE(review): the queue setup, the success
 * test and the enqueue lines were dropped by extraction. */
655 assert(n
<= (1024 - 2));
657 for (i
= 0; i
< 2; ++i
) {
658 if (b
->out
[i
] && !IS_WALL_EDGE(b
->out_kind
[i
]) && !b
->out
[i
]->priv
) {
/* clear the priv visit marks again before returning */
664 for (--n
; n
>= 0; --n
)
/* Search from @df for a block on @b's dominance frontier: a block not
 * dominated by @b but with a (non-back-edge) predecessor that is.
 * Recurses along non-back outgoing edges.
 * NOTE(review): the return statements were dropped by extraction. */
670 static struct nv_basic_block
*
671 nvbb_find_dom_frontier(struct nv_basic_block
*b
, struct nv_basic_block
*df
)
673 struct nv_basic_block
*out
;
676 if (!nvc0_bblock_dominated_by(df
, b
)) {
677 for (i
= 0; i
< df
->num_in
; ++i
) {
/* back edges don't count for the dominance-frontier test */
678 if (df
->in_kind
[i
] == CFG_EDGE_BACK
)
680 if (nvc0_bblock_dominated_by(df
->in
[i
], b
))
684 for (i
= 0; i
< 2 && df
->out
[i
]; ++i
) {
685 if (df
->out_kind
[i
] == CFG_EDGE_BACK
)
687 if ((out
= nvbb_find_dom_frontier(b
, df
->out
[i
])))
693 struct nv_basic_block
*
694 nvc0_bblock_dom_frontier(struct nv_basic_block
*b
)
696 struct nv_basic_block
*df
;
699 for (i
= 0; i
< 2 && b
->out
[i
]; ++i
)
700 if ((df
= nvbb_find_dom_frontier(b
, b
->out
[i
])))