/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/** @file brw_fs_copy_propagation.cpp
 *
 * Support for global copy propagation in two passes: A local pass that does
 * intra-block copy (and constant) propagation, and a global pass that uses
 * dataflow analysis on the copies available at the end of each block to re-do
 * local copy propagation with more copies available.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 */
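
/* As a reading aid for the global pass: the fixed point computed by
 * fs_copy_prop_dataflow::run() below is, mirroring the bitset math in that
 * function,
 *
 *    liveout(b) = copy(b) | (livein(b) & ~kill(b))
 *    livein(b)  = AND of liveout(p) over all parents p of b
 *
 * with livein of the start block fixed to the empty set by
 * setup_initial_values().
 */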
#define ACP_HASH_SIZE 16

#include "main/bitset.h"
#include "brw_fs.h"
#include "brw_cfg.h"

namespace { /* avoid conflict with opt_copy_propagation_elements */

struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   int regs_written;
   enum opcode opcode;
   bool saturate;
};

struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block.  This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block.  This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being overwritten.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;
};
class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const;

   void *mem_ctx;

   cfg_t *cfg;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};
} /* anonymous namespace */
fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            /* opt_copy_propagate_local populates out_acp with copies created
             * in a block which are still live at the end of the block.  This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}
/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   foreach_block (block, cfg) {
      foreach_inst_in_block(fs_inst, inst, block) {
         if (inst->dst.file != GRF)
            continue;

         /* Mark ACP entries which are killed by this instruction. */
         for (int i = 0; i < num_acp; i++) {
            if (inst->overwrites_reg(acp[i]->dst) ||
                inst->overwrites_reg(acp[i]->src)) {
               BITSET_SET(bd[block->num].kill, i);
            }
         }
      }
   }

   /* Populate the initial values for the livein and liveout sets.  For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout to 0 (the empty set) and livein to ~0
    * (the universal set).
    */
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = 0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }
}
/**
 * Run the fixed-point dataflow algorithm, propagating livein and liveout
 * between blocks until nothing changes.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      /* Update liveout for all blocks. */
      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];

            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }

      /* Update livein for all blocks.  If a copy is live out of all parent
       * blocks, it's live coming in to this block.
       */
      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_livein = bd[block->num].livein[i];

            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               bd[block->num].livein[i] &= bd[parent->num].liveout[i];
            }

            if (old_livein != bd[block->num].livein[i])
               progress = true;
         }
      }
   } while (progress);
}
void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, "       livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n       copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}
static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR  ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != GRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == GRF || entry->src.file == UNIFORM);

   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   assert(entry->dst.file == GRF);
   if (inst->src[arg].reg != entry->dst.reg)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (inst->src[arg].reg_offset < entry->dst.reg_offset ||
       (inst->src[arg].reg_offset * 32 + inst->src[arg].subreg_offset +
        inst->regs_read(arg) * inst->src[arg].stride * 32) >
       (entry->dst.reg_offset + entry->regs_written) * 32)
      return false;
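
   /* (The comparison above is in bytes: reg_offset counts whole 32-byte
    * registers while subreg_offset is already in bytes, hence the "* 32"
    * factors.  E.g. reg_offset 2 plus subreg_offset 4 is byte offset 68.
    * Numbers are illustrative only.)
    */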

   /* we can't generally copy-propagate UD negations because we
    * can end up accessing the resulting values as signed integers
    * instead.  See also resolve_ud_negate() and comment in
    * fs_generator::generate_code.
    */
   if (inst->src[arg].type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if ((has_source_modifiers || entry->src.file == UNIFORM ||
        !entry->src.is_contiguous()) &&
       !inst->can_do_source_mods(brw))
      return false;

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE)
      return false;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (entry->src.stride * inst->src[arg].stride > 4)
      return false;
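
   /* For example, composing an entry with stride 2 into a source read with
    * stride 4 would require a stride of 8, which fails the check above, so
    * we give up rather than build a region the hardware can't encode.
    * (Illustrative values only.)
    */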

   /* Bail if the result of composing both strides cannot be expressed
    * as another stride.  This avoids, for example, trying to transform
    * this:
    *
    *     MOV (8) rX<1>UD rY<0;1,0>UD
    *     FOO (8) ...     rX<8;8,1>UW
    *
    * into this:
    *
    *     FOO (8) ...     rY<0;1,0>UW
    *
    * Which would have different semantics.
    */
   if (entry->src.stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   if (has_source_modifiers && entry->dst.type != inst->src[arg].type)
      return false;

   if (brw->gen >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }

   if (entry->saturate) {
      switch(inst->opcode) {
      case BRW_OPCODE_SEL:
         if (inst->src[1].file != IMM ||
             inst->src[1].fixed_hw_reg.dw1.f < 0.0 ||
             inst->src[1].fixed_hw_reg.dw1.f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }

   inst->src[arg].file = entry->src.file;
   inst->src[arg].reg = entry->src.reg;
   inst->src[arg].stride *= entry->src.stride;
   inst->saturate = inst->saturate || entry->saturate;

   switch (entry->src.file) {
   case UNIFORM:
      assert(entry->src.width == 1);

      inst->src[arg].width = entry->src.width;
      inst->src[arg].reg_offset = entry->src.reg_offset;
      inst->src[arg].subreg_offset = entry->src.subreg_offset;
      break;
   case GRF: {
      assert(entry->src.width % inst->src[arg].width == 0);
      /* In this case, we'll just leave the width alone.  The source
       * register could have different widths depending on how it is
       * being used.  For instance, if only half of the register was
       * used then we want to preserve that and continue to only use
       * that half.
       *
       * Also, we have to deal with mapping parts of vgrfs to other
       * parts of vgrfs so we have to do some reg_offset magic.
       */

      /* Compute the offset of inst->src[arg] relative to entry->dst */
      assert(entry->dst.subreg_offset == 0);
      int rel_offset = inst->src[arg].reg_offset - entry->dst.reg_offset;
      int rel_suboffset = inst->src[arg].subreg_offset;

      /* Compute the final register offset (in bytes) */
      int offset = entry->src.reg_offset * 32 + entry->src.subreg_offset;
      offset += rel_offset * 32 + rel_suboffset;
      inst->src[arg].reg_offset = offset / 32;
      inst->src[arg].subreg_offset = offset % 32;
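
      /* As a worked example (illustrative numbers, not from real input):
       * if entry->src sits at reg_offset 1 / subreg_offset 8 and the read
       * is one full register past entry->dst (rel_offset 1, rel_suboffset
       * 0), then offset = 1*32 + 8 + 1*32 + 0 = 72, giving reg_offset 2
       * and subreg_offset 8.
       */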
      break;
   }
   default:
      unreachable("Invalid register file");
   }

   if (!inst->src[arg].abs) {
      inst->src[arg].abs = entry->src.abs;
      inst->src[arg].negate ^= entry->src.negate;
   }

   return true;
}
bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != GRF)
         continue;

      assert(entry->dst.file == GRF);
      if (inst->src[i].reg != entry->dst.reg)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (inst->src[i].reg_offset < entry->dst.reg_offset ||
          (inst->src[i].reg_offset * 32 + inst->src[i].subreg_offset +
           inst->regs_read(i) * inst->src[i].stride * 32) >
          (entry->dst.reg_offset + entry->regs_written) * 32)
         continue;

      fs_reg val = entry->src;
      val.effective_width = inst->src[i].effective_width;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((brw->gen >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.fixed_hw_reg)) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((brw->gen >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.fixed_hw_reg)) {
            continue;
         }
      }

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         if (brw->gen < 8)
            break;
         /* fallthrough */
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_ASR:
      case BRW_OPCODE_SHL:
      case BRW_OPCODE_SHR:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case BRW_OPCODE_MUL:
      case BRW_OPCODE_ADD:
      case BRW_OPCODE_OR:
      case BRW_OPCODE_AND:
      case BRW_OPCODE_XOR:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             */
            if ((inst->opcode == BRW_OPCODE_MUL ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test.
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;

      case SHADER_OPCODE_RCP:
         /* The hardware doesn't do math on immediate values
          * (because why are you doing that, seriously?), but
          * the correct answer is to just constant fold it
          * anyway.
          */
         if (inst->src[0].fixed_hw_reg.dw1.f != 0.0f) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = val;
            inst->src[0].fixed_hw_reg.dw1.f = 1.0f / inst->src[0].fixed_hw_reg.dw1.f;
            progress = true;
         }
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}
static bool
can_propagate_from(fs_inst *inst)
{
   return (inst->opcode == BRW_OPCODE_MOV &&
           inst->dst.file == GRF &&
           ((inst->src[0].file == GRF &&
             (inst->src[0].reg != inst->dst.reg ||
              inst->src[0].reg_offset != inst->dst.reg_offset)) ||
            inst->src[0].file == UNIFORM ||
            inst->src[0].file == IMM) &&
           inst->src[0].type == inst->dst.type &&
           !inst->is_partial_write());
}
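
/* As an illustration of the predicate above: a plain, full-width
 * "mov vgrf6, vgrf4" (or a MOV from a uniform or an immediate) is a
 * candidate for the ACP, while a MOV that is a partial write, converts
 * types, or copies a register onto itself is not.  (Register names are
 * illustrative.)
 */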

/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagate_local(void *copy_prop_ctx, bblock_t *block,
                                     exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != GRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].reg % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;

            if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* kill the destination from the ACP */
      if (inst->dst.file == GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.reg % ACP_HASH_SIZE]) {
            if (inst->overwrites_reg(entry->dst)) {
               entry->remove();
            }
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               if (inst->overwrites_reg(entry->src))
                  entry->remove();
            }
         }
      }

      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = ralloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->regs_written = inst->regs_written;
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == GRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int regs_written = ((inst->src[i].effective_width *
                                 type_sz(inst->src[i].type)) + 31) / 32;
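            /* (That is, effective_width channels of type_sz(...) bytes
             * each, rounded up to whole 32-byte registers.)
             */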
            if (inst->src[i].file == GRF) {
               acp_entry *entry = ralloc(copy_prop_ctx, acp_entry);
               entry->dst = inst->dst;
               entry->dst.reg_offset = offset;
               entry->dst.width = inst->src[i].effective_width;
               entry->src = inst->src[i];
               entry->regs_written = regs_written;
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
               }
            }

            offset += regs_written;
         }
      }
   }

   return progress;
}
bool
fs_visitor::opt_copy_propagate()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagate_local(copy_prop_ctx, block,
                                          out_acp[block->num]) || progress;
   }

   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.reg % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagate_local(copy_prop_ctx, block, in_acp) || progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];

   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_live_intervals();

   return progress;
}