/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/** @file brw_fs_copy_propagation.cpp
 *
 * Support for global copy propagation in two passes: A local pass that does
 * intra-block copy (and constant) propagation, and a global pass that uses
 * dataflow analysis on the copies available at the end of each block to re-do
 * local copy propagation with more copies available.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 12.5 (p356).
 */
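/* In rough set notation, the dataflow computed below converges on:
 *
 *    livein(b)  = intersection over parents p of (liveout(p) | undef(p))
 *    liveout(b) = copy(b) | (livein(b) & ~kill(b))
 *
 * where copy, kill and undef are the per-block sets defined in struct
 * block_data.  This restatement is only a reading aid for the code in
 * fs_copy_prop_dataflow::setup_initial_values() and run().
 */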
#define ACP_HASH_SIZE 64

#include "util/bitset.h"
#include "util/u_math.h"

#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"

using namespace brw;
namespace { /* avoid conflict with opt_copy_propagation_elements */
struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   unsigned global_idx;
   unsigned size_written;
   unsigned size_read;
   enum opcode opcode;
   bool saturate;
};

struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block.  This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block.  This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being overwritten.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are guaranteed to
    * have a fully uninitialized destination at the end of this block.
    */
   BITSET_WORD *undef;
};
class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         const fs_live_variables &live,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const UNUSED;

   void *mem_ctx;
   cfg_t *cfg;
   const fs_live_variables &live;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};
} /* anonymous namespace */
fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             const fs_live_variables &live,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg), live(live)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].undef = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            entry->global_idx = next_acp;

            /* opt_copy_propagation_local populates out_acp with copies created
             * in a block which are still live at the end of the block.  This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}
/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   {
      /* Create a temporary table of ACP entries which we'll use for efficient
       * look-up.  Unfortunately, we have to do this in two steps because we
       * have to match both sources and destinations and an ACP entry can only
       * be in one list at a time.
       *
       * We choose to make the table size between num_acp/2 and num_acp/4 to
       * try and trade off between the time it takes to initialize the table
       * via exec_list constructors or make_empty() and the cost of
       * collisions.  In practice, it doesn't appear to matter too much what
       * size we make the table as long as it's roughly the same order of
       * magnitude as num_acp.  We get most of the benefit of the table
       * approach even if we use a table of size ACP_HASH_SIZE though a
       * full-sized table is 1-2% faster in practice.
       */
      unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
      acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
      exec_list *acp_table = new exec_list[acp_table_size];
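
      /* acp_table_size is a power of two (either a util_next_power_of_two()
       * result or ACP_HASH_SIZE, itself a power of two), so masking with
       * (acp_table_size - 1) below is equivalent to taking the reg_space()
       * bucket index modulo the table size.
       */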
      /* First, get all the KILLs for instructions which overwrite ACP
       * destinations.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->dst) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->dst, entry->size_written))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      /* Clear the table for the second pass */
      for (unsigned i = 0; i < acp_table_size; i++)
         acp_table[i].make_empty();

      /* Next, get all the KILLs for instructions which overwrite ACP
       * sources.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->src) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF &&
                inst->dst.file != FIXED_GRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->src, entry->size_read))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      delete [] acp_table;
   }
   /* Populate the initial values for the livein and liveout sets.  For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout and livein to ~0 (the universal set).
    */
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = ~0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }
   /* Initialize the undef set. */
   foreach_block (block, cfg) {
      for (int i = 0; i < num_acp; i++) {
         BITSET_SET(bd[block->num].undef, i);
         for (unsigned off = 0; off < acp[i]->size_written; off += REG_SIZE) {
            if (BITSET_TEST(live.block_data[block->num].defout,
                            live.var_from_reg(byte_offset(acp[i]->dst, off))))
               BITSET_CLEAR(bd[block->num].undef, i);
         }
      }
   }
}
/**
 * Run the fixed-point dataflow algorithm, propagating the livein and
 * liveout sets between blocks until they stop changing.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];
            BITSET_WORD livein_from_any_block = 0;

            /* Update livein for this block.  If a copy is live out of all
             * parent blocks, it's live coming in to this block.
             */
            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               /* Consider ACP entries with a known-undefined destination to
                * be available from the parent.  This is valid because we're
                * free to set the undefined variable equal to the source of
                * the ACP entry without breaking the application's
                * expectations, since the variable is undefined.
                */
               bd[block->num].livein[i] &= (bd[parent->num].liveout[i] |
                                            bd[parent->num].undef[i]);
               livein_from_any_block |= bd[parent->num].liveout[i];
            }

            /* Limit to the set of ACP entries that can possibly be available
             * at the start of the block, since propagating from a variable
             * which is guaranteed to be undefined (rather than potentially
             * undefined for some dynamic control-flow paths) doesn't seem
             * particularly useful.
             */
            bd[block->num].livein[i] &= livein_from_any_block;

            /* Update liveout for this block. */
            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }
   } while (progress);
}
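
/* A sketch of why the loop above terminates: liveout of every non-entry
 * block starts at the universal set and the update equations are monotone,
 * so each liveout word can only lose bits from one iteration to the next.
 * With finitely many bits, an iteration with no change (a fixed point) must
 * eventually be reached.
 */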
void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, " livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}
static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR  ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}
static bool
can_take_stride(fs_inst *inst, unsigned arg, unsigned stride,
                const gen_device_info *devinfo)
{
   if (stride > 4)
      return false;

   /* Bail if the channels of the source need to be aligned to the byte offset
    * of the corresponding channel of the destination, and the provided stride
    * would break this restriction.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst) &&
       !(type_sz(inst->src[arg].type) * stride ==
         type_sz(inst->dst.type) * inst->dst.stride ||
         stride == 0))
      return false;

   /* 3-source instructions can only be Align16, which restricts what strides
    * they can take. They can only take a stride of 1 (the usual case), or 0
    * with a special "repctrl" bit. But the repctrl bit doesn't work for
    * 64-bit datatypes, so if the source type is 64-bit then only a stride of
    * 1 is allowed. From the Broadwell PRM, Volume 7 "3D Media GPGPU", page
    * 944:
    *
    *    This is applicable to 32b datatypes and 16b datatype. 64b datatypes
    *    cannot use the replicate control.
    */
   if (inst->is_3src(devinfo)) {
      if (type_sz(inst->src[arg].type) > 4)
         return stride == 1;
      else
         return stride == 1 || stride == 0;
   }

   /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
    * page 391 ("Extended Math Function"):
    *
    *     The following restrictions apply for align1 mode: Scalar source is
    *     supported. Source and destination horizontal stride must be the
    *     same.
    *
    * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
    * 134 ("Extended Math Function"):
    *
    *    Scalar source is supported. Source and destination horizontal stride
    *    must be same.
    *
    * and similar language exists for IVB and SNB. Pre-SNB, math instructions
    * are sends, so the sources are moved to MRF's and there are no
    * restrictions.
    */
   if (inst->is_math()) {
      if (devinfo->gen == 6 || devinfo->gen == 7) {
         assert(inst->dst.stride == 1);
         return stride == 1 || stride == 0;
      } else if (devinfo->gen >= 8) {
         return stride == inst->dst.stride || stride == 0;
      }
   }

   return true;
}
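
/* As an example of the stride limit: composing a copy written with stride 2
 * into a consumer that already reads its source with stride 4 would require
 * a stride of 8, which no horizontal region stride can encode, so
 * can_take_stride() rejects the combination up front.
 */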
static bool
instruction_requires_packed_data(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDY_FINE:
   case FS_OPCODE_DDY_COARSE:
      return true;
   default:
      return false;
   }
}
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != VGRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
          entry->src.file == ATTR || entry->src.file == FIXED_GRF);

   /* Avoid propagating a LOAD_PAYLOAD instruction into another if there is a
    * good chance that we'll be able to eliminate the latter through register
    * coalescing.  If only part of the sources of the second LOAD_PAYLOAD can
    * be simplified through copy propagation we would be making register
    * coalescing impossible, ending up with unnecessary copies in the program.
    * This is also the case for is_multi_copy_payload() copies that can only
    * be coalesced when the instruction is lowered into a sequence of MOVs.
    *
    * Worse -- In cases where the ACP entry was the result of CSE combining
    * multiple LOAD_PAYLOAD subexpressions, propagating the first LOAD_PAYLOAD
    * into the second would undo the work of CSE, leading to an infinite
    * optimization loop.  Avoid this by detecting LOAD_PAYLOAD copies from CSE
    * temporaries which should match is_coalescing_payload().
    */
   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       (is_coalescing_payload(alloc, inst) || is_multi_copy_payload(inst)))
      return false;

   assert(entry->dst.file == VGRF);
   if (inst->src[arg].nr != entry->dst.nr)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (!region_contained_in(inst->src[arg], inst->size_read(arg),
                            entry->dst, entry->size_written))
      return false;

   /* Avoid propagating a FIXED_GRF register into an EOT instruction in order
    * for any register allocation restrictions to be applied.
    */
   if (entry->src.file == FIXED_GRF && inst->eot)
      return false;

   /* Avoid propagating odd-numbered FIXED_GRF registers into the first source
    * of a LINTERP instruction on platforms where the PLN instruction has
    * register alignment restrictions.
    */
   if (devinfo->has_pln && devinfo->gen <= 6 &&
       entry->src.file == FIXED_GRF && (entry->src.nr & 1) &&
       inst->opcode == FS_OPCODE_LINTERP && arg == 0)
      return false;
   /* we can't generally copy-propagate UD negations because we
    * can end up accessing the resulting values as signed integers
    * instead.  See also resolve_ud_negate() and comment in
    * fs_generator::generate_code.
    */
   if (entry->src.type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if ((has_source_modifiers || entry->src.file == UNIFORM ||
        !entry->src.is_contiguous()) &&
       !inst->can_do_source_mods(devinfo))
      return false;

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE)
      return false;

   /* Some instructions implemented in the generator backend, such as
    * derivatives, assume that their operands are packed so we can't
    * generally propagate strided regions to them.
    */
   const unsigned entry_stride = (entry->src.file == FIXED_GRF ? 1 :
                                  entry->src.stride);
   if (instruction_requires_packed_data(inst) && entry_stride > 1)
      return false;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (!can_take_stride(inst, arg, entry_stride * inst->src[arg].stride,
                        devinfo))
      return false;

   /* Bail if the source FIXED_GRF region of the copy cannot be trivially
    * composed with the source region of the instruction -- E.g. because the
    * copy uses some extended stride greater than 4 not supported natively by
    * the hardware as a horizontal stride, or because instruction compression
    * could require us to use a vertical stride shorter than a GRF.
    */
   if (entry->src.file == FIXED_GRF &&
       (inst->src[arg].stride > 4 ||
        inst->dst.component_size(inst->exec_size) >
        inst->src[arg].component_size(inst->exec_size)))
      return false;

   /* Bail if the instruction type is larger than the execution type of the
    * copy, which implies that each channel is reading multiple channels of
    * the destination of the copy, and simply replacing the sources would
    * give a program with different semantics.
    */
   if (type_sz(entry->dst.type) < type_sz(inst->src[arg].type))
      return false;

   /* Bail if the result of composing both strides cannot be expressed
    * as another stride.  This avoids, for example, trying to transform
    * this:
    *
    *     MOV (8) rX<1>UD rY<0;1,0>UD
    *     FOO (8) ...     rX<8;8,1>UW
    *
    * into this:
    *
    *     FOO (8) ...     rY<0;1,0>UW
    *
    * which would have different semantics.
    */
   if (entry_stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   /* Since semantics of source modifiers are type-dependent we need to
    * ensure that the meaning of the instruction remains the same if we
    * change the type.  If the sizes of the types are different the new
    * instruction will read a different amount of data than the original
    * and the semantics will always be different.
    */
   if (has_source_modifiers &&
       entry->dst.type != inst->src[arg].type &&
       (!inst->can_change_types() ||
        type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))
      return false;

   if (devinfo->gen >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }

   if (entry->saturate) {
      switch(inst->opcode) {
      case BRW_OPCODE_SEL:
         /* Folding a saturating copy into a SEL is only safe when the SEL
          * clamps against an immediate inside [0.0, 1.0], since then
          * saturation commutes with the min/max the SEL implements.
          */
         if ((inst->conditional_mod != BRW_CONDITIONAL_GE &&
              inst->conditional_mod != BRW_CONDITIONAL_L) ||
             inst->src[1].file != IMM ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }
   /* Save the offset of inst->src[arg] relative to entry->dst for it to be
    * applied later.
    */
   const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;

   /* Fold the copy into the instruction consuming it. */
   inst->src[arg].file = entry->src.file;
   inst->src[arg].nr = entry->src.nr;
   inst->src[arg].subnr = entry->src.subnr;
   inst->src[arg].offset = entry->src.offset;

   /* Compose the strides of both regions. */
   if (entry->src.file == FIXED_GRF) {
      if (inst->src[arg].stride) {
         const unsigned orig_width = 1 << entry->src.width;
         const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *
                                                inst->src[arg].stride);
         inst->src[arg].width = cvt(MIN2(orig_width, reg_width)) - 1;
         inst->src[arg].hstride = cvt(inst->src[arg].stride);
         inst->src[arg].vstride = inst->src[arg].hstride + inst->src[arg].width;
      } else {
         inst->src[arg].vstride = inst->src[arg].hstride =
            inst->src[arg].width = 0;
      }

      inst->src[arg].stride = 1;

      /* Hopefully no Align16 around here... */
      assert(entry->src.swizzle == BRW_SWIZZLE_XYZW);
      inst->src[arg].swizzle = entry->src.swizzle;
   } else {
      inst->src[arg].stride *= entry->src.stride;
   }

   /* Compose any saturate modifiers. */
   inst->saturate = inst->saturate || entry->saturate;

   /* Compute the first component of the copy that the instruction is
    * reading, and the base byte offset within that component.
    */
   assert(entry->dst.offset % REG_SIZE == 0 && entry->dst.stride == 1);
   const unsigned component = rel_offset / type_sz(entry->dst.type);
   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);

   /* Calculate the byte offset at the origin of the copy of the given
    * component and suboffset.
    */
   inst->src[arg] = byte_offset(inst->src[arg],
      component * entry_stride * type_sz(entry->src.type) + suboffset);
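
   /* Worked example of the offset math above: for a 4-byte type, an
    * entry_stride of 2 and a rel_offset of 20 bytes, the instruction starts
    * reading at component 5 of the copy (suboffset 0), i.e.
    * 5 * 2 * 4 = 40 bytes into the copy's source region.
    */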
   if (has_source_modifiers) {
      if (entry->dst.type != inst->src[arg].type) {
         /* We are propagating source modifiers from a MOV with a different
          * type.  If we got here, then we can just change the source and
          * destination types of the instruction and keep going.
          */
         assert(inst->can_change_types());
         for (int i = 0; i < inst->sources; i++) {
            inst->src[i].type = entry->dst.type;
         }
         inst->dst.type = entry->dst.type;
      }

      if (!inst->src[arg].abs) {
         inst->src[arg].abs = entry->src.abs;
         inst->src[arg].negate ^= entry->src.negate;
      }
   }

   return true;
}
bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;
   if (type_sz(entry->src.type) > 4)
      return false;
   if (entry->saturate)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != VGRF)
         continue;

      assert(entry->dst.file == VGRF);
      if (inst->src[i].nr != entry->dst.nr)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (!region_contained_in(inst->src[i], inst->size_read(i),
                               entry->dst, entry->size_written))
         continue;

      /* If the type sizes don't match each channel of the instruction is
       * either extracting a portion of the constant (which could be handled
       * with some effort but the code below doesn't) or reading multiple
       * channels of the source at once.
       */
      if (type_sz(inst->src[i].type) != type_sz(entry->dst.type))
         continue;

      fs_reg val = entry->src;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((devinfo->gen >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((devinfo->gen >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
      case FS_OPCODE_PACK:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         /* FINISHME: Promote non-float constants and remove this. */
         if (devinfo->gen < 8)
            break;
         /* fallthrough */
      case SHADER_OPCODE_POW:
         /* Allow constant propagation into src1 (except on Gen 6 which
          * doesn't support scalar source math), and let constant combining
          * promote the constant on Gen < 8.
          */
         if (devinfo->gen == 6)
            break;
         /* fallthrough */
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_ASR:
      case BRW_OPCODE_SHL:
      case BRW_OPCODE_SHR:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case BRW_OPCODE_MUL:
      case SHADER_OPCODE_MULH:
      case BRW_OPCODE_ADD:
      case BRW_OPCODE_OR:
      case BRW_OPCODE_AND:
      case BRW_OPCODE_XOR:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             *
             * The BSpec says for Broadwell that
             *
             *    "When multiplying DW x DW, the dst cannot be accumulator."
             *
             * Integer MUL with a non-accumulator destination will be lowered
             * by lower_integer_multiplication(), so don't restrict it.
             */
            if (((inst->opcode == BRW_OPCODE_MUL &&
                  inst->dst.is_accumulator()) ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM &&
                    (inst->conditional_mod == BRW_CONDITIONAL_NONE ||
                     /* Only GE and L are commutative. */
                     inst->conditional_mod == BRW_CONDITIONAL_GE ||
                     inst->conditional_mod == BRW_CONDITIONAL_L)) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;

      case FS_OPCODE_FB_WRITE_LOGICAL:
         /* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
          * bit-cast using a strided region so they cannot be immediates.
          */
         if (i != FB_WRITE_LOGICAL_SRC_SRC_STENCIL &&
             i != FB_WRITE_LOGICAL_SRC_OMASK) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case SHADER_OPCODE_TEX_LOGICAL:
      case SHADER_OPCODE_TXD_LOGICAL:
      case SHADER_OPCODE_TXF_LOGICAL:
      case SHADER_OPCODE_TXL_LOGICAL:
      case SHADER_OPCODE_TXS_LOGICAL:
      case FS_OPCODE_TXB_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      case SHADER_OPCODE_TXF_UMS_LOGICAL:
      case SHADER_OPCODE_TXF_MCS_LOGICAL:
      case SHADER_OPCODE_LOD_LOGICAL:
      case SHADER_OPCODE_TG4_LOGICAL:
      case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      case SHADER_OPCODE_BROADCAST:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}
static bool
can_propagate_from(fs_inst *inst)
{
   return (inst->opcode == BRW_OPCODE_MOV &&
           inst->dst.file == VGRF &&
           ((inst->src[0].file == VGRF &&
             !regions_overlap(inst->dst, inst->size_written,
                              inst->src[0], inst->size_read(0))) ||
            inst->src[0].file == ATTR ||
            inst->src[0].file == UNIFORM ||
            inst->src[0].file == IMM ||
            (inst->src[0].file == FIXED_GRF &&
             inst->src[0].is_contiguous())) &&
           inst->src[0].type == inst->dst.type &&
           !inst->is_partial_write()) ||
          is_identity_payload(FIXED_GRF, inst);
}
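
/* For example, a contiguous, same-typed, non-partial "MOV vgrf8, vgrf9"
 * whose regions don't overlap satisfies the checks above and would become
 * an ACP entry mapping vgrf8 back to vgrf9 (register numbers here are
 * purely illustrative).
 */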
/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagation_local(void *copy_prop_ctx, bblock_t *block,
                                       exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != VGRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;
            else if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* kill the destination from the ACP */
      if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
            if (regions_overlap(entry->dst, entry->size_written,
                                inst->dst, inst->size_written))
               entry->remove();
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               /* Make sure we kill the entry if this instruction overwrites
                * _any_ of the registers that it reads
                */
               if (regions_overlap(entry->src, entry->size_read,
                                   inst->dst, inst->size_written))
                  entry->remove();
            }
         }
      }

      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->size_written = inst->size_written;
         for (unsigned i = 0; i < inst->sources; i++)
            entry->size_read += inst->size_read(i);
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == VGRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int effective_width = i < inst->header_size ? 8 : inst->exec_size;
            assert(effective_width * type_sz(inst->src[i].type) % REG_SIZE == 0);
            const unsigned size_written = effective_width *
                                          type_sz(inst->src[i].type);
            if (inst->src[i].file == VGRF ||
                (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].is_contiguous())) {
               acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
               entry->dst = byte_offset(inst->dst, offset);
               entry->src = inst->src[i];
               entry->size_written = size_written;
               entry->size_read = inst->size_read(i);
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
               } else {
                  ralloc_free(entry);
               }
            }

            offset += size_written;
         }
      }
   }

   return progress;
}
bool
fs_visitor::opt_copy_propagation()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   const fs_live_variables &live = live_analysis.require();

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagation_local(copy_prop_ctx, block,
                                            out_acp[block->num]) || progress;

      /* If the destination of an ACP entry exists only within this block,
       * then there's no need to keep it for dataflow analysis.  We can delete
       * it from the out_acp table and avoid growing the bitsets any bigger
       * than we absolutely have to.
       *
       * Because nothing in opt_copy_propagation_local touches the block
       * start/end IPs and opt_copy_propagation_local is incapable of
       * extending the live range of an ACP destination beyond the block,
       * it's safe to use the liveness information in this way.
       */
      for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
         foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
            assert(entry->dst.file == VGRF);
            if (block->start_ip <= live.vgrf_start[entry->dst.nr] &&
                live.vgrf_end[entry->dst.nr] <= block->end_ip)
               entry->remove();
         }
      }
   }

   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, live, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagation_local(copy_prop_ctx, block, in_acp) ||
                 progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];
   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}