/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file brw_vec4_copy_propagation.cpp
 *
 * Implements tracking of values copied between registers, and
 * optimizations based on that: copy propagation and constant
 * propagation.
 */
44 is_direct_copy(vec4_instruction
*inst
)
46 return (inst
->opcode
== BRW_OPCODE_MOV
&&
48 inst
->dst
.file
== VGRF
&&
49 inst
->dst
.offset
% REG_SIZE
== 0 &&
51 !inst
->src
[0].reladdr
&&
52 (inst
->dst
.type
== inst
->src
[0].type
||
53 (inst
->dst
.type
== BRW_REGISTER_TYPE_F
&&
54 inst
->src
[0].type
== BRW_REGISTER_TYPE_VF
)));
58 is_dominated_by_previous_instruction(vec4_instruction
*inst
)
60 return (inst
->opcode
!= BRW_OPCODE_DO
&&
61 inst
->opcode
!= BRW_OPCODE_WHILE
&&
62 inst
->opcode
!= BRW_OPCODE_ELSE
&&
63 inst
->opcode
!= BRW_OPCODE_ENDIF
);
67 is_channel_updated(vec4_instruction
*inst
, src_reg
*values
[4], int ch
)
69 const src_reg
*src
= values
[ch
];
71 /* consider GRF only */
72 assert(inst
->dst
.file
== VGRF
);
73 if (!src
|| src
->file
!= VGRF
)
76 return regions_overlap(*src
, REG_SIZE
, inst
->dst
, inst
->size_written
) &&
77 (inst
->dst
.offset
!= src
->offset
||
78 inst
->dst
.writemask
& (1 << BRW_GET_SWZ(src
->swizzle
, ch
)));
82 is_logic_op(enum opcode opcode
)
84 return (opcode
== BRW_OPCODE_AND
||
85 opcode
== BRW_OPCODE_OR
||
86 opcode
== BRW_OPCODE_XOR
||
87 opcode
== BRW_OPCODE_NOT
);
91 * Get the origin of a copy as a single register if all components present in
92 * the given readmask originate from the same register and have compatible
93 * regions, otherwise return a BAD_FILE register.
96 get_copy_value(const copy_entry
&entry
, unsigned readmask
)
101 for (unsigned i
= 0; i
< 4; i
++) {
102 if (readmask
& (1 << i
)) {
103 if (entry
.value
[i
]) {
104 src_reg src
= *entry
.value
[i
];
106 if (src
.file
== IMM
) {
109 swz
[i
] = BRW_GET_SWZ(src
.swizzle
, i
);
110 /* Overwrite the original swizzle so the src_reg::equals call
111 * below doesn't care about it, the correct swizzle will be
112 * calculated once the swizzles of all components are known.
114 src
.swizzle
= BRW_SWIZZLE_XYZW
;
117 if (value
.file
== BAD_FILE
) {
119 } else if (!value
.equals(src
)) {
128 return swizzle(value
,
129 brw_compose_swizzle(brw_swizzle_for_mask(readmask
),
130 BRW_SWIZZLE4(swz
[0], swz
[1],
135 try_constant_propagate(const struct gen_device_info
*devinfo
,
136 vec4_instruction
*inst
,
137 int arg
, const copy_entry
*entry
)
139 /* For constant propagation, we only handle the same constant
140 * across all 4 channels. Some day, we should handle the 8-bit
141 * float vector format, which would let us constant propagate
143 * We could be more aggressive here -- some channels might not get used
144 * based on the destination writemask.
147 get_copy_value(*entry
,
148 brw_apply_inv_swizzle_to_mask(inst
->src
[arg
].swizzle
,
151 if (value
.file
!= IMM
)
154 /* 64-bit types can't be used except for one-source instructions, which
155 * higher levels should have constant folded away, so there's no point in
156 * propagating immediates here.
158 if (type_sz(value
.type
) == 8 || type_sz(inst
->src
[arg
].type
) == 8)
161 if (value
.type
== BRW_REGISTER_TYPE_VF
) {
162 /* The result of bit-casting the component values of a vector float
163 * cannot in general be represented as an immediate.
165 if (inst
->src
[arg
].type
!= BRW_REGISTER_TYPE_F
)
168 value
.type
= inst
->src
[arg
].type
;
171 if (inst
->src
[arg
].abs
) {
172 if ((devinfo
->gen
>= 8 && is_logic_op(inst
->opcode
)) ||
173 !brw_abs_immediate(value
.type
, &value
.as_brw_reg())) {
178 if (inst
->src
[arg
].negate
) {
179 if ((devinfo
->gen
>= 8 && is_logic_op(inst
->opcode
)) ||
180 !brw_negate_immediate(value
.type
, &value
.as_brw_reg())) {
185 value
= swizzle(value
, inst
->src
[arg
].swizzle
);
187 switch (inst
->opcode
) {
189 case SHADER_OPCODE_BROADCAST
:
190 inst
->src
[arg
] = value
;
193 case SHADER_OPCODE_POW
:
194 case SHADER_OPCODE_INT_QUOTIENT
:
195 case SHADER_OPCODE_INT_REMAINDER
:
196 if (devinfo
->gen
< 8)
203 case BRW_OPCODE_BFI1
:
207 case BRW_OPCODE_SUBB
:
209 inst
->src
[arg
] = value
;
214 case BRW_OPCODE_MACH
:
216 case SHADER_OPCODE_MULH
:
221 case BRW_OPCODE_ADDC
:
223 inst
->src
[arg
] = value
;
225 } else if (arg
== 0 && inst
->src
[1].file
!= IMM
) {
226 /* Fit this constant in by commuting the operands. Exception: we
227 * can't do this for 32-bit integer MUL/MACH because it's asymmetric.
229 if ((inst
->opcode
== BRW_OPCODE_MUL
||
230 inst
->opcode
== BRW_OPCODE_MACH
) &&
231 (inst
->src
[1].type
== BRW_REGISTER_TYPE_D
||
232 inst
->src
[1].type
== BRW_REGISTER_TYPE_UD
))
234 inst
->src
[0] = inst
->src
[1];
235 inst
->src
[1] = value
;
239 case GS_OPCODE_SET_WRITE_OFFSET
:
240 /* This is just a multiply by a constant with special strides.
241 * The generator will handle immediates in both arguments (generating
242 * a single MOV of the product). So feel free to propagate in src0.
244 inst
->src
[arg
] = value
;
249 inst
->src
[arg
] = value
;
251 } else if (arg
== 0 && inst
->src
[1].file
!= IMM
) {
252 enum brw_conditional_mod new_cmod
;
254 new_cmod
= brw_swap_cmod(inst
->conditional_mod
);
255 if (new_cmod
!= BRW_CONDITIONAL_NONE
) {
256 /* Fit this constant in by swapping the operands and
259 inst
->src
[0] = inst
->src
[1];
260 inst
->src
[1] = value
;
261 inst
->conditional_mod
= new_cmod
;
269 inst
->src
[arg
] = value
;
271 } else if (arg
== 0 && inst
->src
[1].file
!= IMM
) {
272 inst
->src
[0] = inst
->src
[1];
273 inst
->src
[1] = value
;
275 /* If this was predicated, flipping operands means
276 * we also need to flip the predicate.
278 if (inst
->conditional_mod
== BRW_CONDITIONAL_NONE
) {
279 inst
->predicate_inverse
= !inst
->predicate_inverse
;
293 is_align1_opcode(unsigned opcode
)
296 case VEC4_OPCODE_DOUBLE_TO_F32
:
297 case VEC4_OPCODE_DOUBLE_TO_D32
:
298 case VEC4_OPCODE_DOUBLE_TO_U32
:
299 case VEC4_OPCODE_TO_DOUBLE
:
300 case VEC4_OPCODE_PICK_LOW_32BIT
:
301 case VEC4_OPCODE_PICK_HIGH_32BIT
:
302 case VEC4_OPCODE_SET_LOW_32BIT
:
303 case VEC4_OPCODE_SET_HIGH_32BIT
:
311 try_copy_propagate(const struct gen_device_info
*devinfo
,
312 vec4_instruction
*inst
, int arg
,
313 const copy_entry
*entry
, int attributes_per_reg
)
315 /* Build up the value we are propagating as if it were the source of a
319 get_copy_value(*entry
,
320 brw_apply_inv_swizzle_to_mask(inst
->src
[arg
].swizzle
,
323 /* Check that we can propagate that value */
324 if (value
.file
!= UNIFORM
&&
325 value
.file
!= VGRF
&&
329 /* In gen < 8 instructions that write 2 registers also need to read 2
330 * registers. Make sure we don't break that restriction by copy
331 * propagating from a uniform.
333 if (devinfo
->gen
< 8 && inst
->size_written
> REG_SIZE
&& is_uniform(value
))
336 /* There is a regioning restriction such that if execsize == width
337 * and hstride != 0 then the vstride can't be 0. When we split instrutions
338 * that take a single-precision source (like F->DF conversions) we end up
339 * with a 4-wide source on an instruction with an execution size of 4.
340 * If we then copy-propagate the source from a uniform we also end up with a
341 * vstride of 0 and we violate the restriction.
343 if (inst
->exec_size
== 4 && value
.file
== UNIFORM
&&
344 type_sz(value
.type
) == 4)
347 /* If the type of the copy value is different from the type of the
348 * instruction then the swizzles and writemasks involved don't have the same
349 * meaning and simply replacing the source would produce different semantics.
351 if (type_sz(value
.type
) != type_sz(inst
->src
[arg
].type
))
354 if (devinfo
->gen
>= 8 && (value
.negate
|| value
.abs
) &&
355 is_logic_op(inst
->opcode
)) {
359 if (inst
->src
[arg
].offset
% REG_SIZE
|| value
.offset
% REG_SIZE
)
362 bool has_source_modifiers
= value
.negate
|| value
.abs
;
364 /* gen6 math and gen7+ SENDs from GRFs ignore source modifiers on
367 if ((has_source_modifiers
|| value
.file
== UNIFORM
||
368 value
.swizzle
!= BRW_SWIZZLE_XYZW
) && !inst
->can_do_source_mods(devinfo
))
371 if (has_source_modifiers
&&
372 value
.type
!= inst
->src
[arg
].type
&&
373 !inst
->can_change_types())
376 if (has_source_modifiers
&&
377 inst
->opcode
== SHADER_OPCODE_GEN4_SCRATCH_WRITE
)
380 unsigned composed_swizzle
= brw_compose_swizzle(inst
->src
[arg
].swizzle
,
383 /* Instructions that operate on vectors in ALIGN1 mode will ignore swizzles
384 * so copy-propagation won't be safe if the composed swizzle is anything
385 * other than the identity.
387 if (is_align1_opcode(inst
->opcode
) && composed_swizzle
!= BRW_SWIZZLE_XYZW
)
390 if (inst
->is_3src(devinfo
) &&
391 (value
.file
== UNIFORM
||
392 (value
.file
== ATTR
&& attributes_per_reg
!= 1)) &&
393 !brw_is_single_value_swizzle(composed_swizzle
))
396 if (inst
->is_send_from_grf())
399 /* we can't generally copy-propagate UD negations becuse we
400 * end up accessing the resulting values as signed integers
401 * instead. See also resolve_ud_negate().
404 value
.type
== BRW_REGISTER_TYPE_UD
)
407 /* Don't report progress if this is a noop. */
408 if (value
.equals(inst
->src
[arg
]))
411 const unsigned dst_saturate_mask
= inst
->dst
.writemask
&
412 brw_apply_swizzle_to_mask(inst
->src
[arg
].swizzle
, entry
->saturatemask
);
414 if (dst_saturate_mask
) {
415 /* We either saturate all or nothing. */
416 if (dst_saturate_mask
!= inst
->dst
.writemask
)
419 /* Limit saturate propagation only to SEL with src1 bounded within 0.0
420 * and 1.0, otherwise skip copy propagate altogether.
422 switch(inst
->opcode
) {
425 inst
->src
[0].type
!= BRW_REGISTER_TYPE_F
||
426 inst
->src
[1].file
!= IMM
||
427 inst
->src
[1].type
!= BRW_REGISTER_TYPE_F
||
428 inst
->src
[1].f
< 0.0 ||
429 inst
->src
[1].f
> 1.0) {
433 inst
->saturate
= true;
440 /* Build the final value */
441 if (inst
->src
[arg
].abs
) {
442 value
.negate
= false;
445 if (inst
->src
[arg
].negate
)
446 value
.negate
= !value
.negate
;
448 value
.swizzle
= composed_swizzle
;
449 if (has_source_modifiers
&&
450 value
.type
!= inst
->src
[arg
].type
) {
451 assert(inst
->can_change_types());
452 for (int i
= 0; i
< 3; i
++) {
453 inst
->src
[i
].type
= value
.type
;
455 inst
->dst
.type
= value
.type
;
457 value
.type
= inst
->src
[arg
].type
;
460 inst
->src
[arg
] = value
;
465 vec4_visitor::opt_copy_propagation(bool do_constant_prop
)
467 /* If we are in dual instanced or single mode, then attributes are going
468 * to be interleaved, so one register contains two attribute slots.
470 const int attributes_per_reg
=
471 prog_data
->dispatch_mode
== DISPATCH_MODE_4X2_DUAL_OBJECT
? 1 : 2;
472 bool progress
= false;
473 struct copy_entry entries
[alloc
.total_size
];
475 memset(&entries
, 0, sizeof(entries
));
477 foreach_block_and_inst(block
, vec4_instruction
, inst
, cfg
) {
478 /* This pass only works on basic blocks. If there's flow
479 * control, throw out all our information and start from
482 * This should really be fixed by using a structure like in
483 * src/glsl/opt_copy_propagation.cpp to track available copies.
485 if (!is_dominated_by_previous_instruction(inst
)) {
486 memset(&entries
, 0, sizeof(entries
));
490 /* For each source arg, see if each component comes from a copy
491 * from the same type file (IMM, VGRF, UNIFORM), and try
492 * optimizing out access to the copy result
494 for (int i
= 2; i
>= 0; i
--) {
495 /* Copied values end up in GRFs, and we don't track reladdr
498 if (inst
->src
[i
].file
!= VGRF
||
499 inst
->src
[i
].reladdr
)
502 /* We only handle register-aligned single GRF copies. */
503 if (inst
->size_read(i
) != REG_SIZE
||
504 inst
->src
[i
].offset
% REG_SIZE
)
507 const unsigned reg
= (alloc
.offsets
[inst
->src
[i
].nr
] +
508 inst
->src
[i
].offset
/ REG_SIZE
);
509 const copy_entry
&entry
= entries
[reg
];
511 if (do_constant_prop
&& try_constant_propagate(devinfo
, inst
, i
, &entry
))
513 else if (try_copy_propagate(devinfo
, inst
, i
, &entry
, attributes_per_reg
))
517 /* Track available source registers. */
518 if (inst
->dst
.file
== VGRF
) {
520 alloc
.offsets
[inst
->dst
.nr
] + inst
->dst
.offset
/ REG_SIZE
;
522 /* Update our destination's current channel values. For a direct copy,
523 * the value is the newly propagated source. Otherwise, we don't know
524 * the new value, so clear it.
526 bool direct_copy
= is_direct_copy(inst
);
527 entries
[reg
].saturatemask
&= ~inst
->dst
.writemask
;
528 for (int i
= 0; i
< 4; i
++) {
529 if (inst
->dst
.writemask
& (1 << i
)) {
530 entries
[reg
].value
[i
] = direct_copy
? &inst
->src
[0] : NULL
;
531 entries
[reg
].saturatemask
|=
532 inst
->saturate
&& direct_copy
? 1 << i
: 0;
536 /* Clear the records for any registers whose current value came from
537 * our destination's updated channels, as the two are no longer equal.
539 if (inst
->dst
.reladdr
)
540 memset(&entries
, 0, sizeof(entries
));
542 for (unsigned i
= 0; i
< alloc
.total_size
; i
++) {
543 for (int j
= 0; j
< 4; j
++) {
544 if (is_channel_updated(inst
, entries
[i
].value
, j
)) {
545 entries
[i
].value
[j
] = NULL
;
546 entries
[i
].saturatemask
&= ~(1 << j
);
555 invalidate_live_intervals();
560 } /* namespace brw */