/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

#include "util/u_math.h"
Instruction::isNop() const
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)

   if (terminator || join) // XXX: should terminator imply flow ?

   if (!fixed && op == OP_NOP)

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         if (!def(0).rep()->equals(getSrc(1)))
bool Instruction::isDead() const
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)

   if (terminator || asFlow())
// =============================================================================

class CopyPropagation : public Pass
   virtual bool visit(BasicBlock *);
// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
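// Example (illustrative): after propagating "mov b, a", a later instruction
// "add d, b, c" reads a directly, i.e. "add d, a, c", and the mov becomes dead.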
CopyPropagation::visit(BasicBlock *bb)
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
      if (mov->getPredicate())
      if (mov->def(0).getFile() != mov->src(0).getFile())
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
// =============================================================================

class MergeSplits : public Pass
   virtual bool visit(BasicBlock *);
// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
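// Example (illustrative): for "split (lo, hi), x" followed by
// "merge y, lo, hi", all uses of y are rewritten to use x directly.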
MergeSplits::visit(BasicBlock *bb)
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
// =============================================================================

class LoadPropagation : public Pass
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmd32Load(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
LoadPropagation::isCSpaceLoad(Instruction *ld)
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;

LoadPropagation::isImmd32Load(Instruction *ld)
   if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
   return ld->src(0).getFile() == FILE_IMMEDIATE;

LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
LoadPropagation::checkSwapSrc01(Instruction *insn)
   if (!prog->getTarget()->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
   if (insn->src(1).getFile() != FILE_GPR)

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   if (isCSpaceLoad(i0)) {
      if (!isCSpaceLoad(i1))
         insn->swapSources(0, 1);
   if (isImmd32Load(i0)) {
      if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
         insn->swapSources(0, 1);
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
LoadPropagation::visit(BasicBlock *bb)
   const Target *targ = prog->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      if (i->op == OP_CALL) // calls have args as sources, they must be in regs

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
         if (!targ->insnCanLoad(i, s, ld))

         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
// =============================================================================

// Evaluate constant expressions.
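// Example (illustrative): "add d, 2, 3" folds to "mov d, 5"; with only one
// immediate source, opnd() applies algebraic identities instead.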
class ConstantFolding : public Pass
   bool foldAll(Program *);

   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue &, ImmediateValue &);
   void expr(Instruction *, ImmediateValue &, ImmediateValue &, ImmediateValue &);
   void opnd(Instruction *, ImmediateValue &, int s);

   void unary(Instruction *, const ImmediateValue &);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue &);
   // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
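   // Example (illustrative) of such a chain: set a, b -> neg -> f2i; walking
   // the conversions back from the compare-with-zero recovers the inner SET.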
   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

// TODO: remember generated immediates and only revisit these
ConstantFolding::foldAll(Program *prog)
   unsigned int iterCount = 0;
   } while (foldCount && ++iterCount < 2);
ConstantFolding::visit(BasicBlock *bb)
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      if (i->op == OP_MOV || i->op == OP_CALL)

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
ConstantFolding::findOriginForTestWithZero(Value *value)
   Instruction *insn = value->getInsn();

   while (insn && insn->op != OP_SET) {
      Instruction *next = NULL;
         next = insn->getSrc(0)->getInsn();
         if (insn->sType != next->dType)
         next = insn->getSrc(0)->getInsn();

   return insn ? insn->asCmp() : NULL;
Modifier::applyTo(ImmediateValue& imm) const
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)

   switch (imm.reg.type) {
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      assert(!(bits & NV50_IR_MOD_NOT));

   case TYPE_S8: // NOTE: will be extended
   case TYPE_U8: // NOTE: treated as signed
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;

      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      assert(!(bits & NV50_IR_MOD_NOT));

      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
Modifier::getOp() const
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;

   memset(&res.data, 0, sizeof(res.data));

      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
         if (!isfinite(b->data.f32))
      res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
         res.data.u32 = a->data.u32 * b->data.u32; break;
      if (b->data.u32 == 0)
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;

      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      res.data.u64 = a->data.u64 & b->data.u64;
      res.data.u64 = a->data.u64 | b->data.u64;
      res.data.u64 = a->data.u64 ^ b->data.u64;
      res.data.u32 = a->data.u32 << b->data.u32;
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      if (a->data.u32 != b->data.u32)
      res.data.u32 = a->data.u32;

      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      if (width + offset < 32) {
         lshift = 32 - width - offset;
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
         res.data.u32 = a->data.u32;
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;

      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->getSrc(0)->reg.data = res.data;

   if (i->op == OP_MAD || i->op == OP_FMA) {
      i->setSrc(1, i->getSrc(0));
      i->src(1).mod = i->src(2).mod;
      i->setSrc(0, i->getSrc(2));

      if (i->src(0).getImmediate(src0))
         expr(i, src0, *i->getSrc(1)->asImm());
      if (i->saturate && !prog->getTarget()->isSatSupported(i)) {
         bld.setPosition(i, false);
         i->setSrc(1, bld.loadImm(NULL, res.data.u32));

      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;

   memset(&res.data, 0, sizeof(res.data));

      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->getSrc(0)->reg.data = res.data;
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
   if (i->dType != TYPE_F32)

   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
   const int t = s ? 0 : 1;
   Instruction *mul1 = NULL; // mul1 before mul2
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
   if (mul1 && !mul1->saturate) {
      if (mul1->src(s1 = 0).getImmediate(imm1) ||
          mul1->src(s1 = 1).getImmediate(imm1)) {
         bld.setPosition(mul1, false);
         // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
         mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
         mul1->src(s1).mod = Modifier(0);
         mul2->def(0).replace(mul1->getDef(0), false);
         mul1->saturate = mul2->saturate;
      if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         // d = mul c, imm -> d = mul_x_imm a, b
         mul1->postFactor = e;
         mul2->def(0).replace(mul1->getDef(0), false);
            mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
         mul1->saturate = mul2->saturate;

   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // d = mul b, c -> d = mul_x_imm a, c
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
   const operation op = i->op;
   Instruction *newi = i;

      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
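            // e.g. (illustrative): mul.high d, a, 16 -> shr u32 d, a, 28,
            // since (a * 2^4) >> 32 == a >> (32 - 4)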
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));

      if (imm0.isInteger(0)) {
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;

      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));

      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->op = i->src(0).mod.getOp();
      if (imm0.isInteger(1) || imm0.isInteger(-1)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->setSrc(0, i->getSrc(1));
         i->src(0).mod = i->src(1).mod;
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;

      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(1));
         i->src(0).mod = i->src(1).mod;
         i->op = i->src(0).mod.getOp();
            i->src(0).mod = Modifier(0);
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))

      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
      if (imm0.reg.data.u32 == 1) {
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      if (i->dType == TYPE_U32) {
         const uint32_t d = imm0.reg.data.u32;

         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
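         // e.g. (illustrative) for d == 7: l == 3, m == 0x24924925, and the
         // quotient is hi = mul.high(a, m); q = (((a - hi) >> 1) + hi) >> 2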
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      if (imm0.reg.data.s32 == -1) {
         const int32_t d = imm0.reg.data.s32;

         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);

         delete_Instruction(prog, i);
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
         bld.setPosition(i, false);
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
   case OP_SET: // TODO: SET_AND,OR,XOR
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));

      if (i->src(t).mod != Modifier(0))
      if (imm0.reg.data.u32 != 0 || !si || si->op != OP_SET)

      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
         ccZ = reverseCondCode(ccZ);
      case CC_LT: cc = CC_FL; break;
      case CC_GE: cc = CC_TR; break;
      case CC_EQ: cc = inverseCondCode(cc); break;
      case CC_LE: cc = inverseCondCode(cc); break;

      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      i->sType = si->sType;
      if (s != 1 || i->src(0).mod != Modifier(0))

      // try to concatenate shifts
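      // e.g. (illustrative): shl b, a, 2; shl c, b, 3  ->  shl c, a, 5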
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SHL)

      if (si->src(1).getImmediate(imm1)) {
         bld.setPosition(i, false);
         i->setSrc(0, si->getSrc(0));
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));

      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
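// Example (illustrative): "neg b, a; add d, b, c" becomes "add d, -a, c"
// when the target supports a NEG modifier on that source.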
class ModifierFolding : public Pass
   virtual bool visit(BasicBlock *);
ModifierFolding::visit(BasicBlock *bb)
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;

   for (i = bb->getEntry(); i; i = next) {
      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                (mi->op != OP_ABS &&
         if (i->sType != mi->dType) {
         if ((mod = Modifier(mi->op)) == Modifier(0))
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         if ((i->op == OP_NEG) && mod.neg()) {
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT(Instruction *);
   void handleSUCLAMP(Instruction *);
AlgebraicOpt::handleABS(Instruction *abs)
   Instruction *sub = abs->getSrc(0)->getInsn();
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))

   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)

   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
      src1 = neg->getSrc(0);

   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
AlgebraicOpt::handleADD(Instruction *add)
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)

   bool changed = false;
   if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_MAD);
   if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_SAD);
// ADD(SAD(a,b,0), c) -> SAD(a,b,c)
// ADD(MUL(a,b), c) -> MAD(a,b,c)
AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
   const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)

   if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
       (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))

   src = add->getSrc(s);
   if (src->getInsn()->postFactor)
   if (toOp == OP_SAD) {
      if (!src->getInsn()->src(2).getImmediate(imm))
      if (!imm.isInteger(0))

   mod[0] = add->src(0).mod;
   mod[1] = add->src(1).mod;
   mod[2] = src->getUniqueInsn()->src(0).mod;
   mod[3] = src->getUniqueInsn()->src(1).mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)

   add->subOp = src->getInsn()->subOp; // potentially mul-high
   add->setSrc(2, add->src(s ? 0 : 1));
   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src(0).mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src(1).mod = mod[3];
AlgebraicOpt::handleMINMAX(Instruction *minmax)
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
   if (minmax->src(0).mod == minmax->src(1).mod) {
      if (minmax->def(0).mayReplace(minmax->src(0))) {
         minmax->def(0).replace(minmax->src(0), false);
         minmax->bb->remove(minmax);
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);

// min(x, -x) = -abs(x)
// min(x, -abs(x)) = -abs(x)
// min(x, abs(x)) = x
// max(x, -abs(x)) = x
// max(x, abs(x)) = abs(x)
// max(x, -x) = abs(x)
AlgebraicOpt::handleRCP(Instruction *rcp)
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src(0).mod * si->src(0).mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
AlgebraicOpt::handleSLCT(Instruction *slct)
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   if (slct->getSrc(0) != slct->getSrc(1)) {

   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
AlgebraicOpt::handleLOGOP(Instruction *logop)
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)

   if ((logop->op == OP_AND || logop->op == OP_OR) &&
       logop->def(0).mayReplace(logop->src(0))) {
      logop->def(0).replace(logop->src(0), false);
      delete_Instruction(prog, logop);

   // try AND(SET, SET) -> SET_AND(SET)
   Instruction *set0 = src0->getInsn();
   Instruction *set1 = src1->getInsn();

   if (!set0 || set0->fixed || !set1 || set1->fixed)
   if (set1->op != OP_SET) {
      Instruction *xchg = set0;
   if (set1->op != OP_SET)

   operation redOp = (logop->op == OP_AND ? OP_SET_AND :
                      logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
   if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
   if (set0->op != OP_SET &&
       set0->op != OP_SET_AND &&
       set0->op != OP_SET_OR &&
       set0->op != OP_SET_XOR)
   if (set0->getDef(0)->refCount() > 1 &&
       set1->getDef(0)->refCount() > 1)
   if (set0->getPredicate() || set1->getPredicate())

   // check that they don't source each other
   for (int s = 0; s < 2; ++s)
      if (set0->getSrc(s) == set1->getDef(0) ||
          set1->getSrc(s) == set0->getDef(0))

   set0 = cloneForward(func, set0);
   set1 = cloneShallow(func, set1);
   logop->bb->insertAfter(logop, set1);
   logop->bb->insertAfter(logop, set0);

   set0->dType = TYPE_U8;
   set0->getDef(0)->reg.file = FILE_PREDICATE;
   set0->getDef(0)->reg.size = 1;
   set1->setSrc(2, set0->getDef(0));
   set1->setDef(0, logop->getDef(0));
   delete_Instruction(prog, logop);
// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
// F2I(NEG(I2F(ABS(SET))))
AlgebraicOpt::handleCVT(Instruction *cvt)
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))

   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
   if (insn->src(0).mod != Modifier(0))

   insn = insn->getSrc(0)->getInsn();

   // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
   if (insn && insn->op == OP_CVT &&
       insn->dType == TYPE_F32 &&
       insn->sType == TYPE_S32) {
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {

   Instruction *bset = cloneShallow(func, insn);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
// SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
AlgebraicOpt::handleSUCLAMP(Instruction *insn)
   int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;

   assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);

   // look for ADD (TODO: only count references by non-SUCLAMP)
   if (insn->getSrc(0)->refCount() > 1)
   add = insn->getSrc(0)->getInsn();
   if (!add || add->op != OP_ADD ||
       (add->dType != TYPE_U32 &&
        add->dType != TYPE_S32))

   // look for immediate
   for (s = 0; s < 2; ++s)
      if (add->src(s).getImmediate(imm))

   // determine if immediate fits
   val += imm.reg.data.s32;
   if (val > 31 || val < -32)

   // determine if other addend fits
   if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))

   bld.setPosition(insn, false); // make sure bld is init'ed
   insn->setSrc(2, bld.mkImm(val));
   insn->setSrc(0, add->getSrc(s));
AlgebraicOpt::visit(BasicBlock *bb)
   for (Instruction *i = bb->getEntry(); i; i = next) {
// =============================================================================

updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
   if (offset != ldst->getSrc(0)->reg.data.offset) {
      if (ldst->getSrc(0)->refCount() > 1)
         ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
      ldst->getSrc(0)->reg.data.offset = offset;
// Combine loads and stores, forward stores to loads where possible.
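// Example (illustrative): two adjacent loads "ld u32 a, l[0x0]" and
// "ld u32 b, l[0x4]" combine into one 64-bit load; a load that follows a
// store to the same location has its defs replaced by the stored values.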
class MemoryOpt : public Pass
      const Value *rel[2];

      bool overlaps(const Instruction *ldst) const;

      inline void link(Record **);
      inline void unlink(Record **);
      inline void set(const Instruction *ldst);

   Record *loads[DATA_FILE_COUNT];
   Record *stores[DATA_FILE_COUNT];

   MemoryPool recordPool;

   virtual bool visit(BasicBlock *);
   bool runOpt(BasicBlock *);

   Record **getList(const Instruction *);

   Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;

   // merge @insn into load/store instruction from @rec
   bool combineLd(Record *rec, Instruction *ld);
   bool combineSt(Record *rec, Instruction *st);

   bool replaceLdFromLd(Instruction *ld, Record *ldRec);
   bool replaceLdFromSt(Instruction *ld, Record *stRec);
   bool replaceStFromSt(Instruction *restrict st, Record *stRec);

   void addRecord(Instruction *ldst);
   void purgeRecords(Instruction *const st, DataFile);
   void lockStores(Instruction *const ld);
MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
   for (int i = 0; i < DATA_FILE_COUNT; ++i) {

   for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
      for (it = loads[i]; it; it = next) {
         recordPool.release(it);
      for (it = stores[i]; it; it = next) {
         recordPool.release(it);
MemoryOpt::combineLd(Record *rec, Instruction *ld)
   int32_t offRc = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeLd = typeSizeof(ld->dType);
   int size = sizeRc + sizeLd;

   if (!prog->getTarget()->
       isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))

   // no unaligned loads
   if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
       ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))

   assert(sizeRc + sizeLd <= 16 && offRc != offLd);

   for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);

   if (offLd < offRc) {
      for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
      // d: nr of definitions in ld
      // j: nr of definitions in rec->insn, move:
      for (d = d + j - 1; j > 0; --j, --d)
         rec->insn->setDef(d, rec->insn->getDef(j - 1));

      if (rec->insn->getSrc(0)->refCount() > 1)
         rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
      rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;

   // move definitions of @ld to @rec->insn
   for (j = 0; sizeLd; ++j, ++d) {
      sizeLd -= ld->getDef(j)->reg.size;
      rec->insn->setDef(d, ld->getDef(j));

   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));

   delete_Instruction(prog, ld);
MemoryOpt::combineSt(Record *rec, Instruction *st)
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int size = sizeRc + sizeSt;

   Value *src[4]; // no modifiers in ValueRef allowed for st

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
   if (size == 8 && MIN2(offRc, offSt) & 0x7)

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= rec->insn->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      rec->offset = offSt;

   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));
MemoryOpt::Record::set(const Instruction *ldst)
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
MemoryOpt::Record::link(Record **list)

MemoryOpt::Record::unlink(Record **list)
MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src(0).getFile()];
   return &stores[insn->src(0).getFile()];

MemoryOpt::addRecord(Instruction *i)
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());
MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
   const Symbol *sym = insn->getSrc(0)->asSym();
   const int size = typeSizeof(insn->sType);

   Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];

   for (; it; it = it->next) {
      if (it->locked && insn->op != OP_LOAD)
      if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
          it->rel[0] != insn->getIndirect(0, 0) ||
          it->fileIndex != sym->reg.fileIndex ||
          it->rel[1] != insn->getIndirect(0, 1))

      if (it->offset < sym->reg.data.offset) {
         if (it->offset + it->size >= sym->reg.data.offset) {
            isAdj = (it->offset + it->size == sym->reg.data.offset);
            if (!(it->offset & 0x7))
         isAdj = it->offset != sym->reg.data.offset;
         if (size <= it->size && !isAdj)
         if (!(sym->reg.data.offset & 0x7))
            if (it->offset - size <= sym->reg.data.offset)
MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
   Instruction *st = rec->insn;
   int32_t offSt = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;

   for (s = 1; offSt != offLd && st->srcExists(s); ++s)
      offSt += st->getSrc(s)->reg.size;

   for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
      if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
      if (st->getSrc(s)->reg.file != FILE_GPR)
      ld->def(d).replace(st->src(s), false);
MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
   Instruction *ldR = rec->insn;
   int32_t offR = rec->offset;
   int32_t offE = ldE->getSrc(0)->reg.data.offset;

   assert(offR <= offE);
   for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
      offR += ldR->getDef(dR)->reg.size;

   for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
      if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
      ldE->def(dE).replace(ldR->getDef(dR), false);

   delete_Instruction(prog, ldE);
MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
   const Instruction *const ri = rec->insn;

   int32_t offS = st->getSrc(0)->reg.data.offset;
   int32_t offR = rec->offset;
   int32_t endS = offS + typeSizeof(st->dType);
   int32_t endR = offR + typeSizeof(ri->dType);

   rec->size = MAX2(endS, endR) - MIN2(offS, offR);

   st->takeExtraSources(0, extra);

      // get non-replaced sources of ri
      for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      // get replaced sources of st
      for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
         vals[k++] = st->getSrc(s);
      // skip replaced sources of ri
      for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
      // get non-replaced sources after values covered by st
      for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      assert((unsigned int)k <= Elements(vals));
      for (s = 0; s < k; ++s)
         st->setSrc(s + 1, vals[s]);
      st->setSrc(0, ri->getSrc(0));

      for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
      for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
      for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
         st->setSrc(s++, ri->getSrc(j));

   st->putExtraSources(0, extra);

   delete_Instruction(prog, rec->insn);

   rec->offset = st->getSrc(0)->reg.data.offset;

   st->setType(typeOfSize(rec->size));
MemoryOpt::Record::overlaps(const Instruction *ldst) const
   if (this->fileIndex != that.fileIndex)

   if (this->rel[0] || that.rel[0])
      return this->base == that.base;
      (this->offset < that.offset + that.size) &&
      (this->offset + this->size > that.offset);
// We must not eliminate stores that affect the result of @ld if
// we find later stores to the same location, and we may no longer
// merge them with later stores.
// The stored value can, however, still be used to determine the value
// returned by future loads.
MemoryOpt::lockStores(Instruction *const ld)
   for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
      if (!r->locked && r->overlaps(ld))
// Prior loads from the location of @st are no longer valid.
// Stores to the location of @st may no longer be used to derive
// the value at it nor be coalesced into later stores.
MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
      f = st->src(0).getFile();
   for (Record *r = loads[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&loads[f]);
   for (Record *r = stores[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&stores[f]);
MemoryOpt::visit(BasicBlock *bb)
   bool ret = runOpt(bb);
   // Run again, one pass won't combine 4 32 bit ld/st to a single 128 bit ld/st
   // where 96 bit memory operations are forbidden.
MemoryOpt::runOpt(BasicBlock *bb)
   Instruction *ldst, *next;
   bool isAdjacent = true;

   for (ldst = bb->getEntry(); ldst; ldst = next) {
      if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
         if (ldst->isDead()) {
            // might have been produced by earlier optimization
            delete_Instruction(prog, ldst);
      if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {

      // TODO: maybe have all fixed ops act as barrier ?
      if (ldst->op == OP_CALL ||
          ldst->op == OP_BAR ||
          ldst->op == OP_MEMBAR) {
         purgeRecords(NULL, FILE_MEMORY_LOCAL);
         purgeRecords(NULL, FILE_MEMORY_GLOBAL);
         purgeRecords(NULL, FILE_MEMORY_SHARED);
         purgeRecords(NULL, FILE_SHADER_OUTPUT);
      if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
         if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
            purgeRecords(NULL, FILE_MEMORY_LOCAL);
            purgeRecords(NULL, FILE_MEMORY_GLOBAL);
            purgeRecords(NULL, FILE_MEMORY_SHARED);
            purgeRecords(NULL, ldst->src(0).getFile());
      if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
         purgeRecords(NULL, FILE_SHADER_OUTPUT);

      if (ldst->getPredicate()) // TODO: handle predicated ld/st

      DataFile file = ldst->src(0).getFile();

      // if ld l[]/g[] look for previous store to eliminate the reload
      if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
         // TODO: shared memory ?
         rec = findRecord(ldst, false, isAdjacent);
         if (rec && !isAdjacent)
            keep = !replaceLdFromSt(ldst, rec);

      // or look for ld from the same location and replace this one
      rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
         keep = !replaceLdFromLd(ldst, rec);
      // or combine a previous load with this one
         keep = !combineLd(rec, ldst);

      rec = findRecord(ldst, false, isAdjacent);
         keep = !replaceStFromSt(ldst, rec);
         keep = !combineSt(rec, ldst);

      purgeRecords(ldst, DATA_FILE_COUNT);
// =============================================================================

// Turn control flow into predicated instructions (after register allocation !).
// Could move this to before register allocation on NVC0 and also handle nested
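// Example (illustrative): a small diamond
//    @p0 bra BB:2; BB:1: add $r0, $r0, 1; BB:2: ...
// becomes straight-line code with "@!p0 add $r0, $r0, 1".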
class FlatteningPass : public Pass
   virtual bool visit(BasicBlock *);

   bool tryPredicateConditional(BasicBlock *);
   void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
   void tryPropagateBranch(BasicBlock *);
   inline bool isConstantCondition(Value *pred);
   inline bool mayPredicate(const Instruction *, const Value *pred) const;
   inline void removeFlow(Instruction *);
FlatteningPass::isConstantCondition(Value *pred)
   Instruction *insn = pred->getUniqueInsn();
   if (insn->op != OP_SET || insn->srcExists(2))

   for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
      Instruction *ld = insn->getSrc(s)->getUniqueInsn();
         if (ld->op != OP_MOV && ld->op != OP_LOAD)
         if (ld->src(0).isIndirect(0))
         file = ld->src(0).getFile();
         file = insn->src(s).getFile();
         // catch $r63 on NVC0
         if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
            file = FILE_IMMEDIATE;
      if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
FlatteningPass::removeFlow(Instruction *insn)
   FlowInstruction *term = insn ? insn->asFlow() : NULL;

   Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
   if (term->op == OP_BRA) {
      // TODO: this might get more difficult when we get arbitrary BRAs
      if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
   if (term->op != OP_JOIN)

   Value *pred = term->getPredicate();

   delete_Instruction(prog, term);

   if (pred && pred->refCount() == 0) {
      Instruction *pSet = pred->getUniqueInsn();
      pred->join->reg.data.id = -1; // deallocate
         delete_Instruction(prog, pSet);
FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      assert(!i->getPredicate());
      i->setPredicate(cc, pred);
   removeFlow(bb->getExit());
FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
   if (insn->isPseudo())
   // TODO: calls where we don't know which registers are modified
   if (!prog->getTarget()->mayPredicate(insn, pred))
   for (int d = 0; insn->defExists(d); ++d)
      if (insn->getDef(d)->equals(pred))
// If we jump to BRA/RET/EXIT, replace the jump with it.
// NOTE: We do not update the CFG anymore here !
//
// TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
//  @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
FlatteningPass::tryPropagateBranch(BasicBlock *bb)
   for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
      BasicBlock *bf = i->asFlow()->target.bb;

      if (bf->getInsnCount() != 1)

      FlowInstruction *bra = i->asFlow();
      FlowInstruction *rep = bf->getExit()->asFlow();

      if (!rep || rep->getPredicate())
      if (rep->op != OP_BRA &&
          rep->op != OP_JOIN &&

      // TODO: If there are multiple branches to @rep, only the first would
      //  be replaced, so only remove them after this pass is done ?
      // Also, need to check all incident blocks for fall-through exits and
      //  add the branch there.
      bra->target.bb = rep->target.bb;
      if (bf->cfg.incidentCount() == 1)
FlatteningPass::visit(BasicBlock *bb)
   if (tryPredicateConditional(bb))

   // try to attach join to previous instruction
   if (prog->getTarget()->hasJoin) {
      Instruction *insn = bb->getExit();
      if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
         if (insn && !insn->getPredicate() &&
             insn->op != OP_TEXBAR &&
             !isTextureOp(insn->op) && // probably just nve4
             !isSurfaceOp(insn->op) && // not confirmed
             insn->op != OP_LINTERP && // probably just nve4
             insn->op != OP_PINTERP && // probably just nve4
             ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
              typeSizeof(insn->dType) <= 4) &&
            bb->remove(bb->getExit());

   tryPropagateBranch(bb);
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;

   mask = bb->initiatesSimpleConditional();

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();

   if (isConstantCondition(pred))

   Graph::EdgeIterator ei = bb->cfg.outgoing();
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
         return false; // too long, do a real branch
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
         return false; // too long, do a real branch

      predicateInstructions(bL, pred, bb->getExit()->cc);
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

      bb->remove(bb->joinAt);

   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
// =============================================================================

// Fold Immediate into MAD; must be done after register allocation due to
// constraint SDST == SSRC2
// Does NVC0+ have other situations where this pass makes sense?
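// Example (illustrative), after RA:
//    mov $r3, 0x40000000; mad $r0, $r1, $r3, $r0
// -> mad $r0, $r1, 2.0f, $r0 (and the mov is deleted if $r3 becomes unused)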
class NV50PostRaConstantFolding : public Pass
   virtual bool visit(BasicBlock *);

NV50PostRaConstantFolding::visit(BasicBlock *bb)
   for (Instruction *i = bb->getFirst(); i; i = i->next) {
      if (i->def(0).getFile() != FILE_GPR ||
          i->src(0).getFile() != FILE_GPR ||
          i->src(1).getFile() != FILE_GPR ||
          i->src(2).getFile() != FILE_GPR ||
          i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id ||
          !isFloatType(i->dType))

      def = i->getSrc(1)->getInsn();
      if (def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
         vtmp = i->getSrc(1);
         i->setSrc(1, def->getSrc(0));

         /* There's no post-RA dead code elimination, so do it here
          * XXX: if we add more code-removing post-RA passes, we might
          *      want to create a post-RA dead-code elim pass */
         if (vtmp->refCount() == 0)
            delete_Instruction(bb->getProgram(), def);
// =============================================================================

// Common subexpression elimination. Stupid O^2 implementation.
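// Example (illustrative): given "add b, x, y" followed by "add c, x, y" in
// the same block, uses of c are redirected to b and the second add is deleted.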
class LocalCSE : public Pass
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];

class GlobalCSE : public Pass
   virtual bool visit(BasicBlock *);
Instruction::isActionEqual(const Instruction *that) const
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
   if (this->cc != that->cc)

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
   if (this->asFlow()) {
      if (this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
      if (this->postFactor != that->postFactor)

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache ||
       this->mask != that->mask)

Instruction::isResultEqual(const Instruction *that) const
   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)

   if (!isActionEqual(that))

   if (this->predSrc != that->predSrc)

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
   if (that->defExists(d))

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
      if (this->src(s).mod != that->src(s).mod)
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
   if (that->srcExists(s))

   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src(0).getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
// pull through common expressions from different in-blocks
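// Example (illustrative): if every phi source is a single-use "add tN, x, y"
// computed in its predecessor, the add is re-emitted after the join point and
// the phi is deleted.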
GlobalCSE::visit(BasicBlock *bb)
   Instruction *phi, *next, *ik;

   // TODO: maybe do this with OP_UNION, too

   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      if (phi->getSrc(0)->refCount() > 1)
      ik = phi->getSrc(0)->getInsn();
         continue; // probably a function input
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
         if (!phi->getSrc(s)->getInsn() ||
             !phi->getSrc(s)->getInsn()->isResultEqual(ik))
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         if (!entry || entry->op != OP_JOIN)
         bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())

   if (!old->isResultEqual(i))

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
LocalCSE::visit(BasicBlock *bb)
   unsigned int replaced;

   Instruction *ir, *next;

      // will need to know the order of instructions
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
            ops[ir->op].insert(ir);

         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
            DLLIST_FOR_EACH(&ops[ir->op], iter)
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))

            ops[ir->op].insert(ir);

   for (unsigned int i = 0; i <= OP_LAST; ++i)
// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
   bool buryAll(Program *);

   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
DeadCodeElim::buryAll(Program *prog)
      if (!this->run(prog, false, false))
   } while (deadCount);

DeadCodeElim::visit(BasicBlock *bb)
   for (Instruction *i = bb->getFirst(); i; i = next) {
         delete_Instruction(prog, i);
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
DeadCodeElim::checkSplitLoad(Instruction *ld1)
   Instruction *ld2 = NULL; // can get at most 2 loads

   int32_t addr1, addr2;
   int32_t size1, size2;

   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
   if (mask == 0xffffffff)

   addr1 = ld1->getSrc(0)->reg.data.offset;

   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      addr1 += ld1->getDef(d)->reg.size;

   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      addr2 += ld1->getDef(d)->reg.size;

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
// =============================================================================

#define RUN_PASS(l, n, f)                    \
   if (level >= (l)) {                       \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)  \
         INFO("PEEPHOLE: %s\n", #n);         \
      if (!pass.f(this))                     \
Program::optimizeSSA(int level)
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(1, MergeSplits, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

Program::optimizePostRA(int level)
   RUN_PASS(2, FlatteningPass, run);
   if (getTarget()->getChipset() < 0xc0)
      RUN_PASS(2, NV50PostRaConstantFolding, run);