nv50/ir/opt: s/SHL/SHR in optimization of u32 DIV
[mesa.git] / src / gallium / drivers / nv50 / codegen / nv50_ir_peephole.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 #include "nv50_ir.h"
24 #include "nv50_ir_target.h"
25 #include "nv50_ir_build_util.h"
26
27 extern "C" {
28 #include "util/u_math.h"
29 }
30
31 namespace nv50_ir {
32
33 bool
34 Instruction::isNop() const
35 {
36 if (op == OP_CONSTRAINT || op == OP_PHI)
37 return true;
38    if (terminator || join) // XXX: should terminator imply flow?
39 return false;
40 if (!fixed && op == OP_NOP)
41 return true;
42
43 if (def[0].exists() && def[0].rep()->reg.data.id < 0) {
44 for (int d = 1; defExists(d); ++d)
45 if (def[d].rep()->reg.data.id >= 0)
46             WARN("part of vector result is unused!\n");
47 return true;
48 }
49
50 if (op == OP_MOV || op == OP_UNION) {
51 if (!def[0].rep()->equals(getSrc(0)))
52 return false;
53 if (op == OP_UNION)
54 if (!def[0].rep()->equals(getSrc(1)))
55 return false;
56 return true;
57 }
58
59 return false;
60 }
61
62 bool Instruction::isDead() const
63 {
64 if (op == OP_STORE ||
65 op == OP_EXPORT)
66 return false;
67
68 for (int d = 0; defExists(d); ++d)
69 if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
70 return false;
71
72 if (terminator || asFlow())
73 return false;
74 if (fixed)
75 return false;
76
77 return true;
78 }
79
80 // =============================================================================
81
82 class CopyPropagation : public Pass
83 {
84 private:
85 virtual bool visit(BasicBlock *);
86 };
87
88 // Propagate all MOVs forward to make subsequent optimization easier, except if
89 // the sources stem from a phi, in which case we don't want to mess up potential
90 // swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
91 bool
92 CopyPropagation::visit(BasicBlock *bb)
93 {
94 Instruction *mov, *si, *next;
95
96 for (mov = bb->getEntry(); mov; mov = next) {
97 next = mov->next;
98 if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
99 continue;
100 si = mov->getSrc(0)->getInsn();
101 if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
102 // propagate
103 mov->def[0].replace(mov->getSrc(0), false);
104 delete_Instruction(prog, mov);
105 }
106 }
107 return true;
108 }
109
110 // =============================================================================
111
112 class LoadPropagation : public Pass
113 {
114 private:
115 virtual bool visit(BasicBlock *);
116
117 void checkSwapSrc01(Instruction *);
118
119 bool isCSpaceLoad(Instruction *);
120 bool isImmd32Load(Instruction *);
121 };
122
123 bool
124 LoadPropagation::isCSpaceLoad(Instruction *ld)
125 {
126 return ld && ld->op == OP_LOAD && ld->src[0].getFile() == FILE_MEMORY_CONST;
127 }
128
129 bool
130 LoadPropagation::isImmd32Load(Instruction *ld)
131 {
132 if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
133 return false;
134 return ld->src[0].getFile() == FILE_IMMEDIATE;
135 }
136
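// Loads from c[] space and 32-bit immediates can typically be sourced
// directly by the consuming instruction, but only in certain operand slots,
// so prefer moving them into src1. Swapping the sources of a SET is valid
// if the condition code is reversed (a < b == b > a), while SLCT selects
// between src0 and src1 and therefore needs the inverse code instead.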
137 void
138 LoadPropagation::checkSwapSrc01(Instruction *insn)
139 {
140 if (!prog->getTarget()->getOpInfo(insn).commutative)
141 if (insn->op != OP_SET && insn->op != OP_SLCT)
142 return;
143 if (insn->src[1].getFile() != FILE_GPR)
144 return;
145
146 Instruction *i0 = insn->getSrc(0)->getInsn();
147 Instruction *i1 = insn->getSrc(1)->getInsn();
148
149 if (isCSpaceLoad(i0)) {
150 if (!isCSpaceLoad(i1))
151 insn->swapSources(0, 1);
152 else
153 return;
154 } else
155 if (isImmd32Load(i0)) {
156 if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
157 insn->swapSources(0, 1);
158 else
159 return;
160 } else {
161 return;
162 }
163
164 if (insn->op == OP_SET)
165 insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
166 else
167 if (insn->op == OP_SLCT)
168 insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
169 }
170
171 bool
172 LoadPropagation::visit(BasicBlock *bb)
173 {
174 const Target *targ = prog->getTarget();
175 Instruction *next;
176
177 for (Instruction *i = bb->getEntry(); i; i = next) {
178 next = i->next;
179
180 if (i->srcExists(1))
181 checkSwapSrc01(i);
182
183 for (int s = 0; i->srcExists(s); ++s) {
184 Instruction *ld = i->getSrc(s)->getInsn();
185
186 if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
187 continue;
188 if (!targ->insnCanLoad(i, s, ld))
189 continue;
190
191          // propagate!
192 i->setSrc(s, ld->getSrc(0));
193 if (ld->src[0].isIndirect(0))
194 i->setIndirect(s, 0, ld->getIndirect(0, 0));
195
196 if (ld->getDef(0)->refCount() == 0)
197 delete_Instruction(prog, ld);
198 }
199 }
200 return true;
201 }
202
203 // =============================================================================
204
205 // Evaluate constant expressions.
206 class ConstantFolding : public Pass
207 {
208 public:
209 bool foldAll(Program *);
210
211 private:
212 virtual bool visit(BasicBlock *);
213
214 void expr(Instruction *, ImmediateValue *, ImmediateValue *);
215 void opnd(Instruction *, ImmediateValue *, int s);
216
217 void unary(Instruction *, const ImmediateValue&);
218
219 // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
220 CmpInstruction *findOriginForTestWithZero(Value *);
221
222 unsigned int foldCount;
223
224 BuildUtil bld;
225 };
226
227 // TODO: remember generated immediates and only revisit these
228 bool
229 ConstantFolding::foldAll(Program *prog)
230 {
231 unsigned int iterCount = 0;
232 do {
233 foldCount = 0;
234 if (!run(prog))
235 return false;
236 } while (foldCount && ++iterCount < 2);
237 return true;
238 }
239
240 bool
241 ConstantFolding::visit(BasicBlock *bb)
242 {
243 Instruction *i, *next;
244
245 for (i = bb->getEntry(); i; i = next) {
246 next = i->next;
247 if (i->op == OP_MOV) // continue early, MOV appears frequently
248 continue;
249
250 ImmediateValue *src0 = i->src[0].getImmediate();
251 ImmediateValue *src1 = i->src[1].getImmediate();
252
253 if (src0 && src1)
254 expr(i, src0, src1);
255 else
256 if (src0)
257 opnd(i, src0, 0);
258 else
259 if (src1)
260 opnd(i, src1, 1);
261 }
262 return true;
263 }
264
265 CmpInstruction *
266 ConstantFolding::findOriginForTestWithZero(Value *value)
267 {
268 if (!value)
269 return NULL;
270 Instruction *insn = value->getInsn();
271
272 while (insn && insn->op != OP_SET) {
273 Instruction *next = NULL;
274 switch (insn->op) {
275 case OP_NEG:
276 case OP_ABS:
277 case OP_CVT:
278 next = insn->getSrc(0)->getInsn();
279       if (!next || insn->sType != next->dType)
280 return NULL;
281 break;
282 case OP_MOV:
283 next = insn->getSrc(0)->getInsn();
284 break;
285 default:
286 return NULL;
287 }
288 insn = next;
289 }
290 return insn ? insn->asCmp() : NULL;
291 }
292
293 void
294 Modifier::applyTo(ImmediateValue& imm) const
295 {
296 switch (imm.reg.type) {
297 case TYPE_F32:
298 if (bits & NV50_IR_MOD_ABS)
299 imm.reg.data.f32 = fabsf(imm.reg.data.f32);
300 if (bits & NV50_IR_MOD_NEG)
301 imm.reg.data.f32 = -imm.reg.data.f32;
302 if (bits & NV50_IR_MOD_SAT) {
303 if (imm.reg.data.f32 < 0.0f)
304 imm.reg.data.f32 = 0.0f;
305 else
306 if (imm.reg.data.f32 > 1.0f)
307 imm.reg.data.f32 = 1.0f;
308 }
309 assert(!(bits & NV50_IR_MOD_NOT));
310 break;
311
312 case TYPE_S8: // NOTE: will be extended
313 case TYPE_S16:
314 case TYPE_S32:
315 case TYPE_U8: // NOTE: treated as signed
316 case TYPE_U16:
317 case TYPE_U32:
318 if (bits & NV50_IR_MOD_ABS)
319 imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
320 imm.reg.data.s32 : -imm.reg.data.s32;
321 if (bits & NV50_IR_MOD_NEG)
322 imm.reg.data.s32 = -imm.reg.data.s32;
323 if (bits & NV50_IR_MOD_NOT)
324 imm.reg.data.s32 = ~imm.reg.data.s32;
325 break;
326
327 case TYPE_F64:
328 if (bits & NV50_IR_MOD_ABS)
329 imm.reg.data.f64 = fabs(imm.reg.data.f64);
330 if (bits & NV50_IR_MOD_NEG)
331 imm.reg.data.f64 = -imm.reg.data.f64;
332 if (bits & NV50_IR_MOD_SAT) {
333 if (imm.reg.data.f64 < 0.0)
334 imm.reg.data.f64 = 0.0;
335 else
336 if (imm.reg.data.f64 > 1.0)
337 imm.reg.data.f64 = 1.0;
338 }
339 assert(!(bits & NV50_IR_MOD_NOT));
340 break;
341
342 default:
343 assert(!"invalid/unhandled type");
344 imm.reg.data.u64 = 0;
345 break;
346 }
347 }
348
349 operation
350 Modifier::getOp() const
351 {
352 switch (bits) {
353 case NV50_IR_MOD_ABS: return OP_ABS;
354 case NV50_IR_MOD_NEG: return OP_NEG;
355 case NV50_IR_MOD_SAT: return OP_SAT;
356 case NV50_IR_MOD_NOT: return OP_NOT;
357 case 0:
358 return OP_MOV;
359 default:
360 return OP_CVT;
361 }
362 }
363
364 void
365 ConstantFolding::expr(Instruction *i,
366 ImmediateValue *src0, ImmediateValue *src1)
367 {
368 ImmediateValue imm0(src0, i->sType);
369 ImmediateValue imm1(src1, i->sType);
370 struct Storage res;
371 struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
372
373 i->src[0].mod.applyTo(imm0);
374 i->src[1].mod.applyTo(imm1);
375
376 switch (i->op) {
377 case OP_MAD:
378 case OP_FMA:
379 case OP_MUL:
380 if (i->dnz && i->dType == TYPE_F32) {
381 if (!isfinite(a->data.f32))
382 a->data.f32 = 0.0f;
383 if (!isfinite(b->data.f32))
384 b->data.f32 = 0.0f;
385 }
386 switch (i->dType) {
387 case TYPE_F32: res.data.f32 = a->data.f32 * b->data.f32; break;
388 case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
389 case TYPE_S32:
390 case TYPE_U32: res.data.u32 = a->data.u32 * b->data.u32; break;
391 default:
392 return;
393 }
394 break;
395 case OP_DIV:
396 if (b->data.u32 == 0)
397 break;
398 switch (i->dType) {
399 case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
400 case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
401 case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
402 case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
403 default:
404 return;
405 }
406 break;
407 case OP_ADD:
408 switch (i->dType) {
409 case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
410 case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
411 case TYPE_S32:
412 case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
413 default:
414 return;
415 }
416 break;
417 case OP_POW:
418 switch (i->dType) {
419       case TYPE_F32: res.data.f32 = powf(a->data.f32, b->data.f32); break;
420       case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
421 default:
422 return;
423 }
424 break;
425 case OP_MAX:
426 switch (i->dType) {
427 case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
428 case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
429 case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
430 case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
431 default:
432 return;
433 }
434 break;
435 case OP_MIN:
436 switch (i->dType) {
437 case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
438 case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
439 case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
440 case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
441 default:
442 return;
443 }
444 break;
445 case OP_AND:
446 res.data.u64 = a->data.u64 & b->data.u64;
447 break;
448 case OP_OR:
449 res.data.u64 = a->data.u64 | b->data.u64;
450 break;
451 case OP_XOR:
452 res.data.u64 = a->data.u64 ^ b->data.u64;
453 break;
454 case OP_SHL:
455 res.data.u32 = a->data.u32 << b->data.u32;
456 break;
457 case OP_SHR:
458 switch (i->dType) {
459 case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
460 case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
461 default:
462 return;
463 }
464 break;
465 case OP_SLCT:
466 if (a->data.u32 != b->data.u32)
467 return;
468 res.data.u32 = a->data.u32;
469 break;
470 default:
471 return;
472 }
473 ++foldCount;
474
475 i->src[0].mod = Modifier(0);
476 i->src[1].mod = Modifier(0);
477
478 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
479 i->setSrc(1, NULL);
480
481 i->getSrc(0)->reg.data = res.data;
482
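// For MAD/FMA only the multiplication part has been folded, so turn the
// instruction into an ADD of the former src2 and the immediate product,
// then retry folding in case the remaining addend is an immediate, too.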
483 if (i->op == OP_MAD || i->op == OP_FMA) {
484 i->op = OP_ADD;
485
486 i->setSrc(1, i->getSrc(0));
487 i->setSrc(0, i->getSrc(2));
488 i->setSrc(2, NULL);
489
490 i->src[1].mod = i->src[2].mod;
491
492 src0 = i->src[0].getImmediate();
493 if (src0)
494 expr(i, src0, i->getSrc(1)->asImm());
495 } else {
496 i->op = OP_MOV;
497 }
498 }
499
500 void
501 ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
502 {
503 Storage res;
504
505 if (i->dType != TYPE_F32)
506 return;
507 switch (i->op) {
508 case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
509 case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
510 case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
511 case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
512 case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
513 case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
514 case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
515 case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
516 case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
517 case OP_PRESIN:
518 case OP_PREEX2:
519 // these should be handled in subsequent OP_SIN/COS/EX2
520 res.data.f32 = imm.reg.data.f32;
521 break;
522 default:
523 return;
524 }
525 i->op = OP_MOV;
526 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
527 i->src[0].mod = Modifier(0);
528 }
529
530 void
531 ConstantFolding::opnd(Instruction *i, ImmediateValue *src, int s)
532 {
533 const int t = !s;
534 const operation op = i->op;
535
536 ImmediateValue imm(src, i->sType);
537
538 i->src[s].mod.applyTo(imm);
539
540 switch (i->op) {
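// MUL by an immediate: first try to merge the factor into an adjacent
// single-use MUL (as a combined immediate or a power-of-two postFactor),
// then strength-reduce: x * 0 -> 0, x * +-1 -> (+-)x, x * +-2 -> add, and
// for integer types x * 2^n -> x << n, e.g. (sketch) "mul r, a, 4"
// becomes "shl r, a, 2".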
541 case OP_MUL:
542 if (i->dType == TYPE_F32 && i->getSrc(t)->refCount() == 1) {
543 Instruction *si = i->getSrc(t)->getUniqueInsn();
544
545 if (si && si->op == OP_MUL) {
546 float f = imm.reg.data.f32;
547
548 if (si->src[1].getImmediate()) {
549 f *= si->src[1].getImmediate()->reg.data.f32;
550 si->setSrc(1, new_ImmediateValue(prog, f));
551 i->def[0].replace(i->getSrc(t), false);
552 break;
553 } else {
554 int fac;
555 if (f == 0.125f) fac = -3;
556 else
557 if (f == 0.250f) fac = -2;
558 else
559 if (f == 0.500f) fac = -1;
560 else
561 if (f == 2.000f) fac = +1;
562 else
563 if (f == 4.000f) fac = +2;
564 else
565 if (f == 8.000f) fac = +3;
566 else
567 fac = 0;
568 if (fac) {
569 // FIXME: allowed & modifier
570 si->postFactor = fac;
571 i->def[0].replace(i->getSrc(t), false);
572 break;
573 }
574 }
575 }
576 }
577 if (imm.isInteger(0)) {
578 i->op = OP_MOV;
579 i->setSrc(0, i->getSrc(s));
580 i->setSrc(1, NULL);
581 } else
582 if (imm.isInteger(1) || imm.isInteger(-1)) {
583 if (imm.isNegative())
584 i->src[t].mod = i->src[t].mod ^ Modifier(NV50_IR_MOD_NEG);
585 i->op = i->src[t].mod.getOp();
586 if (s == 0) {
587 i->setSrc(0, i->getSrc(1));
588 i->src[0].mod = i->src[1].mod;
589 i->src[1].mod = 0;
590 }
591 if (i->op != OP_CVT)
592 i->src[0].mod = 0;
593 i->setSrc(1, NULL);
594 } else
595 if (imm.isInteger(2) || imm.isInteger(-2)) {
596 if (imm.isNegative())
597 i->src[t].mod = i->src[t].mod ^ Modifier(NV50_IR_MOD_NEG);
598 i->op = OP_ADD;
599 i->setSrc(s, i->getSrc(t));
600 i->src[s].mod = i->src[t].mod;
601 } else
602 if (!isFloatType(i->sType) && !imm.isNegative() && imm.isPow2()) {
603 i->op = OP_SHL;
604 imm.applyLog2();
605 i->setSrc(1, new_ImmediateValue(prog, imm.reg.data.u32));
606 }
607 break;
608 case OP_ADD:
609 if (imm.isInteger(0)) {
610 if (s == 0) {
611 i->setSrc(0, i->getSrc(1));
612 i->src[0].mod = i->src[1].mod;
613 }
614 i->setSrc(1, NULL);
615 i->op = i->src[0].mod.getOp();
616 if (i->op != OP_CVT)
617 i->src[0].mod = Modifier(0);
618 }
619 break;
620
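// Division by a constant becomes multiply-high plus shifts, following
// Granlund/Montgomery, "Division by Invariant Integers using
// Multiplication". For TYPE_U32 and a non-power-of-two divisor d, take
// l = ceil(log2(d)) and m = (2^32 * (2^l - d)) / d + 1; with
// q = mulhi(n, m) the quotient is (q + ((n - q) >> 1)) >> (l - 1).
// Worked example: d = 7 gives l = 3, m = 613566757; for n = 100,
// q = 14, (100 - 14) >> 1 = 43, and (14 + 43) >> 2 = 14 = 100 / 7.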
621 case OP_DIV:
622 if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
623 break;
624 bld.setPosition(i, false);
625 if (imm.reg.data.u32 == 0) {
626 break;
627 } else
628 if (imm.reg.data.u32 == 1) {
629 i->op = OP_MOV;
630 i->setSrc(1, NULL);
631 } else
632 if (i->dType == TYPE_U32 && imm.isPow2()) {
633 i->op = OP_SHR;
634 i->setSrc(1, bld.mkImm(util_logbase2(imm.reg.data.u32)));
635 } else
636 if (i->dType == TYPE_U32) {
637 Instruction *mul;
638 Value *tA, *tB;
639 const uint32_t d = imm.reg.data.u32;
640 uint32_t m;
641 int r, s;
642 uint32_t l = util_logbase2(d);
643 if (((uint32_t)1 << l) < d)
644 ++l;
645 m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
646 r = l ? 1 : 0;
647 s = l ? (l - 1) : 0;
648
649 tA = bld.getSSA();
650 tB = bld.getSSA();
651 mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
652 bld.loadImm(NULL, m));
653 mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
654 bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
655 tA = bld.getSSA();
656 if (r)
657 bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
658 else
659 tA = tB;
660 tB = s ? bld.getSSA() : i->getDef(0);
661 bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
662 if (s)
663 bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));
664
665 delete_Instruction(prog, i);
666 } else
667 if (imm.reg.data.s32 == -1) {
668 i->op = OP_NEG;
669 i->setSrc(1, NULL);
670 } else {
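// Signed variant of the same scheme: m = 2^(32 + l - 1) / |d| + 1 - 2^32,
// q = (mulhi(n, m) + n) >> (l - 1), and the SET below adds one for
// negative n (integer SET is assumed to produce 0 / -1 here, so the SUB
// effectively subtracts -1). A negative divisor negates the result at
// the end.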
671 LValue *tA, *tB;
672 LValue *tD;
673 const int32_t d = imm.reg.data.s32;
674 int32_t m;
675 int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
676 if ((1 << l) < abs(d))
677 ++l;
678 if (!l)
679 l = 1;
680 m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);
681
682 tA = bld.getSSA();
683 tB = bld.getSSA();
684 bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
685 i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
686 if (l > 1)
687 bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
688 else
689 tB = tA;
690 tA = bld.getSSA();
691 bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, i->getSrc(0), bld.mkImm(0));
692 tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
693 bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
694 if (d < 0)
695             bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);
696
697 delete_Instruction(prog, i);
698 }
699 break;
700
701    case OP_SET: // TODO: SET_AND, SET_OR, SET_XOR
702 {
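// @t is a (converted) SET result being compared against the immediate 0,
// so fold the outer test into the inner SET's condition code. For x in
// {0, 1}: (x != 0) and (x > 0) keep the original code, (x == 0) and
// (x <= 0) invert it, and (x < 0) / (x >= 0) are constantly false / true.
// When the immediate sits in src0, the code is reversed first.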
703 CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
704 CondCode cc, ccZ;
705 if (i->src[t].mod != Modifier(0))
706 return;
707 if (imm.reg.data.u32 != 0 || !si || si->op != OP_SET)
708 return;
709 cc = si->setCond;
710 ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
711 if (s == 0)
712 ccZ = reverseCondCode(ccZ);
713 switch (ccZ) {
714 case CC_LT: cc = CC_FL; break;
715 case CC_GE: cc = CC_TR; break;
716 case CC_EQ: cc = inverseCondCode(cc); break;
717 case CC_LE: cc = inverseCondCode(cc); break;
718 case CC_GT: break;
719 case CC_NE: break;
720 default:
721 return;
722 }
723 i->asCmp()->setCond = cc;
724 i->setSrc(0, si->src[0]);
725 i->setSrc(1, si->src[1]);
726 i->sType = si->sType;
727 }
728 break;
729
730 case OP_SHL:
731 {
732 if (s != 1 || i->src[0].mod != Modifier(0))
733 break;
734 // try to concatenate shifts
735 Instruction *si = i->getSrc(0)->getInsn();
736 if (!si ||
737 si->op != OP_SHL || si->src[1].mod != Modifier(0))
738 break;
739 ImmediateValue *siImm = si->src[1].getImmediate();
740 if (siImm) {
741 bld.setPosition(i, false);
742 i->setSrc(0, si->getSrc(0));
743 i->setSrc(1, bld.loadImm(NULL,
744 imm.reg.data.u32 + siImm->reg.data.u32));
745 }
746 }
747 break;
748
749 case OP_ABS:
750 case OP_NEG:
751 case OP_LG2:
752 case OP_RCP:
753 case OP_SQRT:
754 case OP_RSQ:
755 case OP_PRESIN:
756 case OP_SIN:
757 case OP_COS:
758 case OP_PREEX2:
759 case OP_EX2:
760 unary(i, imm);
761 break;
762 default:
763 return;
764 }
765 if (i->op != op)
766 foldCount++;
767 }
768
769 // =============================================================================
770
771 // Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
772 class ModifierFolding : public Pass
773 {
774 private:
775 virtual bool visit(BasicBlock *);
776 };
777
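// E.g. (sketch): "neg t, a; add r, t, b" becomes "add r, -a, b" when the
// target supports a NEG modifier on that source; ABS absorbs an inner NEG,
// and NEG of NEG cancels out.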
778 bool
779 ModifierFolding::visit(BasicBlock *bb)
780 {
781 const Target *target = prog->getTarget();
782
783 Instruction *i, *next, *mi;
784 Modifier mod;
785
786 for (i = bb->getEntry(); i; i = next) {
787 next = i->next;
788
789 if (0 && i->op == OP_SUB) {
790       // turn "sub" into "add neg" (do we really want this?)
791 i->op = OP_ADD;
792 i->src[0].mod = i->src[0].mod ^ Modifier(NV50_IR_MOD_NEG);
793 }
794
795 for (int s = 0; s < 3 && i->srcExists(s); ++s) {
796 mi = i->getSrc(s)->getInsn();
797 if (!mi ||
798 mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
799 continue;
800 if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
801 if ((i->op != OP_ADD &&
802 i->op != OP_MUL) ||
803 (mi->op != OP_ABS &&
804 mi->op != OP_NEG))
805 continue;
806 } else
807 if (i->sType != mi->dType) {
808 continue;
809 }
810 if ((mod = Modifier(mi->op)) == Modifier(0))
811 continue;
812 mod = mod * mi->src[0].mod;
813
814 if ((i->op == OP_ABS) || i->src[s].mod.abs()) {
815 // abs neg [abs] = abs
816 mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
817 } else
818 if ((i->op == OP_NEG) && mod.neg()) {
819 assert(s == 0);
820 // neg as both opcode and modifier on same insn is prohibited
821 // neg neg abs = abs, neg neg = identity
822 mod = mod & Modifier(~NV50_IR_MOD_NEG);
823 i->op = mod.getOp();
824 mod = mod & Modifier(~NV50_IR_MOD_ABS);
825 if (mod == Modifier(0))
826 i->op = OP_MOV;
827 }
828
829 if (target->isModSupported(i, s, mod)) {
830 i->setSrc(s, mi->getSrc(0));
831 i->src[s].mod = i->src[s].mod * mod;
832 }
833 }
834
835 if (i->op == OP_SAT) {
836 mi = i->getSrc(0)->getInsn();
837 if (mi &&
838 mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
839 mi->saturate = 1;
840 mi->setDef(0, i->getDef(0));
841 delete_Instruction(prog, i);
842 }
843 }
844 }
845
846 return true;
847 }
848
849 // =============================================================================
850
851 // MUL + ADD -> MAD/FMA
852 // MIN/MAX(a, a) -> a, etc.
853 // SLCT(a, b, const) -> cc(const) ? a : b
854 // RCP(RCP(a)) -> a
855 // MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
856 class AlgebraicOpt : public Pass
857 {
858 private:
859 virtual bool visit(BasicBlock *);
860
861 void handleADD(Instruction *);
862 void handleMINMAX(Instruction *);
863 void handleRCP(Instruction *);
864 void handleSLCT(Instruction *);
865 void handleLOGOP(Instruction *);
866 void handleCVT(Instruction *);
867 };
868
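// Fuse a single-use MUL feeding an ADD in the same BB into MAD/FMA, e.g.
// (sketch): "mul t, a, b; add r, t, c" -> "mad r, a, b, c". Only NEG
// modifiers survive the rewrite, so anything else bails out early.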
869 void
870 AlgebraicOpt::handleADD(Instruction *add)
871 {
872 Value *src0 = add->getSrc(0);
873 Value *src1 = add->getSrc(1);
874 Value *src;
875 int s;
876 Modifier mod[4];
877
878 if (!prog->getTarget()->isOpSupported(OP_MAD, add->dType))
879 return;
880
881 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
882 return;
883
884 if (src0->refCount() == 1 &&
885 src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_MUL)
886 s = 0;
887 else
888 if (src1->refCount() == 1 &&
889 src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_MUL)
890 s = 1;
891 else
892 return;
893
894 if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
895 (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
896 return;
897
898 src = add->getSrc(s);
899
900 mod[0] = add->src[0].mod;
901 mod[1] = add->src[1].mod;
902 mod[2] = src->getUniqueInsn()->src[0].mod;
903 mod[3] = src->getUniqueInsn()->src[1].mod;
904
905 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & Modifier(~NV50_IR_MOD_NEG))
906 return;
907
908 add->op = OP_MAD;
909 add->subOp = src->getInsn()->subOp; // potentially mul-high
910
911 add->setSrc(2, add->src[s ? 0 : 1]);
912
913 add->setSrc(0, src->getInsn()->getSrc(0));
914 add->src[0].mod = mod[2] ^ mod[s];
915 add->setSrc(1, src->getInsn()->getSrc(1));
916 add->src[1].mod = mod[3];
917 }
918
919 void
920 AlgebraicOpt::handleMINMAX(Instruction *minmax)
921 {
922 Value *src0 = minmax->getSrc(0);
923 Value *src1 = minmax->getSrc(1);
924
925 if (src0 != src1 || src0->reg.file != FILE_GPR)
926 return;
927 if (minmax->src[0].mod == minmax->src[1].mod) {
928 if (minmax->src[0].mod) {
929 minmax->op = OP_CVT;
930 minmax->setSrc(1, NULL);
931 } else {
932 minmax->def[0].replace(minmax->getSrc(0), false);
933 minmax->bb->remove(minmax);
934 }
935 } else {
936 // TODO:
937 // min(x, -x) = -abs(x)
938 // min(x, -abs(x)) = -abs(x)
939 // min(x, abs(x)) = x
940 // max(x, -abs(x)) = x
941 // max(x, abs(x)) = abs(x)
942 // max(x, -x) = abs(x)
943 }
944 }
945
946 void
947 AlgebraicOpt::handleRCP(Instruction *rcp)
948 {
949 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
950
951 if (si && si->op == OP_RCP) {
952 Modifier mod = rcp->src[0].mod * si->src[0].mod;
953 rcp->op = mod.getOp();
954 rcp->setSrc(0, si->getSrc(0));
955 }
956 }
957
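// SLCT dst, a, b, c picks a or b depending on how c compares against zero.
// With an immediate c the outcome is known at compile time, and with
// a == b it is irrelevant; either way the op collapses into a MOV.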
958 void
959 AlgebraicOpt::handleSLCT(Instruction *slct)
960 {
961 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
962 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
963 slct->setSrc(0, slct->getSrc(1));
964 } else
965 if (slct->getSrc(0) != slct->getSrc(1)) {
966 return;
967 }
968 slct->op = OP_MOV;
969 slct->setSrc(1, NULL);
970 slct->setSrc(2, NULL);
971 }
972
973 void
974 AlgebraicOpt::handleLOGOP(Instruction *logop)
975 {
976 Value *src0 = logop->getSrc(0);
977 Value *src1 = logop->getSrc(1);
978
979 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
980 return;
981
982 if (src0 == src1) {
983 if (logop->src[0].mod != Modifier(0) ||
984 logop->src[1].mod != Modifier(0))
985 return;
986 if (logop->op == OP_AND || logop->op == OP_OR) {
987 logop->def[0].replace(logop->getSrc(0), false);
988 delete_Instruction(prog, logop);
989 }
990 } else {
991 // try AND(SET, SET) -> SET_AND(SET)
992 Instruction *set0 = src0->getInsn();
993 Instruction *set1 = src1->getInsn();
994
995 if (!set0 || set0->fixed || !set1 || set1->fixed)
996 return;
997 if (set1->op != OP_SET) {
998 Instruction *xchg = set0;
999 set0 = set1;
1000 set1 = xchg;
1001 if (set1->op != OP_SET)
1002 return;
1003 }
1004 if (set0->op != OP_SET &&
1005 set0->op != OP_SET_AND &&
1006 set0->op != OP_SET_OR &&
1007 set0->op != OP_SET_XOR)
1008 return;
1009 if (set0->getDef(0)->refCount() > 1 &&
1010 set1->getDef(0)->refCount() > 1)
1011 return;
1012 if (set0->getPredicate() || set1->getPredicate())
1013 return;
1014 // check that they don't source each other
1015 for (int s = 0; s < 2; ++s)
1016 if (set0->getSrc(s) == set1->getDef(0) ||
1017 set1->getSrc(s) == set0->getDef(0))
1018 return;
1019
1020 set0 = set0->clone(true);
1021 set1 = set1->clone(false);
1022 logop->bb->insertAfter(logop, set1);
1023 logop->bb->insertAfter(logop, set0);
1024
1025 set0->dType = TYPE_U8;
1026 set0->getDef(0)->reg.file = FILE_PREDICATE;
1027 set0->getDef(0)->reg.size = 1;
1028 set1->setSrc(2, set0->getDef(0));
1029 switch (logop->op) {
1030 case OP_AND: set1->op = OP_SET_AND; break;
1031 case OP_OR: set1->op = OP_SET_OR; break;
1032 case OP_XOR: set1->op = OP_SET_XOR; break;
1033 default:
1034 assert(0);
1035 break;
1036 }
1037 set1->setDef(0, logop->getDef(0));
1038 delete_Instruction(prog, logop);
1039 }
1040 }
1041
1042 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1043 void
1044 AlgebraicOpt::handleCVT(Instruction *cvt)
1045 {
1046 if (cvt->sType != TYPE_F32 ||
1047 cvt->dType != TYPE_S32 || cvt->src[0].mod != Modifier(0))
1048 return;
1049 Instruction *insn = cvt->getSrc(0)->getInsn();
1050 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1051 return;
1052 if (insn->src[0].mod != Modifier(0))
1053 return;
1054 insn = insn->getSrc(0)->getInsn();
1055 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32)
1056 return;
1057
1058 Instruction *bset = insn->clone(false);
1059 bset->dType = TYPE_U32;
1060 bset->setDef(0, cvt->getDef(0));
1061 cvt->bb->insertAfter(cvt, bset);
1062 delete_Instruction(prog, cvt);
1063 }
1064
1065 bool
1066 AlgebraicOpt::visit(BasicBlock *bb)
1067 {
1068 Instruction *next;
1069 for (Instruction *i = bb->getEntry(); i; i = next) {
1070 next = i->next;
1071 switch (i->op) {
1072 case OP_ADD:
1073 handleADD(i);
1074 break;
1075 case OP_RCP:
1076 handleRCP(i);
1077 break;
1078 case OP_MIN:
1079 case OP_MAX:
1080 handleMINMAX(i);
1081 break;
1082 case OP_SLCT:
1083 handleSLCT(i);
1084 break;
1085 case OP_AND:
1086 case OP_OR:
1087 case OP_XOR:
1088 handleLOGOP(i);
1089 break;
1090 case OP_CVT:
1091 handleCVT(i);
1092 break;
1093 default:
1094 break;
1095 }
1096 }
1097
1098 return true;
1099 }
1100
1101 // =============================================================================
1102
1103 static inline void
1104 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
1105 {
1106 if (offset != ldst->getSrc(0)->reg.data.offset) {
1107 if (ldst->getSrc(0)->refCount() > 1)
1108 ldst->setSrc(0, ldst->getSrc(0)->clone(fn));
1109 ldst->getSrc(0)->reg.data.offset = offset;
1110 }
1111 }
1112
1113 // Combine loads and stores, forward stores to loads where possible.
1114 class MemoryOpt : public Pass
1115 {
1116 private:
1117 class Record
1118 {
1119 public:
1120 Record *next;
1121 Instruction *insn;
1122 const Value *rel[2];
1123 const Value *base;
1124 int32_t offset;
1125 int8_t fileIndex;
1126 uint8_t size;
1127 bool locked;
1128 Record *prev;
1129
1130 bool overlaps(const Instruction *ldst) const;
1131
1132 inline void link(Record **);
1133 inline void unlink(Record **);
1134 inline void set(const Instruction *ldst);
1135 };
1136
1137 public:
1138 MemoryOpt();
1139
1140 Record *loads[DATA_FILE_COUNT];
1141 Record *stores[DATA_FILE_COUNT];
1142
1143 MemoryPool recordPool;
1144
1145 private:
1146 virtual bool visit(BasicBlock *);
1147 bool runOpt(BasicBlock *);
1148
1149 Record **getList(const Instruction *);
1150
1151 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
1152
1153 // merge @insn into load/store instruction from @rec
1154 bool combineLd(Record *rec, Instruction *ld);
1155 bool combineSt(Record *rec, Instruction *st);
1156
1157 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
1158 bool replaceLdFromSt(Instruction *ld, Record *stRec);
1159 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
1160
1161 void addRecord(Instruction *ldst);
1162 void purgeRecords(Instruction *const st, DataFile);
1163 void lockStores(Instruction *const ld);
1164 void reset();
1165
1166 private:
1167 Record *prevRecord;
1168 };
1169
1170 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
1171 {
1172 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
1173 loads[i] = NULL;
1174 stores[i] = NULL;
1175 }
1176 prevRecord = NULL;
1177 }
1178
1179 void
1180 MemoryOpt::reset()
1181 {
1182 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
1183 Record *it, *next;
1184 for (it = loads[i]; it; it = next) {
1185 next = it->next;
1186 recordPool.release(it);
1187 }
1188 loads[i] = NULL;
1189 for (it = stores[i]; it; it = next) {
1190 next = it->next;
1191 recordPool.release(it);
1192 }
1193 stores[i] = NULL;
1194 }
1195 }
1196
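// Merge a load adjacent to a previously recorded one into a single wider
// load, e.g. two 4-byte loads at offsets 0 and 4 become one 8-byte load.
// 96-bit results are only allowed for VFETCH, and the combined access
// must stay naturally aligned.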
1197 bool
1198 MemoryOpt::combineLd(Record *rec, Instruction *ld)
1199 {
1200 int32_t offRc = rec->offset;
1201 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1202 int sizeRc = rec->size;
1203 int sizeLd = typeSizeof(ld->dType);
1204 int size = sizeRc + sizeLd;
1205 int d, j;
1206
1207    // only VFETCH can do a 96-byte load
1208 if (ld->op != OP_VFETCH && size == 12)
1209 return false;
1210 // no unaligned loads
1211 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
1212 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
1213 return false;
1214
1215 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
1216
1217 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
1218
1219 if (offLd < offRc) {
1220 int sz;
1221 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
1222 // d: nr of definitions in ld
1223 // j: nr of definitions in rec->insn, move:
1224 for (d = d + j - 1; j > 0; --j, --d)
1225 rec->insn->setDef(d, rec->insn->getDef(j - 1));
1226
1227 if (rec->insn->getSrc(0)->refCount() > 1)
1228 rec->insn->setSrc(0, rec->insn->getSrc(0)->clone(func));
1229 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
1230
1231 d = 0;
1232 } else {
1233 d = j;
1234 }
1235 // move definitions of @ld to @rec->insn
1236 for (j = 0; sizeLd; ++j, ++d) {
1237 sizeLd -= ld->getDef(j)->reg.size;
1238 rec->insn->setDef(d, ld->getDef(j));
1239 }
1240
1241 rec->size = size;
1242 rec->insn->setType(typeOfSize(size));
1243
1244 delete_Instruction(prog, ld);
1245
1246 return true;
1247 }
1248
1249 bool
1250 MemoryOpt::combineSt(Record *rec, Instruction *st)
1251 {
1252 int32_t offRc = rec->offset;
1253 int32_t offSt = st->getSrc(0)->reg.data.offset;
1254 int sizeRc = rec->size;
1255 int sizeSt = typeSizeof(st->dType);
1256 int s = sizeSt / 4;
1257 int size = sizeRc + sizeSt;
1258 int j, k;
1259 Value *src[4]; // no modifiers in ValueRef allowed for st
1260 Value *extra[3];
1261
1262 if (size == 12) // XXX: check if EXPORT a[] can do this after all
1263 return false;
1264 if (size == 8 && MIN2(offRc, offSt) & 0x7)
1265 return false;
1266
1267 st->takeExtraSources(0, extra); // save predicate and indirect address
1268
1269 if (offRc < offSt) {
1270 // save values from @st
1271 for (s = 0; sizeSt; ++s) {
1272 sizeSt -= st->getSrc(s + 1)->reg.size;
1273 src[s] = st->getSrc(s + 1);
1274 }
1275 // set record's values as low sources of @st
1276 for (j = 1; sizeRc; ++j) {
1277 sizeRc -= st->getSrc(j)->reg.size;
1278 st->setSrc(j, rec->insn->getSrc(j));
1279 }
1280 // set saved values as high sources of @st
1281 for (k = j, j = 0; j < s; ++j)
1282 st->setSrc(k++, src[j]);
1283
1284 updateLdStOffset(st, offRc, func);
1285 } else {
1286 for (j = 1; sizeSt; ++j)
1287 sizeSt -= st->getSrc(j)->reg.size;
1288 for (s = 1; sizeRc; ++j, ++s) {
1289 sizeRc -= rec->insn->getSrc(s)->reg.size;
1290 st->setSrc(j, rec->insn->getSrc(s));
1291 }
1292 rec->offset = offSt;
1293 }
1294 st->putExtraSources(0, extra); // restore pointer and predicate
1295
1296 delete_Instruction(prog, rec->insn);
1297 rec->insn = st;
1298 rec->size = size;
1299 rec->insn->setType(typeOfSize(size));
1300 return true;
1301 }
1302
1303 void
1304 MemoryOpt::Record::set(const Instruction *ldst)
1305 {
1306 const Symbol *mem = ldst->getSrc(0)->asSym();
1307 fileIndex = mem->reg.fileIndex;
1308 rel[0] = ldst->getIndirect(0, 0);
1309 rel[1] = ldst->getIndirect(0, 1);
1310 offset = mem->reg.data.offset;
1311 base = mem->getBase();
1312 size = typeSizeof(ldst->sType);
1313 }
1314
1315 void
1316 MemoryOpt::Record::link(Record **list)
1317 {
1318 next = *list;
1319 if (next)
1320 next->prev = this;
1321 prev = NULL;
1322 *list = this;
1323 }
1324
1325 void
1326 MemoryOpt::Record::unlink(Record **list)
1327 {
1328 if (next)
1329 next->prev = prev;
1330 if (prev)
1331 prev->next = next;
1332 else
1333 *list = next;
1334 }
1335
1336 MemoryOpt::Record **
1337 MemoryOpt::getList(const Instruction *insn)
1338 {
1339 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
1340 return &loads[insn->src[0].getFile()];
1341 return &stores[insn->src[0].getFile()];
1342 }
1343
1344 void
1345 MemoryOpt::addRecord(Instruction *i)
1346 {
1347 Record **list = getList(i);
1348 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
1349
1350 it->link(list);
1351 it->set(i);
1352 it->insn = i;
1353 it->locked = false;
1354 }
1355
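// Look up a previous access that the new one can be merged with or
// replaced by. Both must use the same indirect addresses and file index,
// and candidates are restricted to the same 16-byte slice (offset >> 4),
// so a combined access never grows past vec4 size.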
1356 MemoryOpt::Record *
1357 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
1358 {
1359 const Symbol *sym = insn->getSrc(0)->asSym();
1360 const int size = typeSizeof(insn->sType);
1361 Record *rec = NULL;
1362 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
1363
1364 for (; it; it = it->next) {
1365 if (it->locked && insn->op != OP_LOAD)
1366 continue;
1367 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
1368 it->rel[0] != insn->getIndirect(0, 0) ||
1369 it->fileIndex != sym->reg.fileIndex ||
1370 it->rel[1] != insn->getIndirect(0, 1))
1371 continue;
1372
1373 if (it->offset < sym->reg.data.offset) {
1374 if (it->offset + it->size >= sym->reg.data.offset) {
1375 isAdj = (it->offset + it->size == sym->reg.data.offset);
1376 if (!isAdj)
1377 return it;
1378 if (!(it->offset & 0x7))
1379 rec = it;
1380 }
1381 } else {
1382 isAdj = it->offset != sym->reg.data.offset;
1383 if (size <= it->size && !isAdj)
1384 return it;
1385 else
1386 if (!(sym->reg.data.offset & 0x7))
1387 if (it->offset - size <= sym->reg.data.offset)
1388 rec = it;
1389 }
1390 }
1391 return rec;
1392 }
1393
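// Store-to-load forwarding: walk the store's sources until the load's
// offset is reached, then substitute them for the load's definitions,
// e.g. (sketch) "st l[0], r1; ld r2, l[0]" lets r2 simply reuse r1.
// Sizes and register files must match.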
1394 bool
1395 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
1396 {
1397 Instruction *st = rec->insn;
1398 int32_t offSt = rec->offset;
1399 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1400 int d, s;
1401
1402 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
1403 offSt += st->getSrc(s)->reg.size;
1404 if (offSt != offLd)
1405 return false;
1406
1407 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
1408 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
1409 return false;
1410 if (st->getSrc(s)->reg.file != FILE_GPR)
1411 return false;
1412 ld->def[d].replace(st->getSrc(s), false);
1413 }
1414 ld->bb->remove(ld);
1415 return true;
1416 }
1417
1418 bool
1419 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
1420 {
1421 Instruction *ldR = rec->insn;
1422 int32_t offR = rec->offset;
1423 int32_t offE = ldE->getSrc(0)->reg.data.offset;
1424 int dR, dE;
1425
1426 assert(offR <= offE);
1427 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
1428 offR += ldR->getDef(dR)->reg.size;
1429 if (offR != offE)
1430 return false;
1431
1432 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
1433 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
1434 return false;
1435 ldE->def[dE].replace(ldR->getDef(dR), false);
1436 }
1437
1438 delete_Instruction(prog, ldE);
1439 return true;
1440 }
1441
1442 bool
1443 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
1444 {
1445 const Instruction *const ri = rec->insn;
1446 Value *extra[3];
1447
1448 int32_t offS = st->getSrc(0)->reg.data.offset;
1449 int32_t offR = rec->offset;
1450 int32_t endS = offS + typeSizeof(st->dType);
1451 int32_t endR = offR + typeSizeof(ri->dType);
1452
1453 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
1454
1455 st->takeExtraSources(0, extra);
1456
1457 if (offR < offS) {
1458 Value *vals[4];
1459 int s, n;
1460 int k = 0;
1461 // get non-replaced sources of ri
1462 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
1463 vals[k++] = ri->getSrc(s);
1464 n = s;
1465 // get replaced sources of st
1466 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
1467 vals[k++] = st->getSrc(s);
1468 // skip replaced sources of ri
1469 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
1470 // get non-replaced sources after values covered by st
1471 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
1472 vals[k++] = ri->getSrc(s);
1473 for (s = 0; s < k; ++s)
1474 st->setSrc(s + 1, vals[s]);
1475 st->setSrc(0, ri->getSrc(0));
1476 } else
1477 if (endR > endS) {
1478 int j, s;
1479 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
1480 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
1481 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
1482 st->setSrc(s++, ri->getSrc(j));
1483 }
1484 st->putExtraSources(0, extra);
1485
1486 delete_Instruction(prog, rec->insn);
1487
1488 rec->insn = st;
1489 rec->offset = st->getSrc(0)->reg.data.offset;
1490
1491 st->setType(typeOfSize(rec->size));
1492
1493 return true;
1494 }
1495
1496 bool
1497 MemoryOpt::Record::overlaps(const Instruction *ldst) const
1498 {
1499 Record that;
1500 that.set(ldst);
1501
1502 if (this->fileIndex != that.fileIndex)
1503 return false;
1504
1505 if (this->rel[0] || that.rel[0])
1506 return this->base == that.base;
1507 return
1508 (this->offset < that.offset + that.size) &&
1509 (this->offset + this->size > that.offset);
1510 }
1511
1512 // If we encounter later stores to the same location, we must neither
1513 // eliminate a store whose value @ld has observed, nor merge it with
1514 // those later stores.
1515 // The stored value can, however, still be used to determine the value
1516 // returned by future loads.
1517 void
1518 MemoryOpt::lockStores(Instruction *const ld)
1519 {
1520 for (Record *r = stores[ld->src[0].getFile()]; r; r = r->next)
1521 if (!r->locked && r->overlaps(ld))
1522 r->locked = true;
1523 }
1524
1525 // Prior loads from the location of @st are no longer valid.
1526 // Stores to the location of @st may no longer be used to derive
1527 // the value at it nor be coalesced into later stores.
1528 void
1529 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
1530 {
1531 if (st)
1532 f = st->src[0].getFile();
1533
1534 for (Record *r = loads[f]; r; r = r->next)
1535 if (!st || r->overlaps(st))
1536 r->unlink(&loads[f]);
1537
1538 for (Record *r = stores[f]; r; r = r->next)
1539 if (!st || r->overlaps(st))
1540 r->unlink(&stores[f]);
1541 }
1542
1543 bool
1544 MemoryOpt::visit(BasicBlock *bb)
1545 {
1546 bool ret = runOpt(bb);
1547    // Run again: a single pass cannot combine four 32-bit ld/st into one
1548    // 128-bit ld/st where 96-bit memory operations are forbidden.
1549 if (ret)
1550 ret = runOpt(bb);
1551 return ret;
1552 }
1553
1554 bool
1555 MemoryOpt::runOpt(BasicBlock *bb)
1556 {
1557 Instruction *ldst, *next;
1558 Record *rec;
1559 bool isAdjacent = true;
1560
1561 for (ldst = bb->getEntry(); ldst; ldst = next) {
1562 bool keep = true;
1563 bool isLoad = true;
1564 next = ldst->next;
1565
1566 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
1567 if (ldst->isDead()) {
1568 // might have been produced by earlier optimization
1569 delete_Instruction(prog, ldst);
1570 continue;
1571 }
1572 } else
1573 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
1574 isLoad = false;
1575 } else {
1576          // TODO: maybe have all fixed ops act as a barrier?
1577 if (ldst->op == OP_CALL) {
1578 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1579 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1580 purgeRecords(NULL, FILE_MEMORY_SHARED);
1581 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1582 } else
1583 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
1584 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1585 }
1586 continue;
1587 }
1588 if (ldst->getPredicate()) // TODO: handle predicated ld/st
1589 continue;
1590
1591 if (isLoad) {
1592 DataFile file = ldst->src[0].getFile();
1593
1594 // if ld l[]/g[] look for previous store to eliminate the reload
1595 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
1596 // TODO: shared memory ?
1597 rec = findRecord(ldst, false, isAdjacent);
1598 if (rec && !isAdjacent)
1599 keep = !replaceLdFromSt(ldst, rec);
1600 }
1601
1602 // or look for ld from the same location and replace this one
1603 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
1604 if (rec) {
1605 if (!isAdjacent)
1606 keep = !replaceLdFromLd(ldst, rec);
1607 else
1608 // or combine a previous load with this one
1609 keep = !combineLd(rec, ldst);
1610 }
1611 if (keep)
1612 lockStores(ldst);
1613 } else {
1614 rec = findRecord(ldst, false, isAdjacent);
1615 if (rec) {
1616 if (!isAdjacent)
1617 keep = !replaceStFromSt(ldst, rec);
1618 else
1619 keep = !combineSt(rec, ldst);
1620 }
1621 if (keep)
1622 purgeRecords(ldst, DATA_FILE_COUNT);
1623 }
1624 if (keep)
1625 addRecord(ldst);
1626 }
1627 reset();
1628
1629 return true;
1630 }
1631
1632 // =============================================================================
1633
1634 // Turn control flow into predicated instructions (after register allocation!).
1635 // TODO:
1636 // Could move this to before register allocation on NVC0 and also handle nested
1637 // constructs.
1638 class FlatteningPass : public Pass
1639 {
1640 private:
1641 virtual bool visit(BasicBlock *);
1642
1643 bool tryPredicateConditional(BasicBlock *);
1644 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
1645 void tryPropagateBranch(BasicBlock *);
1646 inline bool isConstantCondition(Value *pred);
1647 inline bool mayPredicate(const Instruction *, const Value *pred) const;
1648 inline void removeFlow(Instruction *);
1649 };
1650
1651 bool
1652 FlatteningPass::isConstantCondition(Value *pred)
1653 {
1654 Instruction *insn = pred->getUniqueInsn();
1655 assert(insn);
1656 if (insn->op != OP_SET || insn->srcExists(2))
1657 return false;
1658
1659 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
1660 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
1661 DataFile file;
1662 if (ld) {
1663 if (ld->op != OP_MOV && ld->op != OP_LOAD)
1664 return false;
1665 if (ld->src[0].isIndirect(0))
1666 return false;
1667 file = ld->src[0].getFile();
1668 } else {
1669 file = insn->src[s].getFile();
1670 // catch $r63 on NVC0
1671 if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
1672 file = FILE_IMMEDIATE;
1673 }
1674 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
1675 return false;
1676 }
1677 return true;
1678 }
1679
1680 void
1681 FlatteningPass::removeFlow(Instruction *insn)
1682 {
1683 FlowInstruction *term = insn ? insn->asFlow() : NULL;
1684 if (!term)
1685 return;
1686 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
1687
1688 if (term->op == OP_BRA) {
1689 // TODO: this might get more difficult when we get arbitrary BRAs
1690 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
1691 return;
1692 } else
1693 if (term->op != OP_JOIN)
1694 return;
1695
1696    Value *pred = term->getPredicate();
1697
1698    delete_Instruction(prog, term);
1699
1700 if (pred && pred->refCount() == 0) {
1701 Instruction *pSet = pred->getUniqueInsn();
1702 pred->join->reg.data.id = -1; // deallocate
1703 if (pSet->isDead())
1704 delete_Instruction(prog, pSet);
1705 }
1706 }
1707
1708 void
1709 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
1710 {
1711 for (Instruction *i = bb->getEntry(); i; i = i->next) {
1712 if (i->isNop())
1713 continue;
1714 assert(!i->getPredicate());
1715 i->setPredicate(cc, pred);
1716 }
1717 removeFlow(bb->getExit());
1718 }
1719
1720 bool
1721 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
1722 {
1723 if (insn->isPseudo())
1724 return true;
1725 // TODO: calls where we don't know which registers are modified
1726
1727 if (!prog->getTarget()->mayPredicate(insn, pred))
1728 return false;
1729 for (int d = 0; insn->defExists(d); ++d)
1730 if (insn->getDef(d)->equals(pred))
1731 return false;
1732 return true;
1733 }
1734
1735 // If we conditionally skip over or to a branch instruction, replace it.
1736 // NOTE: we do not update the CFG here anymore!
1737 void
1738 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
1739 {
1740 BasicBlock *bf = NULL;
1741 unsigned int i;
1742
1743 if (bb->cfg.outgoingCount() != 2)
1744 return;
1745 if (!bb->getExit() || bb->getExit()->op != OP_BRA)
1746 return;
1747 Graph::EdgeIterator ei = bb->cfg.outgoing();
1748
1749 for (i = 0; !ei.end(); ++i, ei.next()) {
1750 bf = BasicBlock::get(ei.getNode());
1751 if (bf->getInsnCount() == 1)
1752 break;
1753 }
1754 if (ei.end() || !bf->getExit())
1755 return;
1756 FlowInstruction *bra = bb->getExit()->asFlow();
1757 FlowInstruction *rep = bf->getExit()->asFlow();
1758
1759 if (rep->getPredicate())
1760 return;
1761 if (rep->op != OP_BRA &&
1762 rep->op != OP_JOIN &&
1763 rep->op != OP_EXIT)
1764 return;
1765
1766 bra->op = rep->op;
1767 bra->target.bb = rep->target.bb;
1768 if (i) // 2nd out block means branch not taken
1769 bra->cc = inverseCondCode(bra->cc);
1770 bf->remove(rep);
1771 }
1772
1773 bool
1774 FlatteningPass::visit(BasicBlock *bb)
1775 {
1776 if (tryPredicateConditional(bb))
1777 return true;
1778
1779 // try to attach join to previous instruction
1780 Instruction *insn = bb->getExit();
1781 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
1782 insn = insn->prev;
1783 if (insn && !insn->getPredicate() && !insn->asFlow() && !insn->isNop()) {
1784 insn->join = 1;
1785 bb->remove(bb->getExit());
1786 return true;
1787 }
1788 }
1789
1790 tryPropagateBranch(bb);
1791
1792 return true;
1793 }
1794
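// If-conversion: flatten a simple conditional by predicating the
// instructions on both sides, the "then" block under the branch's
// condition code and the "else" block under its inverse, then drop the
// branch and join. Sides longer than 12 instructions (4 when the
// condition depends only on immediates or c[] space, i.e. the branch is
// presumably uniform and cheap) stay real branches.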
1795 bool
1796 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
1797 {
1798 BasicBlock *bL = NULL, *bR = NULL;
1799 unsigned int nL = 0, nR = 0, limit = 12;
1800 Instruction *insn;
1801 unsigned int mask;
1802
1803 mask = bb->initiatesSimpleConditional();
1804 if (!mask)
1805 return false;
1806
1807 assert(bb->getExit());
1808 Value *pred = bb->getExit()->getPredicate();
1809 assert(pred);
1810
1811 if (isConstantCondition(pred))
1812 limit = 4;
1813
1814 Graph::EdgeIterator ei = bb->cfg.outgoing();
1815
1816 if (mask & 1) {
1817 bL = BasicBlock::get(ei.getNode());
1818 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
1819 if (!mayPredicate(insn, pred))
1820 return false;
1821 if (nL > limit)
1822 return false; // too long, do a real branch
1823 }
1824 ei.next();
1825
1826 if (mask & 2) {
1827 bR = BasicBlock::get(ei.getNode());
1828 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
1829 if (!mayPredicate(insn, pred))
1830 return false;
1831 if (nR > limit)
1832 return false; // too long, do a real branch
1833 }
1834
1835 if (bL)
1836 predicateInstructions(bL, pred, bb->getExit()->cc);
1837 if (bR)
1838 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
1839
1840 if (bb->joinAt) {
1841 bb->remove(bb->joinAt);
1842 bb->joinAt = NULL;
1843 }
1844 removeFlow(bb->getExit()); // delete the branch/join at the fork point
1845
1846 // remove potential join operations at the end of the conditional
1847 if (prog->getTarget()->joinAnterior) {
1848 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
1849 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
1850 removeFlow(bb->getEntry());
1851 }
1852
1853 return true;
1854 }
1855
1856 // =============================================================================
1857
1858 // Common subexpression elimination. Naive O(n^2) implementation.
1859 class LocalCSE : public Pass
1860 {
1861 private:
1862 virtual bool visit(BasicBlock *);
1863
1864 inline bool tryReplace(Instruction **, Instruction *);
1865
1866 DLList ops[OP_LAST + 1];
1867 };
1868
1869 class GlobalCSE : public Pass
1870 {
1871 private:
1872 virtual bool visit(BasicBlock *);
1873 };
1874
1875 bool
1876 Instruction::isActionEqual(const Instruction *that) const
1877 {
1878 if (this->op != that->op ||
1879 this->dType != that->dType ||
1880 this->sType != that->sType)
1881 return false;
1882 if (this->cc != that->cc)
1883 return false;
1884
1885 if (this->asTex()) {
1886 if (memcmp(&this->asTex()->tex,
1887 &that->asTex()->tex,
1888 sizeof(this->asTex()->tex)))
1889 return false;
1890 } else
1891 if (this->asCmp()) {
1892 if (this->asCmp()->setCond != that->asCmp()->setCond)
1893 return false;
1894 } else
1895 if (this->asFlow()) {
1896 return false;
1897 } else {
1898 if (this->atomic != that->atomic ||
1899 this->ipa != that->ipa ||
1900 this->lanes != that->lanes ||
1901 this->perPatch != that->perPatch)
1902 return false;
1903 if (this->postFactor != that->postFactor)
1904 return false;
1905 }
1906
1907 if (this->subOp != that->subOp ||
1908 this->saturate != that->saturate ||
1909 this->rnd != that->rnd ||
1910 this->ftz != that->ftz ||
1911 this->dnz != that->dnz ||
1912 this->cache != that->cache)
1913 return false;
1914
1915 return true;
1916 }
1917
1918 bool
1919 Instruction::isResultEqual(const Instruction *that) const
1920 {
1921 unsigned int d, s;
1922
1923 // NOTE: location of discard only affects tex with liveOnly and quadops
1924 if (!this->defExists(0) && this->op != OP_DISCARD)
1925 return false;
1926
1927 if (!isActionEqual(that))
1928 return false;
1929
1930 if (this->predSrc != that->predSrc)
1931 return false;
1932
1933 for (d = 0; this->defExists(d); ++d) {
1934 if (!that->defExists(d) ||
1935 !this->getDef(d)->equals(that->getDef(d), false))
1936 return false;
1937 }
1938 if (that->defExists(d))
1939 return false;
1940
1941 for (s = 0; this->srcExists(s); ++s) {
1942 if (!that->srcExists(s))
1943 return false;
1944 if (this->src[s].mod != that->src[s].mod)
1945 return false;
1946 if (!this->getSrc(s)->equals(that->getSrc(s), true))
1947 return false;
1948 }
1949 if (that->srcExists(s))
1950 return false;
1951
1952 if (op == OP_LOAD || op == OP_VFETCH) {
1953 switch (src[0].getFile()) {
1954 case FILE_MEMORY_CONST:
1955 case FILE_SHADER_INPUT:
1956 return true;
1957 default:
1958 return false;
1959 }
1960 }
1961
1962 return true;
1963 }
1964
1965 // Pull expressions computed in common by all incoming blocks up through the phi.
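// E.g. (sketch): for "phi d, a, b" where a and b are single-use values
// computed identically in the two predecessors, one of the defining
// instructions is hoisted into this block to define d directly; the phi
// is deleted and the now-unused twins are left for DCE.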
1966 bool
1967 GlobalCSE::visit(BasicBlock *bb)
1968 {
1969 Instruction *phi, *next, *ik;
1970 int s;
1971
1972 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
1973 next = phi->next;
1974 if (phi->getSrc(0)->refCount() > 1)
1975 continue;
1976 ik = phi->getSrc(0)->getInsn();
1977 for (s = 1; phi->srcExists(s); ++s) {
1978 if (phi->getSrc(s)->refCount() > 1)
1979 break;
1980 if (!phi->getSrc(s)->getInsn()->isResultEqual(ik))
1981 break;
1982 }
1983 if (!phi->srcExists(s)) {
1984 Instruction *entry = bb->getEntry();
1985 ik->bb->remove(ik);
1986 if (!entry || entry->op != OP_JOIN)
1987 bb->insertHead(ik);
1988 else
1989 bb->insertAfter(entry, ik);
1990 ik->setDef(0, phi->getDef(0));
1991 delete_Instruction(prog, phi);
1992 }
1993 }
1994
1995 return true;
1996 }
1997
1998 bool
1999 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
2000 {
2001 Instruction *old = *ptr;
2002 if (!old->isResultEqual(i))
2003 return false;
2004 for (int d = 0; old->defExists(d); ++d)
2005 old->def[d].replace(i->getDef(d), false);
2006 delete_Instruction(prog, old);
2007 *ptr = NULL;
2008 return true;
2009 }
2010
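// For each instruction, candidate matches are found either through the
// uses of its least-referenced LValue source (usually the shortest use
// list) or, for instructions without such a source, through a per-opcode
// list; the whole block is re-scanned until a pass makes no replacement.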
2011 bool
2012 LocalCSE::visit(BasicBlock *bb)
2013 {
2014 unsigned int replaced;
2015
2016 do {
2017 Instruction *ir, *next;
2018
2019 replaced = 0;
2020
2021 // will need to know the order of instructions
2022 int serial = 0;
2023 for (ir = bb->getEntry(); ir; ir = ir->next)
2024 ir->serial = serial++;
2025
2026 for (ir = bb->getEntry(); ir; ir = next) {
2027 int s;
2028 Value *src = NULL;
2029
2030 next = ir->next;
2031
2032 if (ir->fixed) {
2033 ops[ir->op].insert(ir);
2034 continue;
2035 }
2036
2037 for (s = 0; ir->srcExists(s); ++s)
2038 if (ir->getSrc(s)->asLValue())
2039 if (!src || ir->getSrc(s)->refCount() < src->refCount())
2040 src = ir->getSrc(s);
2041
2042 if (src) {
2043 for (ValueRef::Iterator refs = src->uses->iterator(); !refs.end();
2044 refs.next()) {
2045 Instruction *ik = refs.get()->getInsn();
2046 if (ik->serial < ir->serial && ik->bb == ir->bb)
2047 if (tryReplace(&ir, ik))
2048 break;
2049 }
2050 } else {
2051 DLLIST_FOR_EACH(&ops[ir->op], iter)
2052 {
2053 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
2054 if (tryReplace(&ir, ik))
2055 break;
2056 }
2057 }
2058
2059 if (ir)
2060 ops[ir->op].insert(ir);
2061 else
2062 ++replaced;
2063 }
2064 for (unsigned int i = 0; i <= OP_LAST; ++i)
2065 ops[i].clear();
2066
2067 } while (replaced);
2068
2069 return true;
2070 }
2071
2072 // =============================================================================
2073
2074 // Remove computations of unused values.
2075 class DeadCodeElim : public Pass
2076 {
2077 public:
2078 bool buryAll(Program *);
2079
2080 private:
2081 virtual bool visit(BasicBlock *);
2082
2083 void checkSplitLoad(Instruction *ld); // for partially dead loads
2084
2085 unsigned int deadCount;
2086 };
2087
2088 bool
2089 DeadCodeElim::buryAll(Program *prog)
2090 {
2091 do {
2092 deadCount = 0;
2093 if (!this->run(prog, false, false))
2094 return false;
2095 } while (deadCount);
2096
2097 return true;
2098 }
2099
2100 bool
2101 DeadCodeElim::visit(BasicBlock *bb)
2102 {
2103 Instruction *next;
2104
2105 for (Instruction *i = bb->getFirst(); i; i = next) {
2106 next = i->next;
2107 if (i->isDead()) {
2108 ++deadCount;
2109 delete_Instruction(prog, i);
2110 } else
2111 if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
2112 checkSplitLoad(i);
2113 }
2114 }
2115 return true;
2116 }
2117
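// A vector load whose leading or trailing components are dead is narrowed
// and, if live components remain on both sides of a dead gap, split in
// two: e.g. a 16-byte load with only .x and .w used becomes a 4-byte load
// plus a second 4-byte load at offset 12 (alignment permitting).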
2118 void
2119 DeadCodeElim::checkSplitLoad(Instruction *ld1)
2120 {
2121 Instruction *ld2 = NULL; // can get at most 2 loads
2122 Value *def1[4];
2123 Value *def2[4];
2124 int32_t addr1, addr2;
2125 int32_t size1, size2;
2126 int d, n1, n2;
2127 uint32_t mask = 0xffffffff;
2128
2129 for (d = 0; ld1->defExists(d); ++d)
2130 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
2131 mask &= ~(1 << d);
2132 if (mask == 0xffffffff)
2133 return;
2134
2135 addr1 = ld1->getSrc(0)->reg.data.offset;
2136 n1 = n2 = 0;
2137 size1 = size2 = 0;
2138 for (d = 0; ld1->defExists(d); ++d) {
2139 if (mask & (1 << d)) {
2140 if (size1 && (addr1 & 0x7))
2141 break;
2142 def1[n1] = ld1->getDef(d);
2143 size1 += def1[n1++]->reg.size;
2144 } else
2145 if (!n1) {
2146 addr1 += ld1->getDef(d)->reg.size;
2147 } else {
2148 break;
2149 }
2150 }
2151 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
2152 if (mask & (1 << d)) {
2153 def2[n2] = ld1->getDef(d);
2154 size2 += def2[n2++]->reg.size;
2155 } else {
2156 assert(!n2);
2157 addr2 += ld1->getDef(d)->reg.size;
2158 }
2159 }
2160
2161 updateLdStOffset(ld1, addr1, func);
2162 ld1->setType(typeOfSize(size1));
2163 for (d = 0; d < 4; ++d)
2164 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
2165
2166 if (!n2)
2167 return;
2168
2169 ld2 = ld1->clone(false);
2170 updateLdStOffset(ld2, addr2, func);
2171 ld2->setType(typeOfSize(size2));
2172 for (d = 0; d < 4; ++d)
2173 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
2174
2175 ld1->bb->insertAfter(ld1, ld2);
2176 }
2177
2178 // =============================================================================
2179
2180 #define RUN_PASS(l, n, f) \
2181 if (level >= (l)) { \
2182 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
2183 INFO("PEEPHOLE: %s\n", #n); \
2184 n pass; \
2185 if (!pass.f(this)) \
2186 return false; \
2187 }
2188
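// Pass schedule: level >= 1 runs the essential passes and level >= 2 adds
// the more aggressive ones; dead code elimination runs first to shrink the
// workload and again at the end (at any level) to sweep up what the other
// passes exposed.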
2189 bool
2190 Program::optimizeSSA(int level)
2191 {
2192 RUN_PASS(1, DeadCodeElim, buryAll);
2193 RUN_PASS(1, CopyPropagation, run);
2194 RUN_PASS(2, GlobalCSE, run);
2195 RUN_PASS(1, LocalCSE, run);
2196 RUN_PASS(2, AlgebraicOpt, run);
2197 RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
2198 RUN_PASS(1, ConstantFolding, foldAll);
2199 RUN_PASS(1, LoadPropagation, run);
2200 RUN_PASS(2, MemoryOpt, run);
2201 RUN_PASS(2, LocalCSE, run);
2202 RUN_PASS(0, DeadCodeElim, buryAll);
2203 return true;
2204 }
2205
2206 bool
2207 Program::optimizePostRA(int level)
2208 {
2209 RUN_PASS(2, FlatteningPass, run);
2210 return true;
2211 }
2212
2213 }