nv50/ir: Add support for unlimited instruction arguments.
[mesa.git] src/gallium/drivers/nv50/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "nv50_ir.h"
#include "nv50_ir_target.h"
#include "nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_CONSTRAINT || op == OP_PHI)
      return true;
   if (terminator || join) // XXX: should terminator imply flow?
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def[0].rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def[d].rep()->reg.data.id >= 0)
            WARN("part of vector result is unused!\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!def[0].rep()->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def[0].rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def[0].replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmd32Load(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src[0].getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmd32Load(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
      return false;
   return ld->src[0].getFile() == FILE_IMMEDIATE;
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
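   // Prefer to have the "heavier" operand (a const-buffer load, then a 32-bit
   // immediate) as src(1), where it is more likely to be encodable directly;
   // SET/SLCT compensate below by adjusting their condition code.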
   if (!prog->getTarget()->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
         return;
   if (insn->src[1].getFile() != FILE_GPR)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   if (isCSpaceLoad(i0)) {
      if (!isCSpaceLoad(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isImmd32Load(i0)) {
      if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src[0].isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue *, ImmediateValue *);
   void opnd(Instruction *, ImmediateValue *, int s);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV) // continue early, MOV appears frequently
         continue;

      ImmediateValue *src0 = i->srcExists(0) ? i->src[0].getImmediate() : NULL;
      ImmediateValue *src1 = i->srcExists(1) ? i->src[1].getImmediate() : NULL;

      if (src0 && src1)
         expr(i, src0, src1);
      else
      if (src0)
         opnd(i, src0, 0);
      else
      if (src1)
         opnd(i, src1, 1);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();

   while (insn && insn->op != OP_SET) {
      Instruction *next = NULL;
      switch (insn->op) {
      case OP_NEG:
      case OP_ABS:
      case OP_CVT:
         next = insn->getSrc(0)->getInsn();
         if (!next || insn->sType != next->dType)
            return NULL;
         break;
      case OP_MOV:
         next = insn->getSrc(0)->getInsn();
         break;
      default:
         return NULL;
      }
      insn = next;
   }
   return insn ? insn->asCmp() : NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue *src0, ImmediateValue *src1)
{
   ImmediateValue imm0(src0, i->sType);
   ImmediateValue imm1(src1, i->sType);
   struct Storage res;
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;

   i->src[0].mod.applyTo(imm0);
   i->src[1].mod.applyTo(imm1);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 * b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = powf(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   default:
      return;
   }
   ++foldCount;

   i->src[0].mod = Modifier(0);
   i->src[1].mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;

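   // The multiply part has now been folded into src(0); rewrite MAD/FMA as
   // an ADD of the remaining addend and the new immediate, then try to fold
   // again in case the addend is an immediate, too.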
   if (i->op == OP_MAD || i->op == OP_FMA) {
      i->op = OP_ADD;

      i->setSrc(1, i->getSrc(0));
      i->setSrc(0, i->getSrc(2));
      i->setSrc(2, NULL);

      i->src[1].mod = i->src[2].mod;

      src0 = i->src[0].getImmediate();
      if (src0)
         expr(i, src0, i->getSrc(1)->asImm());
   } else {
      i->op = OP_MOV;
   }
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src[0].mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (insn && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1) {
         int s1 = 0;
         ImmediateValue *imm = mul1->src[s1].getImmediate();
         if (!imm) {
            s1 = 1;
            imm = mul1->src[s1].getImmediate();
         }
         if (imm) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            ImmediateValue imm1(mul1->src[s1].getImmediate(), TYPE_F32);
            mul1->src[s1].mod.applyTo(imm1);
            mul1->src[s1].mod = Modifier(0);
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul2->def[0].replace(mul1->getDef(0), false);
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def[0].replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src[0].mod = mul1->src[0].mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = mul2->getDef(0)->uses.front()->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src[t2].getImmediate())
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src[t]);
         if (f < 0)
            mul2->src[s2].mod = mul2->src[s2].mod ^ Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue *src, int s)
{
   const int t = !s;
   const operation op = i->op;

   ImmediateValue imm(src, i->sType);

   i->src[s].mod.applyTo(imm);

   switch (i->op) {
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm);

      if (imm.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, i->getSrc(s));
         i->setSrc(1, NULL);
      } else
      if (imm.isInteger(1) || imm.isInteger(-1)) {
         if (imm.isNegative())
            i->src[t].mod = i->src[t].mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src[t].mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src[0].mod = i->src[1].mod;
            i->src[1].mod = 0;
         }
         if (i->op != OP_CVT)
            i->src[0].mod = 0;
         i->setSrc(1, NULL);
      } else
      if (imm.isInteger(2) || imm.isInteger(-2)) {
         if (imm.isNegative())
            i->src[t].mod = i->src[t].mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src[s].mod = i->src[t].mod;
      } else
      if (!isFloatType(i->sType) && !imm.isNegative() && imm.isPow2()) {
         i->op = OP_SHL;
         imm.applyLog2();
         i->setSrc(1, new_ImmediateValue(prog, imm.reg.data.u32));
      }
      break;
   case OP_ADD:
      if (imm.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src[0].mod = i->src[1].mod;
         }
         i->setSrc(1, NULL);
         i->op = i->src[0].mod.getOp();
         if (i->op != OP_CVT)
            i->src[0].mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm.reg.data.u32 == 0) {
         break;
      } else
      if (imm.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
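         // Division by an arbitrary u32 constant: multiply by the 32-bit
         // fixed-point reciprocal m = 2^32 * (2^l - d) / d + 1 (round-up
         // method, cf. Granlund & Montgomery, "Division by Invariant
         // Integers using Multiplication"), then compute
         //   q = (((n - mulhi(n, m)) >> 1) + mulhi(n, m)) >> (l - 1).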
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
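         // Signed division by a constant: take the high 32 bits of n * m
         // (the MAD adds n back since m was biased by -2^32), shift right by
         // l - 1, then subtract the (n < 0) comparison result (0 or -1) to
         // round toward zero; negate the quotient if the divisor was
         // negative.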
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
      if (i->sType == TYPE_U32 && imm.isPow2()) {
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
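      // Fold a comparison of a SET result (0 or 1) against zero into a
      // single SET with an adjusted condition code, e.g.
      // SET_EQ(SET_LT(a, b), 0) -> SET_GE(a, b).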
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (i->src[t].mod != Modifier(0))
         return;
      if (imm.reg.data.u32 != 0 || !si || si->op != OP_SET)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break;
      case CC_GE: cc = CC_TR; break;
      case CC_EQ: cc = inverseCondCode(cc); break;
      case CC_LE: cc = inverseCondCode(cc); break;
      case CC_GT: break;
      case CC_NE: break;
      default:
         return;
      }
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src[0]);
      i->setSrc(1, si->src[1]);
      i->sType = si->sType;
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src[0].mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si ||
          si->op != OP_SHL || si->src[1].mod != Modifier(0))
         break;
      ImmediateValue *siImm = si->src[1].getImmediate();
      if (siImm) {
         bld.setPosition(i, false);
         i->setSrc(0, si->getSrc(0));
         i->setSrc(1, bld.loadImm(NULL,
                                  imm.reg.data.u32 + siImm->reg.data.u32));
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm);
      break;
   default:
      return;
   }
   if (i->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this?)
         i->op = OP_ADD;
         i->src[1].mod = i->src[1].mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod = mod * mi->src[0].mod;

         if ((i->op == OP_ABS) || i->src[s].mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on the same insn is prohibited:
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src[s].mod = i->src[s].mod * mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleADD(Instruction *);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT(Instruction *);
};

void
AlgebraicOpt::handleADD(Instruction *add)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);
   Value *src;
   int s;
   Modifier mod[4];

   if (!prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      return;

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_MUL)
      s = 0;
   else
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_MUL)
      s = 1;
   else
      return;

   if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
       (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
      return;

   src = add->getSrc(s);

   if (src->getInsn()->postFactor)
      return;

   mod[0] = add->src[0].mod;
   mod[1] = add->src[1].mod;
   mod[2] = src->getUniqueInsn()->src[0].mod;
   mod[3] = src->getUniqueInsn()->src[1].mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & Modifier(~NV50_IR_MOD_NEG))
      return;

   add->op = OP_MAD;
   add->subOp = src->getInsn()->subOp; // potentially mul-high

   add->setSrc(2, add->src[s ? 0 : 1]);

   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src[0].mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src[1].mod = mod[3];
}

void
AlgebraicOpt::handleMINMAX(Instruction *minmax)
{
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
      return;
   if (minmax->src[0].mod == minmax->src[1].mod) {
      if (minmax->src[0].mod) {
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);
      } else {
         minmax->def[0].replace(minmax->getSrc(0), false);
         minmax->bb->remove(minmax);
      }
   } else {
      // TODO:
      // min(x, -x) = -abs(x)
      // min(x, -abs(x)) = -abs(x)
      // min(x, abs(x)) = x
      // max(x, -abs(x)) = x
      // max(x, abs(x)) = abs(x)
      // max(x, -x) = abs(x)
   }
}

void
AlgebraicOpt::handleRCP(Instruction *rcp)
{
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src[0].mod * si->src[0].mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
   }
}

void
AlgebraicOpt::handleSLCT(Instruction *slct)
{
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   } else
   if (slct->getSrc(0) != slct->getSrc(1)) {
      return;
   }
   slct->op = OP_MOV;
   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
}

void
AlgebraicOpt::handleLOGOP(Instruction *logop)
{
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0 == src1) {
      if (logop->src[0].mod != Modifier(0) ||
          logop->src[1].mod != Modifier(0))
         return;
      if (logop->op == OP_AND || logop->op == OP_OR) {
         logop->def[0].replace(logop->getSrc(0), false);
         delete_Instruction(prog, logop);
      }
   } else {
      // try AND(SET, SET) -> SET_AND(SET)
      Instruction *set0 = src0->getInsn();
      Instruction *set1 = src1->getInsn();

      if (!set0 || set0->fixed || !set1 || set1->fixed)
         return;
      if (set1->op != OP_SET) {
         Instruction *xchg = set0;
         set0 = set1;
         set1 = xchg;
         if (set1->op != OP_SET)
            return;
      }
      if (set0->op != OP_SET &&
          set0->op != OP_SET_AND &&
          set0->op != OP_SET_OR &&
          set0->op != OP_SET_XOR)
         return;
      if (set0->getDef(0)->refCount() > 1 &&
          set1->getDef(0)->refCount() > 1)
         return;
      if (set0->getPredicate() || set1->getPredicate())
         return;
      // check that they don't source each other
      for (int s = 0; s < 2; ++s)
         if (set0->getSrc(s) == set1->getDef(0) ||
             set1->getSrc(s) == set0->getDef(0))
            return;

      set0 = set0->clone(true);
      set1 = set1->clone(false);
      logop->bb->insertAfter(logop, set1);
      logop->bb->insertAfter(logop, set0);

      set0->dType = TYPE_U8;
      set0->getDef(0)->reg.file = FILE_PREDICATE;
      set0->getDef(0)->reg.size = 1;
      set1->setSrc(2, set0->getDef(0));
      switch (logop->op) {
      case OP_AND: set1->op = OP_SET_AND; break;
      case OP_OR:  set1->op = OP_SET_OR; break;
      case OP_XOR: set1->op = OP_SET_XOR; break;
      default:
         assert(0);
         break;
      }
      set1->setDef(0, logop->getDef(0));
      delete_Instruction(prog, logop);
   }
}

// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
void
AlgebraicOpt::handleCVT(Instruction *cvt)
{
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src[0].mod != Modifier(0))
      return;
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
      return;
   if (insn->src[0].mod != Modifier(0))
      return;
   insn = insn->getSrc(0)->getInsn();
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32)
      return;

   Instruction *bset = insn->clone(false);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
}

bool
AlgebraicOpt::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      switch (i->op) {
      case OP_ADD:
         handleADD(i);
         break;
      case OP_RCP:
         handleRCP(i);
         break;
      case OP_MIN:
      case OP_MAX:
         handleMINMAX(i);
         break;
      case OP_SLCT:
         handleSLCT(i);
         break;
      case OP_AND:
      case OP_OR:
      case OP_XOR:
         handleLOGOP(i);
         break;
      case OP_CVT:
         handleCVT(i);
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

static inline void
updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
{
   if (offset != ldst->getSrc(0)->reg.data.offset) {
      if (ldst->getSrc(0)->refCount() > 1)
         ldst->setSrc(0, ldst->getSrc(0)->clone(fn));
      ldst->getSrc(0)->reg.data.offset = offset;
   }
}

// Combine loads and stores, forward stores to loads where possible.
class MemoryOpt : public Pass
{
private:
   class Record
   {
   public:
      Record *next;
      Instruction *insn;
      const Value *rel[2];
      const Value *base;
      int32_t offset;
      int8_t fileIndex;
      uint8_t size;
      bool locked;
      Record *prev;

      bool overlaps(const Instruction *ldst) const;

      inline void link(Record **);
      inline void unlink(Record **);
      inline void set(const Instruction *ldst);
   };

public:
   MemoryOpt();

   Record *loads[DATA_FILE_COUNT];
   Record *stores[DATA_FILE_COUNT];

   MemoryPool recordPool;

private:
   virtual bool visit(BasicBlock *);
   bool runOpt(BasicBlock *);

   Record **getList(const Instruction *);

   Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;

   // merge @insn into load/store instruction from @rec
   bool combineLd(Record *rec, Instruction *ld);
   bool combineSt(Record *rec, Instruction *st);

   bool replaceLdFromLd(Instruction *ld, Record *ldRec);
   bool replaceLdFromSt(Instruction *ld, Record *stRec);
   bool replaceStFromSt(Instruction *restrict st, Record *stRec);

   void addRecord(Instruction *ldst);
   void purgeRecords(Instruction *const st, DataFile);
   void lockStores(Instruction *const ld);
   void reset();

private:
   Record *prevRecord;
};

MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
{
   for (int i = 0; i < DATA_FILE_COUNT; ++i) {
      loads[i] = NULL;
      stores[i] = NULL;
   }
   prevRecord = NULL;
}

void
MemoryOpt::reset()
{
   for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
      Record *it, *next;
      for (it = loads[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      loads[i] = NULL;
      for (it = stores[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      stores[i] = NULL;
   }
}

bool
MemoryOpt::combineLd(Record *rec, Instruction *ld)
{
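   // Merge the new load @ld with the recorded adjacent load, widening the
   // access and reordering destination values to match the combined range.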
   int32_t offRc = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeLd = typeSizeof(ld->dType);
   int size = sizeRc + sizeLd;
   int d, j;

   if (!prog->getTarget()->
       isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   // no unaligned loads
   if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
       ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
      return false;

   assert(sizeRc + sizeLd <= 16 && offRc != offLd);

   for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);

   if (offLd < offRc) {
      int sz;
      for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
      // d: nr of definitions in ld
      // j: nr of definitions in rec->insn, move:
      for (d = d + j - 1; j > 0; --j, --d)
         rec->insn->setDef(d, rec->insn->getDef(j - 1));

      if (rec->insn->getSrc(0)->refCount() > 1)
         rec->insn->setSrc(0, rec->insn->getSrc(0)->clone(func));
      rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;

      d = 0;
   } else {
      d = j;
   }
   // move definitions of @ld to @rec->insn
   for (j = 0; sizeLd; ++j, ++d) {
      sizeLd -= ld->getDef(j)->reg.size;
      rec->insn->setDef(d, ld->getDef(j));
   }

   rec->size = size;
   rec->insn->setType(typeOfSize(size));

   delete_Instruction(prog, ld);

   return true;
}

bool
MemoryOpt::combineSt(Record *rec, Instruction *st)
{
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int s = sizeSt / 4;
   int size = sizeRc + sizeSt;
   int j, k;
   Value *src[4]; // no modifiers in ValueRef allowed for st
   Value *extra[3];

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   if (size == 8 && MIN2(offRc, offSt) & 0x7)
      return false;

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      }
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= st->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      }
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
   } else {
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      }
      rec->offset = offSt;
   }
   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);
   rec->insn = st;
   rec->size = size;
   rec->insn->setType(typeOfSize(size));
   return true;
}

void
MemoryOpt::Record::set(const Instruction *ldst)
{
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
}

void
MemoryOpt::Record::link(Record **list)
{
   next = *list;
   if (next)
      next->prev = this;
   prev = NULL;
   *list = this;
}

void
MemoryOpt::Record::unlink(Record **list)
{
   if (next)
      next->prev = prev;
   if (prev)
      prev->next = next;
   else
      *list = next;
}

MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
{
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src[0].getFile()];
   return &stores[insn->src[0].getFile()];
}

void
MemoryOpt::addRecord(Instruction *i)
{
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());

   it->link(list);
   it->set(i);
   it->insn = i;
   it->locked = false;
}

MemoryOpt::Record *
MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
{
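   // Look for a previous access in the same 16-byte segment with matching
   // file index and indirect addressing that this one can be combined with
   // or replaced by; isAdj tells the caller whether the two ranges merely
   // touch (combine) or overlap (replace/forward).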
   const Symbol *sym = insn->getSrc(0)->asSym();
   const int size = typeSizeof(insn->sType);
   Record *rec = NULL;
   Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];

   for (; it; it = it->next) {
      if (it->locked && insn->op != OP_LOAD)
         continue;
      if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
          it->rel[0] != insn->getIndirect(0, 0) ||
          it->fileIndex != sym->reg.fileIndex ||
          it->rel[1] != insn->getIndirect(0, 1))
         continue;

      if (it->offset < sym->reg.data.offset) {
         if (it->offset + it->size >= sym->reg.data.offset) {
            isAdj = (it->offset + it->size == sym->reg.data.offset);
            if (!isAdj)
               return it;
            if (!(it->offset & 0x7))
               rec = it;
         }
      } else {
         isAdj = it->offset != sym->reg.data.offset;
         if (size <= it->size && !isAdj)
            return it;
         else
         if (!(sym->reg.data.offset & 0x7))
            if (it->offset - size <= sym->reg.data.offset)
               rec = it;
      }
   }
   return rec;
}

bool
MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
{
   Instruction *st = rec->insn;
   int32_t offSt = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int d, s;

   for (s = 1; offSt != offLd && st->srcExists(s); ++s)
      offSt += st->getSrc(s)->reg.size;
   if (offSt != offLd)
      return false;

   for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
      if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
         return false;
      if (st->getSrc(s)->reg.file != FILE_GPR)
         return false;
      ld->def[d].replace(st->getSrc(s), false);
   }
   ld->bb->remove(ld);
   return true;
}

bool
MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
{
   Instruction *ldR = rec->insn;
   int32_t offR = rec->offset;
   int32_t offE = ldE->getSrc(0)->reg.data.offset;
   int dR, dE;

   assert(offR <= offE);
   for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
      offR += ldR->getDef(dR)->reg.size;
   if (offR != offE)
      return false;

   for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
      if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
         return false;
      ldE->def[dE].replace(ldR->getDef(dR), false);
   }

   delete_Instruction(prog, ldE);
   return true;
}

bool
MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
{
   const Instruction *const ri = rec->insn;
   Value *extra[3];

   int32_t offS = st->getSrc(0)->reg.data.offset;
   int32_t offR = rec->offset;
   int32_t endS = offS + typeSizeof(st->dType);
   int32_t endR = offR + typeSizeof(ri->dType);

   rec->size = MAX2(endS, endR) - MIN2(offS, offR);

   st->takeExtraSources(0, extra);

   if (offR < offS) {
      Value *vals[10];
      int s, n;
      int k = 0;
      // get non-replaced sources of ri
      for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      n = s;
      // get replaced sources of st
      for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
         vals[k++] = st->getSrc(s);
      // skip replaced sources of ri
      for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
      // get non-replaced sources after values covered by st
      for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      assert(k <= Elements(vals));
      for (s = 0; s < k; ++s)
         st->setSrc(s + 1, vals[s]);
      st->setSrc(0, ri->getSrc(0));
   } else
   if (endR > endS) {
      int j, s;
      for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
      for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
      for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
         st->setSrc(s++, ri->getSrc(j));
   }
   st->putExtraSources(0, extra);

   delete_Instruction(prog, rec->insn);

   rec->insn = st;
   rec->offset = st->getSrc(0)->reg.data.offset;

   st->setType(typeOfSize(rec->size));

   return true;
}

bool
MemoryOpt::Record::overlaps(const Instruction *ldst) const
{
   Record that;
   that.set(ldst);

   if (this->fileIndex != that.fileIndex)
      return false;

   if (this->rel[0] || that.rel[0])
      return this->base == that.base;
   return
      (this->offset < that.offset + that.size) &&
      (this->offset + this->size > that.offset);
}

// We must not eliminate stores that affect the result of @ld if
// we find later stores to the same location, and we may no longer
// merge them with later stores.
// The stored value can, however, still be used to determine the value
// returned by future loads.
void
MemoryOpt::lockStores(Instruction *const ld)
{
   for (Record *r = stores[ld->src[0].getFile()]; r; r = r->next)
      if (!r->locked && r->overlaps(ld))
         r->locked = true;
}

// Prior loads from the location of @st are no longer valid.
// Stores to the location of @st may no longer be used to derive
// the value at it nor be coalesced into later stores.
void
MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
{
   if (st)
      f = st->src[0].getFile();

   for (Record *r = loads[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&loads[f]);

   for (Record *r = stores[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&stores[f]);
}

bool
MemoryOpt::visit(BasicBlock *bb)
{
   bool ret = runOpt(bb);
   // Run again: one pass won't combine four 32-bit ld/st into a single
   // 128-bit ld/st where 96-bit memory operations are forbidden.
   if (ret)
      ret = runOpt(bb);
   return ret;
}

bool
MemoryOpt::runOpt(BasicBlock *bb)
{
   Instruction *ldst, *next;
   Record *rec;
   bool isAdjacent = true;

   for (ldst = bb->getEntry(); ldst; ldst = next) {
      bool keep = true;
      bool isLoad = true;
      next = ldst->next;

      if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
         if (ldst->isDead()) {
            // might have been produced by earlier optimization
            delete_Instruction(prog, ldst);
            continue;
         }
      } else
      if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
         isLoad = false;
      } else {
         // TODO: maybe have all fixed ops act as barrier?
         if (ldst->op == OP_CALL) {
            purgeRecords(NULL, FILE_MEMORY_LOCAL);
            purgeRecords(NULL, FILE_MEMORY_GLOBAL);
            purgeRecords(NULL, FILE_MEMORY_SHARED);
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         } else
         if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         }
         continue;
      }
      if (ldst->getPredicate()) // TODO: handle predicated ld/st
         continue;

      if (isLoad) {
         DataFile file = ldst->src[0].getFile();

         // if ld l[]/g[] look for previous store to eliminate the reload
         if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
            // TODO: shared memory?
            rec = findRecord(ldst, false, isAdjacent);
            if (rec && !isAdjacent)
               keep = !replaceLdFromSt(ldst, rec);
         }

         // or look for ld from the same location and replace this one
         rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
         if (rec) {
            if (!isAdjacent)
               keep = !replaceLdFromLd(ldst, rec);
            else
               // or combine a previous load with this one
               keep = !combineLd(rec, ldst);
         }
         if (keep)
            lockStores(ldst);
      } else {
         rec = findRecord(ldst, false, isAdjacent);
         if (rec) {
            if (!isAdjacent)
               keep = !replaceStFromSt(ldst, rec);
            else
               keep = !combineSt(rec, ldst);
         }
         if (keep)
            purgeRecords(ldst, DATA_FILE_COUNT);
      }
      if (keep)
         addRecord(ldst);
   }
   reset();

   return true;
}

// =============================================================================

// Turn control flow into predicated instructions (after register allocation!).
// TODO:
// Could move this to before register allocation on NVC0 and also handle nested
// constructs.
class FlatteningPass : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   bool tryPredicateConditional(BasicBlock *);
   void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
   void tryPropagateBranch(BasicBlock *);
   inline bool isConstantCondition(Value *pred);
   inline bool mayPredicate(const Instruction *, const Value *pred) const;
   inline void removeFlow(Instruction *);
};

bool
FlatteningPass::isConstantCondition(Value *pred)
{
   Instruction *insn = pred->getUniqueInsn();
   assert(insn);
   if (insn->op != OP_SET || insn->srcExists(2))
      return false;

   for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
      Instruction *ld = insn->getSrc(s)->getUniqueInsn();
      DataFile file;
      if (ld) {
         if (ld->op != OP_MOV && ld->op != OP_LOAD)
            return false;
         if (ld->src[0].isIndirect(0))
            return false;
         file = ld->src[0].getFile();
      } else {
         file = insn->src[s].getFile();
         // catch $r63 on NVC0
         if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
            file = FILE_IMMEDIATE;
      }
      if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
         return false;
   }
   return true;
}

void
FlatteningPass::removeFlow(Instruction *insn)
{
   FlowInstruction *term = insn ? insn->asFlow() : NULL;
   if (!term)
      return;
   Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();

   if (term->op == OP_BRA) {
      // TODO: this might get more difficult when we get arbitrary BRAs
      if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
         return;
   } else
   if (term->op != OP_JOIN)
      return;

   // fetch the predicate before deleting the instruction that references it
   Value *pred = term->getPredicate();

   delete_Instruction(prog, term);

   if (pred && pred->refCount() == 0) {
      Instruction *pSet = pred->getUniqueInsn();
      pred->join->reg.data.id = -1; // deallocate
      if (pSet->isDead())
         delete_Instruction(prog, pSet);
   }
}

void
FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
{
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      if (i->isNop())
         continue;
      assert(!i->getPredicate());
      i->setPredicate(cc, pred);
   }
   removeFlow(bb->getExit());
}

bool
FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
{
   if (insn->isPseudo())
      return true;
   // TODO: calls where we don't know which registers are modified

   if (!prog->getTarget()->mayPredicate(insn, pred))
      return false;
   for (int d = 0; insn->defExists(d); ++d)
      if (insn->getDef(d)->equals(pred))
         return false;
   return true;
}

// If we conditionally skip over or to a branch instruction, replace it.
// NOTE: We do not update the CFG anymore here!
void
FlatteningPass::tryPropagateBranch(BasicBlock *bb)
{
   BasicBlock *bf = NULL;
   unsigned int i;

   if (bb->cfg.outgoingCount() != 2)
      return;
   if (!bb->getExit() || bb->getExit()->op != OP_BRA)
      return;
   Graph::EdgeIterator ei = bb->cfg.outgoing();

   for (i = 0; !ei.end(); ++i, ei.next()) {
      bf = BasicBlock::get(ei.getNode());
      if (bf->getInsnCount() == 1)
         break;
   }
   if (ei.end() || !bf->getExit())
      return;
   FlowInstruction *bra = bb->getExit()->asFlow();
   FlowInstruction *rep = bf->getExit()->asFlow();

   if (rep->getPredicate())
      return;
   if (rep->op != OP_BRA &&
       rep->op != OP_JOIN &&
       rep->op != OP_EXIT)
      return;

   bra->op = rep->op;
   bra->target.bb = rep->target.bb;
   if (i) // 2nd out block means branch not taken
      bra->cc = inverseCondCode(bra->cc);
   bf->remove(rep);
}

bool
FlatteningPass::visit(BasicBlock *bb)
{
   if (tryPredicateConditional(bb))
      return true;

   // try to attach join to previous instruction
   Instruction *insn = bb->getExit();
   if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
      insn = insn->prev;
      if (insn && !insn->getPredicate() && !insn->asFlow() && !insn->isNop()) {
         insn->join = 1;
         bb->remove(bb->getExit());
         return true;
      }
   }

   tryPropagateBranch(bb);

   return true;
}

bool
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
{
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;
   Instruction *insn;
   unsigned int mask;

   mask = bb->initiatesSimpleConditional();
   if (!mask)
      return false;

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();
   assert(pred);

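   // Predication trades the branch for always executing both paths, so it
   // only pays off for short blocks; if the condition is effectively
   // constant the branch is likely coherent and cheap, so allow even fewer
   // predicated instructions.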
   if (isConstantCondition(pred))
      limit = 4;

   Graph::EdgeIterator ei = bb->cfg.outgoing();

   if (mask & 1) {
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
            return false;
      if (nL > limit)
         return false; // too long, do a real branch
   }
   ei.next();

   if (mask & 2) {
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
            return false;
      if (nR > limit)
         return false; // too long, do a real branch
   }

   if (bL)
      predicateInstructions(bL, pred, bb->getExit()->cc);
   if (bR)
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

   if (bb->joinAt) {
      bb->remove(bb->joinAt);
      bb->joinAt = NULL;
   }
   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
   }

   return true;
}

// =============================================================================

// Common subexpression elimination. Stupid O(n^2) implementation.
class LocalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];
};

class GlobalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
Instruction::isActionEqual(const Instruction *that) const
{
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
      return false;
   if (this->cc != that->cc)
      return false;

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
         return false;
   } else
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
         return false;
   } else
   if (this->asFlow()) {
      return false;
   } else {
      if (this->atomic != that->atomic ||
          this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
         return false;
      if (this->postFactor != that->postFactor)
         return false;
   }

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache)
      return false;

   return true;
}

bool
Instruction::isResultEqual(const Instruction *that) const
{
   unsigned int d, s;

   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)
      return false;

   if (!isActionEqual(that))
      return false;

   if (this->predSrc != that->predSrc)
      return false;

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
         return false;
   }
   if (that->defExists(d))
      return false;

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
         return false;
      if (this->src[s].mod != that->src[s].mod)
         return false;
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
         return false;
   }
   if (that->srcExists(s))
      return false;

   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src[0].getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
         return true;
      default:
         return false;
      }
   }

   return true;
}

// pull through common expressions from different in-blocks
bool
GlobalCSE::visit(BasicBlock *bb)
{
   Instruction *phi, *next, *ik;
   int s;

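   // If every source of a phi is a single-use instruction computing the
   // same result, hoist one copy into the join block and drop the phi,
   // eliminating the redundant per-predecessor computations.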
   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      next = phi->next;
      if (phi->getSrc(0)->refCount() > 1)
         continue;
      ik = phi->getSrc(0)->getInsn();
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
            break;
         if (!phi->getSrc(s)->getInsn()->isResultEqual(ik))
            break;
      }
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         ik->bb->remove(ik);
         if (!entry || entry->op != OP_JOIN)
            bb->insertHead(ik);
         else
            bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
      }
   }

   return true;
}

bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;
   if (!old->isResultEqual(i))
      return false;
   for (int d = 0; old->defExists(d); ++d)
      old->def[d].replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getEntry(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

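         // Pick the source LValue with the fewest uses and scan only that
         // value's use list for an earlier equivalent instruction in this
         // block; sourceless instructions fall back to the per-opcode list.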
         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->serial < ir->serial && ik->bb == ir->bb)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}

// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
{
public:
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *next;

   for (Instruction *i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      }
   }
   return true;
}

void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
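   // Split a vector load whose destination is only partially used into (at
   // most) two smaller loads covering just the live components, adjusting
   // offsets and types accordingly.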
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      }
   }

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = ld1->clone(false);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                    \
   if (level >= (l)) {                       \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)  \
         INFO("PEEPHOLE: %s\n", #n);         \
      n pass;                                \
      if (!pass.f(this))                     \
         return false;                       \
   }

bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);
   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   return true;
}

} // namespace nv50_ir